Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig43
-rw-r--r--arch/alpha/include/asm/checksum.h5
-rw-r--r--arch/alpha/kernel/module.c2
-rw-r--r--arch/alpha/kernel/signal.c2
-rw-r--r--arch/alpha/kernel/traps.c4
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S1
-rw-r--r--arch/alpha/lib/csum_partial_copy.c164
-rw-r--r--arch/arc/boot/dts/hsdk.dts6
-rw-r--r--arch/arc/include/asm/pgalloc.h4
-rw-r--r--arch/arc/kernel/disasm.c2
-rw-r--r--arch/arc/kernel/kprobes.c54
-rw-r--r--arch/arc/kernel/perf_event.c14
-rw-r--r--arch/arc/kernel/signal.c2
-rw-r--r--arch/arc/kernel/troubleshoot.c77
-rw-r--r--arch/arc/kernel/unwind.c6
-rw-r--r--arch/arc/kernel/vmlinux.lds.S1
-rw-r--r--arch/arc/mm/init.c27
-rw-r--r--arch/arc/plat-eznps/include/plat/ctop.h1
-rw-r--r--arch/arm/Kconfig18
-rw-r--r--arch/arm/Makefile4
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.S20
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_icp.dts2
-rw-r--r--arch/arm/boot/dts/bcm-hr2.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-logicpd.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-prtwd2.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw51xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/imx7d-zii-rmu2.dts2
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi8
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi29
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi2
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5.dtsi20
-rw-r--r--arch/arm/boot/dts/r8a7742.dtsi48
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi36
-rw-r--r--arch/arm/boot/dts/tegra20-cpu-opp.dtsi67
-rw-r--r--arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi512
-rw-r--r--arch/arm/boot/dts/tegra30-cpu-opp.dtsi866
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi2
-rw-r--r--arch/arm/configs/integrator_defconfig16
-rw-r--r--arch/arm/crypto/aes-neonbs-core.S54
-rw-r--r--arch/arm/crypto/aes-neonbs-glue.c58
-rw-r--r--arch/arm/crypto/curve25519-glue.c1
-rw-r--r--arch/arm/crypto/poly1305-glue.c1
-rw-r--r--arch/arm/crypto/sha256-armv4.pl4
-rw-r--r--arch/arm/crypto/sha256-core.S_shipped4
-rw-r--r--arch/arm/crypto/sha512-armv4.pl4
-rw-r--r--arch/arm/crypto/sha512-core.S_shipped4
-rw-r--r--arch/arm/include/asm/checksum.h17
-rw-r--r--arch/arm/include/asm/efi.h23
-rw-r--r--arch/arm/include/asm/hardirq.h17
-rw-r--r--arch/arm/include/asm/smp.h5
-rw-r--r--arch/arm/include/asm/tlb.h1
-rw-r--r--arch/arm/include/asm/topology.h5
-rw-r--r--arch/arm/include/asm/vmlinux.lds.h (renamed from arch/arm/kernel/vmlinux.lds.h)30
-rw-r--r--arch/arm/include/asm/xen/page.h5
-rw-r--r--arch/arm/kernel/hw_breakpoint.c10
-rw-r--r--arch/arm/kernel/irq.c1
-rw-r--r--arch/arm/kernel/setup.c18
-rw-r--r--arch/arm/kernel/signal.c2
-rw-r--r--arch/arm/kernel/smp.c140
-rw-r--r--arch/arm/kernel/topology.c26
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S8
-rw-r--r--arch/arm/kernel/vmlinux.lds.S8
-rw-r--r--arch/arm/lib/csumpartialcopy.S4
-rw-r--r--arch/arm/lib/csumpartialcopygeneric.S1
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S26
-rw-r--r--arch/arm/mach-ep93xx/crunch.c2
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c4
-rw-r--r--arch/arm/mach-mmp/pm-mmp2.c8
-rw-r--r--arch/arm/mach-mmp/pm-pxa910.c10
-rw-r--r--arch/arm/mach-omap2/id.c8
-rw-r--r--arch/arm/mach-omap2/omap-iommu.c2
-rw-r--r--arch/arm/mach-omap2/omap_device.c2
-rw-r--r--arch/arm/mach-omap2/pm34xx.c4
-rw-r--r--arch/arm/mach-orion5x/dns323-setup.c2
-rw-r--r--arch/arm/mach-rpc/riscpc.c2
-rw-r--r--arch/arm/mach-tegra/reset.c2
-rw-r--r--arch/arm/mm/alignment.c4
-rw-r--r--arch/arm/mm/init.c59
-rw-r--r--arch/arm/mm/mmu.c39
-rw-r--r--arch/arm/mm/pmsa-v7.c23
-rw-r--r--arch/arm/mm/pmsa-v8.c17
-rw-r--r--arch/arm/mm/proc-v7-bugs.c2
-rw-r--r--arch/arm/plat-omap/dma.c6
-rw-r--r--arch/arm/probes/decode.c2
-rw-r--r--arch/arm/probes/kprobes/core.c80
-rw-r--r--arch/arm/xen/enlighten.c8
-rw-r--r--arch/arm/xen/mm.c7
-rw-r--r--arch/arm64/Kconfig88
-rw-r--r--arch/arm64/Makefile28
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi20
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi15
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi20
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi36
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts10
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi43
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi12
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi12
-rw-r--r--arch/arm64/configs/defconfig12
-rw-r--r--arch/arm64/crypto/aes-neonbs-core.S4
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c4
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c3
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c3
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h8
-rw-r--r--arch/arm64/include/asm/archrandom.h5
-rw-r--r--arch/arm64/include/asm/barrier.h1
-rw-r--r--arch/arm64/include/asm/boot.h3
-rw-r--r--arch/arm64/include/asm/compat.h2
-rw-r--r--arch/arm64/include/asm/compiler.h6
-rw-r--r--arch/arm64/include/asm/cpu_ops.h2
-rw-r--r--arch/arm64/include/asm/cpucaps.h7
-rw-r--r--arch/arm64/include/asm/cpufeature.h32
-rw-r--r--arch/arm64/include/asm/efi.h5
-rw-r--r--arch/arm64/include/asm/esr.h4
-rw-r--r--arch/arm64/include/asm/exception.h1
-rw-r--r--arch/arm64/include/asm/extable.h9
-rw-r--r--arch/arm64/include/asm/fpsimd.h3
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h48
-rw-r--r--arch/arm64/include/asm/hardirq.h9
-rw-r--r--arch/arm64/include/asm/hwcap.h11
-rw-r--r--arch/arm64/include/asm/insn.h4
-rw-r--r--arch/arm64/include/asm/io.h1
-rw-r--r--arch/arm64/include/asm/irq_work.h4
-rw-r--r--arch/arm64/include/asm/irqflags.h5
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h2
-rw-r--r--arch/arm64/include/asm/kvm_arm.h8
-rw-r--r--arch/arm64/include/asm/kvm_asm.h48
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h28
-rw-r--r--arch/arm64/include/asm/kvm_host.h44
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h90
-rw-r--r--arch/arm64/include/asm/memory.h19
-rw-r--r--arch/arm64/include/asm/mman.h56
-rw-r--r--arch/arm64/include/asm/mmu.h14
-rw-r--r--arch/arm64/include/asm/mmu_context.h11
-rw-r--r--arch/arm64/include/asm/mte.h86
-rw-r--r--arch/arm64/include/asm/numa.h3
-rw-r--r--arch/arm64/include/asm/page-def.h5
-rw-r--r--arch/arm64/include/asm/page.h19
-rw-r--r--arch/arm64/include/asm/pci.h1
-rw-r--r--arch/arm64/include/asm/perf_event.h3
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h16
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h9
-rw-r--r--arch/arm64/include/asm/pgtable.h110
-rw-r--r--arch/arm64/include/asm/processor.h56
-rw-r--r--arch/arm64/include/asm/ptrace.h14
-rw-r--r--arch/arm64/include/asm/smp.h16
-rw-r--r--arch/arm64/include/asm/spectre.h32
-rw-r--r--arch/arm64/include/asm/stacktrace.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h87
-rw-r--r--arch/arm64/include/asm/thread_info.h4
-rw-r--r--arch/arm64/include/asm/tlbflush.h6
-rw-r--r--arch/arm64/include/asm/topology.h2
-rw-r--r--arch/arm64/include/asm/traps.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h12
-rw-r--r--arch/arm64/include/asm/xen/page.h6
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h2
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h9
-rw-r--r--arch/arm64/include/uapi/asm/mman.h1
-rw-r--r--arch/arm64/include/uapi/asm/ptrace.h4
-rw-r--r--arch/arm64/kernel/Makefile6
-rw-r--r--arch/arm64/kernel/acpi.c24
-rw-r--r--arch/arm64/kernel/cpu-reset.S4
-rw-r--r--arch/arm64/kernel/cpu_errata.c493
-rw-r--r--arch/arm64/kernel/cpufeature.c134
-rw-r--r--arch/arm64/kernel/cpuinfo.c179
-rw-r--r--arch/arm64/kernel/debug-monitors.c2
-rw-r--r--arch/arm64/kernel/entry-common.c21
-rw-r--r--arch/arm64/kernel/entry-fpsimd.S25
-rw-r--r--arch/arm64/kernel/entry.S66
-rw-r--r--arch/arm64/kernel/fpsimd.c8
-rw-r--r--arch/arm64/kernel/head.S16
-rw-r--r--arch/arm64/kernel/hibernate.c125
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c6
-rw-r--r--arch/arm64/kernel/image-vars.h8
-rw-r--r--arch/arm64/kernel/image.h1
-rw-r--r--arch/arm64/kernel/insn.c11
-rw-r--r--arch/arm64/kernel/irq.c11
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c6
-rw-r--r--arch/arm64/kernel/module-plts.c3
-rw-r--r--arch/arm64/kernel/module.c8
-rw-r--r--arch/arm64/kernel/mte.c336
-rw-r--r--arch/arm64/kernel/paravirt.c26
-rw-r--r--arch/arm64/kernel/perf_callchain.c6
-rw-r--r--arch/arm64/kernel/perf_event.c272
-rw-r--r--arch/arm64/kernel/perf_regs.c2
-rw-r--r--arch/arm64/kernel/probes/decode-insn.c9
-rw-r--r--arch/arm64/kernel/probes/kprobes.c78
-rw-r--r--arch/arm64/kernel/process.c107
-rw-r--r--arch/arm64/kernel/proton-pack.c792
-rw-r--r--arch/arm64/kernel/ptrace.c51
-rw-r--r--arch/arm64/kernel/relocate_kernel.S12
-rw-r--r--arch/arm64/kernel/return_address.c8
-rw-r--r--arch/arm64/kernel/setup.c5
-rw-r--r--arch/arm64/kernel/signal.c12
-rw-r--r--arch/arm64/kernel/smccc-call.S2
-rw-r--r--arch/arm64/kernel/smp.c131
-rw-r--r--arch/arm64/kernel/smp_spin_table.c4
-rw-r--r--arch/arm64/kernel/ssbd.c129
-rw-r--r--arch/arm64/kernel/stacktrace.c117
-rw-r--r--arch/arm64/kernel/suspend.c7
-rw-r--r--arch/arm64/kernel/syscall.c10
-rw-r--r--arch/arm64/kernel/topology.c41
-rw-r--r--arch/arm64/kernel/traps.c132
-rw-r--r--arch/arm64/kernel/vdso.c51
-rw-r--r--arch/arm64/kernel/vdso/Makefile7
-rw-r--r--arch/arm64/kernel/vdso32/Makefile2
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S40
-rw-r--r--arch/arm64/kvm/Kconfig3
-rw-r--r--arch/arm64/kvm/arm.c47
-rw-r--r--arch/arm64/kvm/handle_exit.c2
-rw-r--r--arch/arm64/kvm/hyp/Makefile3
-rw-r--r--arch/arm64/kvm/hyp/entry.S15
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S96
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/debug-sr.h60
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h64
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c9
-rw-r--r--arch/arm64/kvm/hyp/nvhe/tlb.c7
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c16
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c9
-rw-r--r--arch/arm64/kvm/hypercalls.c33
-rw-r--r--arch/arm64/kvm/mmu.c31
-rw-r--r--arch/arm64/kvm/pmu-emul.c26
-rw-r--r--arch/arm64/kvm/psci.c74
-rw-r--r--arch/arm64/kvm/pvtime.c29
-rw-r--r--arch/arm64/kvm/reset.c4
-rw-r--r--arch/arm64/kvm/sys_regs.c19
-rw-r--r--arch/arm64/kvm/trace_arm.h16
-rw-r--r--arch/arm64/kvm/trace_handle_exit.h6
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c4
-rw-r--r--arch/arm64/lib/Makefile2
-rw-r--r--arch/arm64/lib/mte.S151
-rw-r--r--arch/arm64/mm/Makefile3
-rw-r--r--arch/arm64/mm/context.c107
-rw-r--r--arch/arm64/mm/copypage.c25
-rw-r--r--arch/arm64/mm/extable.c4
-rw-r--r--arch/arm64/mm/fault.c13
-rw-r--r--arch/arm64/mm/init.c11
-rw-r--r--arch/arm64/mm/kasan_init.c10
-rw-r--r--arch/arm64/mm/mmu.c33
-rw-r--r--arch/arm64/mm/mteswap.c83
-rw-r--r--arch/arm64/mm/numa.c23
-rw-r--r--arch/arm64/mm/pageattr.c1
-rw-r--r--arch/arm64/mm/proc.S32
-rw-r--r--arch/arm64/mm/ptdump.c (renamed from arch/arm64/mm/dump.c)6
-rw-r--r--arch/arm64/net/bpf_jit_comp.c43
-rw-r--r--arch/c6x/include/asm/checksum.h3
-rw-r--r--arch/c6x/kernel/setup.c9
-rw-r--r--arch/c6x/kernel/signal.c4
-rw-r--r--arch/c6x/lib/csum_64plus.S4
-rw-r--r--arch/csky/Kconfig13
-rw-r--r--arch/csky/kernel/probes/kprobes.c77
-rw-r--r--arch/csky/kernel/signal.c2
-rw-r--r--arch/csky/kernel/vmlinux.lds.S1
-rw-r--r--arch/h8300/kernel/process.c2
-rw-r--r--arch/h8300/kernel/setup.c8
-rw-r--r--arch/h8300/kernel/signal.c2
-rw-r--r--arch/hexagon/include/asm/checksum.h11
-rw-r--r--arch/hexagon/kernel/module.c2
-rw-r--r--arch/hexagon/kernel/signal.c2
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S1
-rw-r--r--arch/hexagon/lib/checksum.c11
-rw-r--r--arch/ia64/Kconfig10
-rw-r--r--arch/ia64/configs/bigsur_defconfig8
-rw-r--r--arch/ia64/configs/generic_defconfig10
-rw-r--r--arch/ia64/configs/gensparse_defconfig10
-rw-r--r--arch/ia64/configs/tiger_defconfig10
-rw-r--r--arch/ia64/configs/zx1_defconfig8
-rw-r--r--arch/ia64/include/asm/acpi.h2
-rw-r--r--arch/ia64/include/asm/checksum.h3
-rw-r--r--arch/ia64/include/asm/pgtable.h9
-rw-r--r--arch/ia64/include/asm/processor.h10
-rw-r--r--arch/ia64/include/asm/switch_to.h10
-rw-r--r--arch/ia64/kernel/Makefile3
-rw-r--r--arch/ia64/kernel/crash.c2
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/irq_ia64.c7
-rw-r--r--arch/ia64/kernel/kprobes.c77
-rw-r--r--arch/ia64/kernel/module.c2
-rw-r--r--arch/ia64/kernel/perfmon.c6703
-rw-r--r--arch/ia64/kernel/process.c57
-rw-r--r--arch/ia64/kernel/ptrace.c24
-rw-r--r--arch/ia64/kernel/signal.c2
-rw-r--r--arch/ia64/kernel/smpboot.c8
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/ia64/kernel/unaligned.c6
-rw-r--r--arch/ia64/kernel/unwind.c2
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/lib/Makefile1
-rw-r--r--arch/ia64/lib/carta_random.S55
-rw-r--r--arch/ia64/lib/csum_partial_copy.c15
-rw-r--r--arch/ia64/mm/init.c6
-rw-r--r--arch/ia64/oprofile/Makefile1
-rw-r--r--arch/ia64/oprofile/init.c12
-rw-r--r--arch/ia64/oprofile/perfmon.c99
-rw-r--r--arch/m68k/Kconfig24
-rw-r--r--arch/m68k/amiga/config.c118
-rw-r--r--arch/m68k/atari/atakeyb.c2
-rw-r--r--arch/m68k/configs/mac_defconfig1
-rw-r--r--arch/m68k/configs/multi_defconfig1
-rw-r--r--arch/m68k/include/asm/checksum.h7
-rw-r--r--arch/m68k/include/asm/thread_info.h8
-rw-r--r--arch/m68k/kernel/head.S16
-rw-r--r--arch/m68k/kernel/process.c10
-rw-r--r--arch/m68k/kernel/signal.c2
-rw-r--r--arch/m68k/kernel/traps.c5
-rw-r--r--arch/m68k/lib/checksum.c88
-rw-r--r--arch/m68k/mac/config.c43
-rw-r--r--arch/m68k/mac/macboing.c2
-rw-r--r--arch/m68k/mac/via.c2
-rw-r--r--arch/m68k/mm/fault.c2
-rw-r--r--arch/m68k/mm/mcfmmu.c2
-rw-r--r--arch/m68k/mm/motorola.c9
-rw-r--r--arch/microblaze/Kconfig18
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/kernel/signal.c2
-rw-r--r--arch/microblaze/mm/init.c24
-rw-r--r--arch/mips/Kconfig19
-rw-r--r--arch/mips/bcm47xx/setup.c2
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c14
-rw-r--r--arch/mips/include/asm/checksum.h68
-rw-r--r--arch/mips/include/asm/compat.h2
-rw-r--r--arch/mips/include/asm/cpu-type.h1
-rw-r--r--arch/mips/include/asm/irqflags.h5
-rw-r--r--arch/mips/include/asm/kvm_host.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/irq.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/mmzone.h1
-rw-r--r--arch/mips/include/asm/unroll.h64
-rw-r--r--arch/mips/kernel/kprobes.c54
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c4
-rw-r--r--arch/mips/kernel/setup.c31
-rw-r--r--arch/mips/kernel/smp-bmips.c2
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl12
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl12
-rw-r--r--arch/mips/kernel/traps.c12
-rw-r--r--arch/mips/kernel/vmlinux.lds.S1
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/kvm/mmu.c3
-rw-r--r--arch/mips/lib/csum_partial.S261
-rw-r--r--arch/mips/loongson2ef/Platform4
-rw-r--r--arch/mips/loongson64/cop2-ex.c24
-rw-r--r--arch/mips/mm/c-r4k.c4
-rw-r--r--arch/mips/netlogic/xlp/setup.c2
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c4
-rw-r--r--arch/mips/sni/a20r.c13
-rw-r--r--arch/nds32/include/asm/irqflags.h5
-rw-r--r--arch/nds32/kernel/fpu.c12
-rw-r--r--arch/nds32/kernel/setup.c8
-rw-r--r--arch/nds32/kernel/signal.c4
-rw-r--r--arch/nds32/kernel/vmlinux.lds.S1
-rw-r--r--arch/nios2/include/asm/checksum.h4
-rw-r--r--arch/nios2/kernel/process.c2
-rw-r--r--arch/nios2/kernel/vmlinux.lds.S1
-rw-r--r--arch/openrisc/include/asm/uaccess.h33
-rw-r--r--arch/openrisc/kernel/setup.c19
-rw-r--r--arch/openrisc/kernel/signal.c2
-rw-r--r--arch/openrisc/kernel/vmlinux.lds.S1
-rw-r--r--arch/openrisc/mm/cache.c2
-rw-r--r--arch/openrisc/mm/init.c8
-rw-r--r--arch/parisc/Kconfig16
-rw-r--r--arch/parisc/boot/compressed/vmlinux.lds.S1
-rw-r--r--arch/parisc/include/asm/checksum.h28
-rw-r--r--arch/parisc/include/asm/compat.h2
-rw-r--r--arch/parisc/kernel/kprobes.c76
-rw-r--r--arch/parisc/kernel/signal.c2
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl12
-rw-r--r--arch/parisc/kernel/traps.c11
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S1
-rw-r--r--arch/parisc/lib/checksum.c17
-rw-r--r--arch/parisc/mm/fault.c4
-rw-r--r--arch/powerpc/Kconfig33
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/crypto/crc-vpmsum_test.c6
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h10
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h8
-rw-r--r--arch/powerpc/include/asm/checksum.h13
-rw-r--r--arch/powerpc/include/asm/compat.h2
-rw-r--r--arch/powerpc/include/asm/cputable.h15
-rw-r--r--arch/powerpc/include/asm/fixmap.h2
-rw-r--r--arch/powerpc/include/asm/hw_irq.h11
-rw-r--r--arch/powerpc/include/asm/kasan.h9
-rw-r--r--arch/powerpc/include/asm/kvm_host.h3
-rw-r--r--arch/powerpc/include/asm/mce.h7
-rw-r--r--arch/powerpc/include/asm/mman.h31
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/perf_event.h3
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h5
-rw-r--r--arch/powerpc/include/asm/string.h2
-rw-r--r--arch/powerpc/include/asm/uaccess.h40
-rw-r--r--arch/powerpc/include/uapi/asm/mman.h2
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h20
-rw-r--r--arch/powerpc/kernel/cputable.c22
-rw-r--r--arch/powerpc/kernel/dma-iommu.c3
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c6
-rw-r--r--arch/powerpc/kernel/entry_64.S4
-rw-r--r--arch/powerpc/kernel/fadump.c57
-rw-r--r--arch/powerpc/kernel/kprobes.c53
-rw-r--r--arch/powerpc/kernel/process.c12
-rw-r--r--arch/powerpc/kernel/setup-common.c1
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl12
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32.lds.S1
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64.lds.S3
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/kexec/file_load_64.c16
-rw-r--r--arch/powerpc/kvm/book3s.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c14
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c3
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/checksum_32.S74
-rw-r--r--arch/powerpc/lib/checksum_64.S37
-rw-r--r--arch/powerpc/lib/checksum_wrappers.c74
-rw-r--r--arch/powerpc/lib/copy_mc_64.S (renamed from arch/powerpc/lib/memcpy_mcsafe_64.S)4
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c9
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c22
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c25
-rw-r--r--arch/powerpc/mm/init_64.c11
-rw-r--r--arch/powerpc/mm/kasan/kasan_init_32.c8
-rw-r--r--arch/powerpc/mm/mem.c33
-rw-r--r--arch/powerpc/mm/numa.c7
-rw-r--r--arch/powerpc/mm/pgtable_32.c8
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c1
-rw-r--r--arch/powerpc/perf/core-book3s.c24
-rw-r--r--arch/powerpc/perf/hv-24x7.c11
-rw-r--r--arch/powerpc/perf/imc-pmu.c4
-rw-r--r--arch/powerpc/perf/perf_regs.c44
-rw-r--r--arch/powerpc/perf/power10-pmu.c6
-rw-r--r--arch/powerpc/perf/power9-pmu.c6
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype2
-rw-r--r--arch/powerpc/platforms/powernv/idle.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c18
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c1
-rw-r--r--arch/riscv/Kconfig16
-rw-r--r--arch/riscv/Kconfig.socs2
-rw-r--r--arch/riscv/boot/dts/kendryte/k210.dtsi6
-rw-r--r--arch/riscv/configs/nommu_virt_defconfig7
-rw-r--r--arch/riscv/configs/rv32_defconfig5
-rw-r--r--arch/riscv/include/asm/clint.h57
-rw-r--r--arch/riscv/include/asm/ftrace.h7
-rw-r--r--arch/riscv/include/asm/smp.h19
-rw-r--r--arch/riscv/include/asm/stackprotector.h4
-rw-r--r--arch/riscv/include/asm/timex.h62
-rw-r--r--arch/riscv/kernel/Makefile2
-rw-r--r--arch/riscv/kernel/clint.c44
-rw-r--r--arch/riscv/kernel/ftrace.c19
-rw-r--r--arch/riscv/kernel/sbi.c14
-rw-r--r--arch/riscv/kernel/setup.c2
-rw-r--r--arch/riscv/kernel/signal.c2
-rw-r--r--arch/riscv/kernel/smp.c44
-rw-r--r--arch/riscv/kernel/smpboot.c4
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S6
-rw-r--r--arch/riscv/mm/init.c44
-rw-r--r--arch/riscv/mm/kasan_init.c10
-rw-r--r--arch/riscv/net/bpf_jit_comp32.c4
-rw-r--r--arch/s390/Kconfig20
-rw-r--r--arch/s390/configs/debug_defconfig4
-rw-r--r--arch/s390/configs/defconfig3
-rw-r--r--arch/s390/configs/zfcpdump_defconfig1
-rw-r--r--arch/s390/include/asm/checksum.h7
-rw-r--r--arch/s390/include/asm/compat.h2
-rw-r--r--arch/s390/include/asm/percpu.h28
-rw-r--r--arch/s390/include/asm/pgtable.h42
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/idle.c6
-rw-r--r--arch/s390/kernel/kprobes.c79
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/ptrace.c7
-rw-r--r--arch/s390/kernel/runtime_instr.c2
-rw-r--r--arch/s390/kernel/setup.c33
-rw-r--r--arch/s390/kernel/stacktrace.c4
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl12
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/mm/fault.c20
-rw-r--r--arch/s390/mm/page-states.c6
-rw-r--r--arch/s390/mm/vmem.c8
-rw-r--r--arch/s390/pci/pci.c26
-rw-r--r--arch/s390/pci/pci_bus.c52
-rw-r--r--arch/s390/pci/pci_bus.h13
-rw-r--r--arch/s390/pci/pci_event.c9
-rw-r--r--arch/sh/Kconfig16
-rw-r--r--arch/sh/drivers/platform_early.c2
-rw-r--r--arch/sh/include/asm/checksum_32.h36
-rw-r--r--arch/sh/include/asm/smp.h1
-rw-r--r--arch/sh/kernel/disassemble.c4
-rw-r--r--arch/sh/kernel/entry-common.S1
-rw-r--r--arch/sh/kernel/kgdb.c2
-rw-r--r--arch/sh/kernel/kprobes.c58
-rw-r--r--arch/sh/kernel/ptrace_32.c15
-rw-r--r--arch/sh/kernel/signal_32.c2
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/lib/checksum.S119
-rw-r--r--arch/sh/mm/init.c9
-rw-r--r--arch/sparc/Kconfig19
-rw-r--r--arch/sparc/include/asm/checksum.h2
-rw-r--r--arch/sparc/include/asm/checksum_32.h70
-rw-r--r--arch/sparc/include/asm/checksum_64.h39
-rw-r--r--arch/sparc/include/asm/compat.h3
-rw-r--r--arch/sparc/kernel/auxio_64.c1
-rw-r--r--arch/sparc/kernel/central.c2
-rw-r--r--arch/sparc/kernel/kgdb_32.c2
-rw-r--r--arch/sparc/kernel/kgdb_64.c2
-rw-r--r--arch/sparc/kernel/kprobes.c51
-rw-r--r--arch/sparc/kernel/pcr.c2
-rw-r--r--arch/sparc/kernel/process.c6
-rw-r--r--arch/sparc/kernel/prom_32.c2
-rw-r--r--arch/sparc/kernel/signal32.c4
-rw-r--r--arch/sparc/kernel/signal_32.c4
-rw-r--r--arch/sparc/kernel/signal_64.c4
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl12
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S1
-rw-r--r--arch/sparc/lib/checksum_32.S202
-rw-r--r--arch/sparc/lib/csum_copy.S3
-rw-r--r--arch/sparc/lib/csum_copy_from_user.S4
-rw-r--r--arch/sparc/lib/csum_copy_to_user.S4
-rw-r--r--arch/sparc/math-emu/math_32.c8
-rw-r--r--arch/sparc/mm/fault_32.c6
-rw-r--r--arch/sparc/mm/init_64.c12
-rw-r--r--arch/sparc/net/bpf_jit_comp_32.c2
-rw-r--r--arch/um/Kconfig16
-rw-r--r--arch/um/kernel/dyn.lds.S2
-rw-r--r--arch/um/kernel/signal.c2
-rw-r--r--arch/um/kernel/uml.lds.S2
-rw-r--r--arch/x86/Kconfig23
-rw-r--r--arch/x86/Kconfig.debug2
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/boot/cmdline.c4
-rw-r--r--arch/x86/boot/compressed/Makefile54
-rw-r--r--arch/x86/boot/compressed/cpuflags.c4
-rw-r--r--arch/x86/boot/compressed/head_32.S99
-rw-r--r--arch/x86/boot/compressed/head_64.S198
-rw-r--r--arch/x86/boot/compressed/ident_map_64.c349
-rw-r--r--arch/x86/boot/compressed/idt_64.c54
-rw-r--r--arch/x86/boot/compressed/idt_handlers_64.S77
-rw-r--r--arch/x86/boot/compressed/kaslr.c268
-rw-r--r--arch/x86/boot/compressed/kaslr_64.c153
-rw-r--r--arch/x86/boot/compressed/misc.c14
-rw-r--r--arch/x86/boot/compressed/misc.h54
-rw-r--r--arch/x86/boot/compressed/mkpiggy.c6
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c9
-rw-r--r--arch/x86/boot/compressed/sev-es.c214
-rw-r--r--arch/x86/boot/compressed/vmlinux.lds.S50
-rw-r--r--arch/x86/boot/setup.ld2
-rw-r--r--arch/x86/boot/string.h5
-rw-r--r--arch/x86/boot/tools/build.c4
-rw-r--r--arch/x86/configs/i386_defconfig2
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/crypto/blake2s-glue.c1
-rw-r--r--arch/x86/crypto/chacha_glue.c1
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c18
-rw-r--r--arch/x86/crypto/curve25519-x86_64.c69
-rw-r--r--arch/x86/crypto/nhpoly1305-avx2-glue.c1
-rw-r--r--arch/x86/crypto/nhpoly1305-sse2-glue.c1
-rw-r--r--arch/x86/crypto/poly1305-x86_64-cryptogams.pl8
-rw-r--r--arch/x86/crypto/poly1305_glue.c4
-rw-r--r--arch/x86/entry/calling.h11
-rw-r--r--arch/x86/entry/common.c31
-rw-r--r--arch/x86/entry/entry_64.S91
-rw-r--r--arch/x86/entry/syscall_x32.c5
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl14
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl10
-rw-r--r--arch/x86/entry/thunk_32.S5
-rw-r--r--arch/x86/entry/vdso/vdso32/vclock_gettime.c1
-rw-r--r--arch/x86/events/amd/ibs.c93
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/amd/uncore.c186
-rw-r--r--arch/x86/events/core.c225
-rw-r--r--arch/x86/events/intel/core.c370
-rw-r--r--arch/x86/events/intel/ds.c32
-rw-r--r--arch/x86/events/intel/lbr.c2
-rw-r--r--arch/x86/events/intel/uncore.c275
-rw-r--r--arch/x86/events/intel/uncore.h2
-rw-r--r--arch/x86/events/intel/uncore_snb.c97
-rw-r--r--arch/x86/events/intel/uncore_snbep.c72
-rw-r--r--arch/x86/events/msr.c1
-rw-r--r--arch/x86/events/perf_event.h54
-rw-r--r--arch/x86/events/rapl.c1
-rw-r--r--arch/x86/hyperv/hv_init.c8
-rw-r--r--arch/x86/hyperv/hv_spinlock.c2
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/apic.h8
-rw-r--r--arch/x86/include/asm/asm-prototypes.h1
-rw-r--r--arch/x86/include/asm/asm.h12
-rw-r--r--arch/x86/include/asm/checksum.h1
-rw-r--r--arch/x86/include/asm/checksum_32.h40
-rw-r--r--arch/x86/include/asm/checksum_64.h14
-rw-r--r--arch/x86/include/asm/compat.h3
-rw-r--r--arch/x86/include/asm/copy_mc_test.h75
-rw-r--r--arch/x86/include/asm/cpu_entry_area.h33
-rw-r--r--arch/x86/include/asm/cpufeatures.h6
-rw-r--r--arch/x86/include/asm/debugreg.h2
-rw-r--r--arch/x86/include/asm/desc.h27
-rw-r--r--arch/x86/include/asm/desc_defs.h10
-rw-r--r--arch/x86/include/asm/disabled-features.h9
-rw-r--r--arch/x86/include/asm/efi.h10
-rw-r--r--arch/x86/include/asm/entry-common.h12
-rw-r--r--arch/x86/include/asm/extable.h9
-rw-r--r--arch/x86/include/asm/fixmap.h2
-rw-r--r--arch/x86/include/asm/fpu/api.h12
-rw-r--r--arch/x86/include/asm/fpu/internal.h38
-rw-r--r--arch/x86/include/asm/fpu/types.h11
-rw-r--r--arch/x86/include/asm/fpu/xcr.h34
-rw-r--r--arch/x86/include/asm/fpu/xstate.h2
-rw-r--r--arch/x86/include/asm/frame.h19
-rw-r--r--arch/x86/include/asm/fsgsbase.h4
-rw-r--r--arch/x86/include/asm/hw_irq.h89
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h33
-rw-r--r--arch/x86/include/asm/idtentry.h60
-rw-r--r--arch/x86/include/asm/insn-eval.h6
-rw-r--r--arch/x86/include/asm/io.h17
-rw-r--r--arch/x86/include/asm/irq_remapping.h8
-rw-r--r--arch/x86/include/asm/irq_stack.h69
-rw-r--r--arch/x86/include/asm/irqdomain.h8
-rw-r--r--arch/x86/include/asm/kprobes.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/mce.h33
-rw-r--r--arch/x86/include/asm/mcsafe_test.h75
-rw-r--r--arch/x86/include/asm/mem_encrypt.h5
-rw-r--r--arch/x86/include/asm/mmu.h1
-rw-r--r--arch/x86/include/asm/mpspec.h10
-rw-r--r--arch/x86/include/asm/msi.h2
-rw-r--r--arch/x86/include/asm/msr-index.h10
-rw-r--r--arch/x86/include/asm/nospec-branch.h2
-rw-r--r--arch/x86/include/asm/numa.h10
-rw-r--r--arch/x86/include/asm/orc_types.h34
-rw-r--r--arch/x86/include/asm/page_64_types.h1
-rw-r--r--arch/x86/include/asm/paravirt.h151
-rw-r--r--arch/x86/include/asm/paravirt_types.h23
-rw-r--r--arch/x86/include/asm/pci.h11
-rw-r--r--arch/x86/include/asm/pci_x86.h11
-rw-r--r--arch/x86/include/asm/perf_event.h98
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h5
-rw-r--r--arch/x86/include/asm/pgtable.h9
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h2
-rw-r--r--arch/x86/include/asm/processor.h3
-rw-r--r--arch/x86/include/asm/proto.h1
-rw-r--r--arch/x86/include/asm/ptrace.h2
-rw-r--r--arch/x86/include/asm/realmode.h7
-rw-r--r--arch/x86/include/asm/required-features.h2
-rw-r--r--arch/x86/include/asm/segment.h6
-rw-r--r--arch/x86/include/asm/setup.h6
-rw-r--r--arch/x86/include/asm/sev-es.h114
-rw-r--r--arch/x86/include/asm/special_insns.h98
-rw-r--r--arch/x86/include/asm/stacktrace.h2
-rw-r--r--arch/x86/include/asm/static_call.h40
-rw-r--r--arch/x86/include/asm/string_64.h32
-rw-r--r--arch/x86/include/asm/svm.h106
-rw-r--r--arch/x86/include/asm/sync_core.h34
-rw-r--r--arch/x86/include/asm/text-patching.h19
-rw-r--r--arch/x86/include/asm/trap_pf.h24
-rw-r--r--arch/x86/include/asm/trapnr.h1
-rw-r--r--arch/x86/include/asm/traps.h22
-rw-r--r--arch/x86/include/asm/uaccess.h318
-rw-r--r--arch/x86/include/asm/uaccess_64.h20
-rw-r--r--arch/x86/include/asm/unwind_hints.h56
-rw-r--r--arch/x86/include/asm/uv/bios.h17
-rw-r--r--arch/x86/include/asm/uv/uv.h4
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h755
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h165
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h6992
-rw-r--r--arch/x86/include/asm/x86_init.h32
-rw-r--r--arch/x86/include/uapi/asm/svm.h11
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kernel/apic/io_apic.c79
-rw-r--r--arch/x86/kernel/apic/msi.c118
-rw-r--r--arch/x86/kernel/apic/probe_32.c5
-rw-r--r--arch/x86/kernel/apic/probe_64.c3
-rw-r--r--arch/x86/kernel/apic/vector.c19
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c822
-rw-r--r--arch/x86/kernel/cpu/amd.c3
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c2
-rw-r--r--arch/x86/kernel/cpu/centaur.c27
-rw-r--r--arch/x86/kernel/cpu/common.c92
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c2
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c44
-rw-r--r--arch/x86/kernel/cpu/mce/core.c188
-rw-r--r--arch/x86/kernel/cpu/mce/dev-mcelog.c4
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c2
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c2
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h13
-rw-r--r--arch/x86/kernel/cpu/mce/severity.c102
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c8
-rw-r--r--arch/x86/kernel/cpu/mtrr/cyrix.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c60
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c92
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h49
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c16
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c85
-rw-r--r--arch/x86/kernel/cpu/scattered.c3
-rw-r--r--arch/x86/kernel/cpu/vmware.c50
-rw-r--r--arch/x86/kernel/devicetree.c4
-rw-r--r--arch/x86/kernel/dumpstack.c10
-rw-r--r--arch/x86/kernel/dumpstack_64.c46
-rw-r--r--arch/x86/kernel/e820.c16
-rw-r--r--arch/x86/kernel/fpu/init.c41
-rw-r--r--arch/x86/kernel/fpu/xstate.c63
-rw-r--r--arch/x86/kernel/head64.c122
-rw-r--r--arch/x86/kernel/head_64.S165
-rw-r--r--arch/x86/kernel/hw_breakpoint.c60
-rw-r--r--arch/x86/kernel/idt.c44
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kernel/irq_64.c2
-rw-r--r--arch/x86/kernel/kgdb.c9
-rw-r--r--arch/x86/kernel/kprobes/core.c111
-rw-r--r--arch/x86/kernel/kprobes/opt.c7
-rw-r--r--arch/x86/kernel/kvm.c39
-rw-r--r--arch/x86/kernel/mpparse.c31
-rw-r--r--arch/x86/kernel/msr.c18
-rw-r--r--arch/x86/kernel/nmi.c20
-rw-r--r--arch/x86/kernel/paravirt.c19
-rw-r--r--arch/x86/kernel/paravirt_patch.c17
-rw-r--r--arch/x86/kernel/process.c7
-rw-r--r--arch/x86/kernel/process_64.c8
-rw-r--r--arch/x86/kernel/ptrace.c8
-rw-r--r--arch/x86/kernel/quirks.c10
-rw-r--r--arch/x86/kernel/reboot.c4
-rw-r--r--arch/x86/kernel/setup.c60
-rw-r--r--arch/x86/kernel/sev-es-shared.c507
-rw-r--r--arch/x86/kernel/sev-es.c1404
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/kernel/signal_compat.c2
-rw-r--r--arch/x86/kernel/smpboot.c28
-rw-r--r--arch/x86/kernel/stacktrace.c10
-rw-r--r--arch/x86/kernel/static_call.c98
-rw-r--r--arch/x86/kernel/sys_ia32.c2
-rw-r--r--arch/x86/kernel/traps.c259
-rw-r--r--arch/x86/kernel/tsc.c10
-rw-r--r--arch/x86/kernel/umip.c89
-rw-r--r--arch/x86/kernel/unwind_orc.c11
-rw-r--r--arch/x86/kernel/uprobes.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S40
-rw-r--r--arch/x86/kernel/x86_init.c26
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/emulate.c24
-rw-r--r--arch/x86/kvm/hyperv.c22
-rw-r--r--arch/x86/kvm/irq_comm.c2
-rw-r--r--arch/x86/kvm/lapic.c6
-rw-r--r--arch/x86/kvm/mmu/mmu.c7
-rw-r--r--arch/x86/kvm/svm/nested.c54
-rw-r--r--arch/x86/kvm/svm/sev.c4
-rw-r--r--arch/x86/kvm/svm/svm.c50
-rw-r--r--arch/x86/kvm/vmx/nested.c12
-rw-r--r--arch/x86/kvm/vmx/vmx.c56
-rw-r--r--arch/x86/kvm/vmx/vmx.h6
-rw-r--r--arch/x86/kvm/x86.c44
-rw-r--r--arch/x86/lib/Makefile3
-rw-r--r--arch/x86/lib/checksum_32.S117
-rw-r--r--arch/x86/lib/cmdline.c8
-rw-r--r--arch/x86/lib/copy_mc.c96
-rw-r--r--arch/x86/lib/copy_mc_64.S163
-rw-r--r--arch/x86/lib/copy_user_64.S111
-rw-r--r--arch/x86/lib/csum-copy_64.S140
-rw-r--r--arch/x86/lib/csum-wrappers_64.c86
-rw-r--r--arch/x86/lib/getuser.S60
-rw-r--r--arch/x86/lib/insn-eval.c136
-rw-r--r--arch/x86/lib/memcpy_64.S115
-rw-r--r--arch/x86/lib/putuser.S22
-rw-r--r--arch/x86/lib/usercopy_64.c23
-rw-r--r--arch/x86/math-emu/errors.c2
-rw-r--r--arch/x86/math-emu/fpu_trig.c2
-rw-r--r--arch/x86/mm/cpu_entry_area.c3
-rw-r--r--arch/x86/mm/extable.c25
-rw-r--r--arch/x86/mm/fault.c80
-rw-r--r--arch/x86/mm/init_64.c20
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/mem_encrypt.c38
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c3
-rw-r--r--arch/x86/mm/numa.c34
-rw-r--r--arch/x86/mm/numa_emulation.c5
-rw-r--r--arch/x86/mm/pat/set_memory.c2
-rw-r--r--arch/x86/mm/tlb.c37
-rw-r--r--arch/x86/pci/common.c18
-rw-r--r--arch/x86/pci/init.c13
-rw-r--r--arch/x86/pci/sta2x11-fixup.c1
-rw-r--r--arch/x86/pci/xen.c138
-rw-r--r--arch/x86/platform/efi/efi.c72
-rw-r--r--arch/x86/platform/efi/efi_32.c44
-rw-r--r--arch/x86/platform/efi/efi_64.c12
-rw-r--r--arch/x86/platform/uv/Makefile2
-rw-r--r--arch/x86/platform/uv/bios_uv.c28
-rw-r--r--arch/x86/platform/uv/tlb_uv.c2097
-rw-r--r--arch/x86/platform/uv/uv_irq.c16
-rw-r--r--arch/x86/platform/uv/uv_nmi.c62
-rw-r--r--arch/x86/platform/uv/uv_time.c18
-rw-r--r--arch/x86/realmode/init.c24
-rw-r--r--arch/x86/realmode/rm/header.S3
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S20
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk50
-rw-r--r--arch/x86/um/asm/checksum.h16
-rw-r--r--arch/x86/um/asm/checksum_32.h23
-rw-r--r--arch/x86/xen/enlighten_pv.c19
-rw-r--r--arch/x86/xen/mmu_pv.c10
-rw-r--r--arch/xtensa/Kconfig14
-rw-r--r--arch/xtensa/include/asm/checksum.h34
-rw-r--r--arch/xtensa/kernel/signal.c2
-rw-r--r--arch/xtensa/lib/checksum.S67
-rw-r--r--arch/xtensa/mm/init.c55
817 files changed, 19084 insertions, 24049 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index af14a567b493..8519d9f42e33 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -106,6 +106,12 @@ config STATIC_KEYS_SELFTEST
help
Boot time self-test of the branch patching code.
+config STATIC_CALL_SELFTEST
+ bool "Static call selftest"
+ depends on HAVE_STATIC_CALL
+ help
+ Boot time self-test of the call patching code.
+
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
@@ -444,10 +450,23 @@ config ARCH_WANT_OLD_COMPAT_IPC
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
bool
+config HAVE_ARCH_SECCOMP
+ bool
+ help
+ An arch should select this symbol to support seccomp mode 1 (the fixed
+ syscall policy), and must provide an override for __NR_seccomp_sigreturn,
+ and compat syscalls if the asm-generic/seccomp.h defaults need adjustment:
+ - __NR_seccomp_read_32
+ - __NR_seccomp_write_32
+ - __NR_seccomp_exit_32
+ - __NR_seccomp_sigreturn_32
+
config HAVE_ARCH_SECCOMP_FILTER
bool
+ select HAVE_ARCH_SECCOMP
help
An arch should select this symbol if it provides all of these things:
+ - all the requirements for HAVE_ARCH_SECCOMP
- syscall_get_arch()
- syscall_get_arguments()
- syscall_rollback()
@@ -458,6 +477,23 @@ config HAVE_ARCH_SECCOMP_FILTER
results in the system call being skipped immediately.
- seccomp syscall wired up
+config SECCOMP
+ prompt "Enable seccomp to safely execute untrusted bytecode"
+ def_bool y
+ depends on HAVE_ARCH_SECCOMP
+ help
+ This kernel feature is useful for number crunching applications
+ that may need to handle untrusted bytecode during their
+ execution. By using pipes or other transports made available
+ to the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in their
+ own address space using seccomp. Once seccomp is enabled via
+ prctl(PR_SET_SECCOMP) or the seccomp() syscall, it cannot be
+ disabled and the task is only allowed to execute a few safe
+ syscalls defined by each seccomp mode.
+
+ If unsure, say Y.
+
config SECCOMP_FILTER
def_bool y
depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
@@ -975,6 +1011,13 @@ config HAVE_SPARSE_SYSCALL_NR
config ARCH_HAS_VDSO_DATA
bool
+config HAVE_STATIC_CALL
+ bool
+
+config HAVE_STATIC_CALL_INLINE
+ bool
+ depends on HAVE_STATIC_CALL
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h
index 0eac81624d01..99d631e146b2 100644
--- a/arch/alpha/include/asm/checksum.h
+++ b/arch/alpha/include/asm/checksum.h
@@ -42,9 +42,10 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/*
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index ac110ae8f978..5b60c248de9e 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -212,7 +212,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
STO_ALPHA_STD_GPLOAD)
/* Omit the prologue. */
value += 8;
- /* FALLTHRU */
+ fallthrough;
case R_ALPHA_BRADDR:
value -= (u64)location + 4;
if (value & 3)
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index a813020d2f11..15bc9d1e79f4 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -453,7 +453,7 @@ syscall_restart(unsigned long r0, unsigned long r19,
regs->r0 = EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->r0 = r0; /* reset v0 and a3 and replay syscall */
regs->r19 = r19;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 49754e07e04f..921d4b6e4d95 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -883,7 +883,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
case 0x26: /* sts */
fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
- /* FALLTHRU */
+ fallthrough;
case 0x2c: /* stl */
__asm__ __volatile__(
@@ -911,7 +911,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
case 0x27: /* stt */
fake_reg = alpha_read_fp_reg(reg);
- /* FALLTHRU */
+ fallthrough;
case 0x2d: /* stq */
__asm__ __volatile__(
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index bc6f727278fd..5b78d640725d 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -72,6 +72,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index af1dad74e933..dc68efbe9367 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -39,12 +39,11 @@ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
#define insqh(x,y,z) \
__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
-
-#define __get_user_u(x,ptr) \
+#define __get_word(insn,x,ptr) \
({ \
long __guu_err; \
__asm__ __volatile__( \
- "1: ldq_u %0,%2\n" \
+ "1: "#insn" %0,%2\n" \
"2:\n" \
EXC(1b,2b,%0,%1) \
: "=r"(x), "=r"(__guu_err) \
@@ -52,19 +51,6 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
__guu_err; \
})
-#define __put_user_u(x,ptr) \
-({ \
- long __puu_err; \
- __asm__ __volatile__( \
- "1: stq_u %2,%1\n" \
- "2:\n" \
- EXC(1b,2b,$31,%0) \
- : "=r"(__puu_err) \
- : "m"(__m(addr)), "rJ"(x), "0"(0)); \
- __puu_err; \
-})
-
-
static inline unsigned short from64to16(unsigned long x)
{
/* Using extract instructions is a bit more efficient
@@ -95,15 +81,15 @@ static inline unsigned short from64to16(unsigned long x)
*/
static inline unsigned long
csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
- long len, unsigned long checksum,
- int *errp)
+ long len)
{
+ unsigned long checksum = ~0U;
unsigned long carry = 0;
- int err = 0;
while (len >= 0) {
unsigned long word;
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
checksum += carry;
src++;
checksum += word;
@@ -116,7 +102,8 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
checksum += carry;
if (len) {
unsigned long word, tmp;
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
tmp = *dst;
mskql(word, len, word);
checksum += word;
@@ -125,7 +112,6 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
*dst = word | tmp;
checksum += carry;
}
- if (err && errp) *errp = err;
return checksum;
}
@@ -137,20 +123,21 @@ static inline unsigned long
csum_partial_cfu_dest_aligned(const unsigned long __user *src,
unsigned long *dst,
unsigned long soff,
- long len, unsigned long checksum,
- int *errp)
+ long len)
{
unsigned long first;
unsigned long word, carry;
unsigned long lastsrc = 7+len+(unsigned long)src;
- int err = 0;
+ unsigned long checksum = ~0U;
- err |= __get_user_u(first,src);
+ if (__get_word(ldq_u, first,src))
+ return 0;
carry = 0;
while (len >= 0) {
unsigned long second;
- err |= __get_user_u(second, src+1);
+ if (__get_word(ldq_u, second, src+1))
+ return 0;
extql(first, soff, word);
len -= 8;
src++;
@@ -168,7 +155,8 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
if (len) {
unsigned long tmp;
unsigned long second;
- err |= __get_user_u(second, lastsrc);
+ if (__get_word(ldq_u, second, lastsrc))
+ return 0;
tmp = *dst;
extql(first, soff, word);
extqh(second, soff, first);
@@ -180,7 +168,6 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
*dst = word | tmp;
checksum += carry;
}
- if (err && errp) *errp = err;
return checksum;
}
@@ -191,18 +178,18 @@ static inline unsigned long
csum_partial_cfu_src_aligned(const unsigned long __user *src,
unsigned long *dst,
unsigned long doff,
- long len, unsigned long checksum,
- unsigned long partial_dest,
- int *errp)
+ long len,
+ unsigned long partial_dest)
{
unsigned long carry = 0;
unsigned long word;
unsigned long second_dest;
- int err = 0;
+ unsigned long checksum = ~0U;
mskql(partial_dest, doff, partial_dest);
while (len >= 0) {
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
len -= 8;
insql(word, doff, second_dest);
checksum += carry;
@@ -216,7 +203,8 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
len += 8;
if (len) {
checksum += carry;
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
mskql(word, len, word);
len -= 8;
checksum += word;
@@ -237,7 +225,6 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
stq_u(partial_dest | second_dest, dst);
out:
checksum += carry;
- if (err && errp) *errp = err;
return checksum;
}
@@ -249,23 +236,23 @@ static inline unsigned long
csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long * dst,
unsigned long soff, unsigned long doff,
- long len, unsigned long checksum,
- unsigned long partial_dest,
- int *errp)
+ long len, unsigned long partial_dest)
{
unsigned long carry = 0;
unsigned long first;
unsigned long lastsrc;
- int err = 0;
+ unsigned long checksum = ~0U;
- err |= __get_user_u(first, src);
+ if (__get_word(ldq_u, first, src))
+ return 0;
lastsrc = 7+len+(unsigned long)src;
mskql(partial_dest, doff, partial_dest);
while (len >= 0) {
unsigned long second, word;
unsigned long second_dest;
- err |= __get_user_u(second, src+1);
+ if (__get_word(ldq_u, second, src+1))
+ return 0;
extql(first, soff, word);
checksum += carry;
len -= 8;
@@ -286,7 +273,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long second, word;
unsigned long second_dest;
- err |= __get_user_u(second, lastsrc);
+ if (__get_word(ldq_u, second, lastsrc))
+ return 0;
extql(first, soff, word);
extqh(second, soff, first);
word |= first;
@@ -307,7 +295,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long second, word;
unsigned long second_dest;
- err |= __get_user_u(second, lastsrc);
+ if (__get_word(ldq_u, second, lastsrc))
+ return 0;
extql(first, soff, word);
extqh(second, soff, first);
word |= first;
@@ -320,66 +309,55 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
stq_u(partial_dest | word | second_dest, dst);
checksum += carry;
}
- if (err && errp) *errp = err;
return checksum;
}
-__wsum
-csum_and_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *errp)
+static __wsum __csum_and_copy(const void __user *src, void *dst, int len)
{
- unsigned long checksum = (__force u32) sum;
unsigned long soff = 7 & (unsigned long) src;
unsigned long doff = 7 & (unsigned long) dst;
-
- if (len) {
- if (!access_ok(src, len)) {
- if (errp) *errp = -EFAULT;
- memset(dst, 0, len);
- return sum;
- }
- if (!doff) {
- if (!soff)
- checksum = csum_partial_cfu_aligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- len-8, checksum, errp);
- else
- checksum = csum_partial_cfu_dest_aligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- soff, len-8, checksum, errp);
- } else {
- unsigned long partial_dest;
- ldq_u(partial_dest, dst);
- if (!soff)
- checksum = csum_partial_cfu_src_aligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- doff, len-8, checksum,
- partial_dest, errp);
- else
- checksum = csum_partial_cfu_unaligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- soff, doff, len-8, checksum,
- partial_dest, errp);
- }
- checksum = from64to16 (checksum);
+ unsigned long checksum;
+
+ if (!doff) {
+ if (!soff)
+ checksum = csum_partial_cfu_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst, len-8);
+ else
+ checksum = csum_partial_cfu_dest_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ soff, len-8);
+ } else {
+ unsigned long partial_dest;
+ ldq_u(partial_dest, dst);
+ if (!soff)
+ checksum = csum_partial_cfu_src_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ doff, len-8, partial_dest);
+ else
+ checksum = csum_partial_cfu_unaligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ soff, doff, len-8, partial_dest);
}
- return (__force __wsum)checksum;
+ return (__force __wsum)from64to16 (checksum);
+}
+
+__wsum
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+ return __csum_and_copy(src, dst, len);
}
EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- __wsum checksum;
- mm_segment_t oldfs = get_fs();
- set_fs(KERNEL_DS);
- checksum = csum_and_copy_from_user((__force const void __user *)src,
- dst, len, sum, NULL);
- set_fs(oldfs);
- return checksum;
+ return __csum_and_copy((__force const void __user *)src,
+ dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
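The alpha conversion above shows the new csum_and_copy_from_user()/csum_partial_copy_nocheck() contract: the '__wsum sum' seed and 'int *errp' arguments are gone, the helpers seed the checksum with ~0U internally, and a return value of 0 now means the user access faulted (a successful copy can never yield 0 with that seed). A hedged caller sketch, with a hypothetical function name, not taken from this patch:

/* Hypothetical kernel-side caller, for illustration only. */
static int copy_and_checksum(const void __user *usrc, void *kdst, int len,
			     __wsum *csump)
{
	__wsum csum = csum_and_copy_from_user(usrc, kdst, len);

	if (!csum)			/* 0 now signals a faulting user access */
		return -EFAULT;

	*csump = csum;			/* caller folds/verifies it later */
	return 0;
}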
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 9acbeba832c0..dcaa44e408ac 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -88,6 +88,8 @@
arcpct: pct {
compatible = "snps,archs-pct";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <20>;
};
/* TIMER0 with interrupt for clockevent */
@@ -208,7 +210,7 @@
reg = <0x8000 0x2000>;
interrupts = <10>;
interrupt-names = "macirq";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
snps,pbl = <32>;
snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
@@ -226,7 +228,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
- phy0: ethernet-phy@0 {
+ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
reg = <0>;
};
};
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index b747f2ec2928..6147db925248 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -18,10 +18,10 @@
* vineetg: April 2010
* -Switched pgtable_t from being struct page * to unsigned long
* =Needed so that Page Table allocator (pte_alloc_one) is not forced to
- * to deal with struct page. Thay way in future we can make it allocate
+ * deal with struct page. That way in future we can make it allocate
* multiple PG Tbls in one Page Frame
* =sweet side effect is avoiding calls to ugly page_address( ) from the
- * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate
+ * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index d04837d91b40..03f8b1be0c3a 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -339,7 +339,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
- /* intentional fall-through */
+ fallthrough;
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 7d3efe83cba7..cabef45f11df 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -388,6 +388,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
{
ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->blink = (unsigned long)&kretprobe_trampoline;
@@ -396,58 +397,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address) {
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
- regs->ret = orig_ret_address;
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ regs->ret = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
/* By returning a non zero value, we are telling the kprobe handler
* that we don't want the post_handler to run
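The hunk above drops ARC's open-coded walk of the kretprobe instance list in favour of the generic __kretprobe_trampoline_handler(). For context, the consumer-facing API that ends up exercising this path is register_kretprobe(); a minimal module sketch follows (assumptions: probing kernel_clone(), arbitrary maxactive; generic kprobes usage, not part of this patch):

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int demo_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* Runs when the probed function returns through the trampoline. */
	pr_info("probed function returned %ld\n", (long)regs_return_value(regs));
	return 0;
}

static struct kretprobe demo_rp = {
	.kp.symbol_name	= "kernel_clone",	/* assumption: any probe-able symbol */
	.handler	= demo_ret_handler,
	.maxactive	= 16,
};

static int __init demo_init(void)
{
	return register_kretprobe(&demo_rp);
}

static void __exit demo_exit(void)
{
	unregister_kretprobe(&demo_rp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");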
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 661fd842ea97..79849f37e782 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
- int i, has_interrupts;
+ int i, has_interrupts, irq;
int counter_size; /* in bits */
union cc_name {
@@ -637,13 +637,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.attr_groups = arc_pmu->attr_groups,
};
- if (has_interrupts) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq < 0) {
- pr_err("Cannot get IRQ number for the platform\n");
- return -ENODEV;
- }
+ if (has_interrupts && (irq = platform_get_irq(pdev, 0)) >= 0) {
arc_pmu->irq = irq;
@@ -652,9 +646,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
this_cpu_ptr(&arc_pmu_cpu));
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
-
- } else
+ } else {
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ }
/*
* perf parser doesn't really like '-' symbol in events name, so let's
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 3d57ed0d8535..8222f8c54690 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -321,7 +321,7 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
regs->r0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
/*
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 28e8bf04b253..a331bb5d8319 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,44 +18,37 @@
#define ARC_PATH_MAX 256
-/*
- * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
- * -Prints 3 regs per line and a CR.
- * -To continue, callee regs right after scratch, special handling of CR
- */
-static noinline void print_reg_file(long *reg_rev, int start_num)
+static noinline void print_regs_scratch(struct pt_regs *regs)
{
- unsigned int i;
- char buf[512];
- int n = 0, len = sizeof(buf);
-
- for (i = start_num; i < start_num + 13; i++) {
- n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
- i, (unsigned long)*reg_rev);
-
- if (((i + 1) % 3) == 0)
- n += scnprintf(buf + n, len - n, "\n");
-
- /* because pt_regs has regs reversed: r12..r0, r25..r13 */
- if (is_isa_arcv2() && start_num == 0)
- reg_rev++;
- else
- reg_rev--;
- }
-
- if (start_num != 0)
- n += scnprintf(buf + n, len - n, "\n\n");
+ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
+ regs->bta, regs->sp, regs->fp, (void *)regs->blink);
+ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
- /* To continue printing callee regs on same line as scratch regs */
- if (start_num == 0)
- pr_info("%s", buf);
- else
- pr_cont("%s\n", buf);
+ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
+ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
+ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
+ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
+ "r12: 0x%08lx\t",
+ regs->r0, regs->r1, regs->r2,
+ regs->r3, regs->r4, regs->r5,
+ regs->r6, regs->r7, regs->r8,
+ regs->r9, regs->r10, regs->r11,
+ regs->r12);
}
-static void show_callee_regs(struct callee_regs *cregs)
+static void print_regs_callee(struct callee_regs *regs)
{
- print_reg_file(&(cregs->r13), 13);
+ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
+ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
+ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
+ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
+ "r24: 0x%08lx\tr25: 0x%08lx\n",
+ regs->r13, regs->r14,
+ regs->r15, regs->r16, regs->r17,
+ regs->r18, regs->r19, regs->r20,
+ regs->r21, regs->r22, regs->r23,
+ regs->r24, regs->r25);
}
static void print_task_path_n_nm(struct task_struct *tsk)
@@ -175,7 +168,7 @@ static void show_ecr_verbose(struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct callee_regs *cregs;
+ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
/*
* generic code calls us with preemption disabled, but some calls
@@ -204,25 +197,15 @@ void show_regs(struct pt_regs *regs)
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
- pr_cont(" [%2s%2s%2s%2s]",
+ pr_cont(" [%2s%2s%2s%2s] ",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
- pr_cont(" BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
- regs->bta, regs->sp, regs->fp, (void *)regs->blink);
- pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
- regs->lp_start, regs->lp_end, regs->lp_count);
-
- /* print regs->r0 thru regs->r12
- * Sequential printing was generating horrible code
- */
- print_reg_file(&(regs->r0), 0);
- /* If Callee regs were saved, display them too */
- cregs = (struct callee_regs *)current->thread.callee_reg;
+ print_regs_scratch(regs);
if (cregs)
- show_callee_regs(cregs);
+ print_regs_callee(cregs);
preempt_disable();
}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index f87758a6851b..74ad4256022e 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -572,7 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
#else
BUILD_BUG_ON(sizeof(u32) != sizeof(value));
#endif
- /* Fall through */
+ fallthrough;
case DW_EH_PE_native:
if (end < (const void *)(ptr.pul + 1))
return 0;
@@ -827,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
case DW_CFA_def_cfa:
state->cfa.reg = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset:
state->cfa.offs = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -835,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
break;
case DW_CFA_def_cfa_sf:
state->cfa.reg = get_uleb128(&ptr.p8, end);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset_sf:
state->cfa.offs = get_sleb128(&ptr.p8, end)
* state->dataAlign;
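
The hunks above (signal.c and unwind.c) replace bare /* fall through */ comments with the fallthrough pseudo-keyword, which is a real statement and therefore satisfies -Wimplicit-fallthrough. A minimal sketch, not taken from this series and assuming a toolchain that supports the statement attribute (gcc >= 7 or a recent clang; older compilers would need the kernel's empty do { } while (0) fallback), of how the same construct reads in plain C:

/* fallthrough_sketch.c - illustrative only, not part of the patches above */
#include <stdio.h>

#ifndef fallthrough
#define fallthrough __attribute__((__fallthrough__))
#endif

static int classify(int err)
{
	switch (err) {
	case -4:
		err = -1;
		fallthrough;	/* a statement, so -Wimplicit-fallthrough stays quiet */
	case -2:
		return err;	/* reached both directly and via the fall-through */
	default:
		return 0;
	}
}

int main(void)
{
	/* prints "-1 -2 0": the first call falls through into the second case */
	printf("%d %d %d\n", classify(-4), classify(-2), classify(0));
	return 0;
}
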
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 54139a6f469b..33ce59d91461 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -122,6 +122,7 @@ SECTIONS
_end = . ;
STABS_DEBUG
+ ELF_DETAILS
DISCARDS
.arcextmap 0 : {
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index f886ac69d8ad..3a35b82a718e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -26,8 +26,8 @@ static unsigned long low_mem_sz;
#ifdef CONFIG_HIGHMEM
static unsigned long min_high_pfn, max_high_pfn;
-static u64 high_mem_start;
-static u64 high_mem_sz;
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
#endif
#ifdef CONFIG_DISCONTIGMEM
@@ -69,6 +69,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
high_mem_sz = size;
in_use = 1;
memblock_add_node(base, size, 1);
+ memblock_reserve(base, size);
#endif
}
@@ -157,7 +158,7 @@ void __init setup_arch_memory(void)
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+ max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
kmap_init();
@@ -166,22 +167,26 @@ void __init setup_arch_memory(void)
free_area_init(max_zone_pfn);
}
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
+static void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- reset_all_zones_managed_pages();
+ memblock_free(high_mem_start, high_mem_sz);
for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
+}
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
memblock_free_all();
+ highmem_init();
mem_init_print_info(NULL);
}
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index a4a61531c7fb..77712c5ffe84 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -33,7 +33,6 @@
#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e00d94b16658..3996b6572c3a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,6 +49,7 @@ config ARM
select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY
select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_IRQ_IPI if SMP
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
@@ -67,6 +68,7 @@ config ARM
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
@@ -83,7 +85,7 @@ config ARM
select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
- select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA
@@ -1617,20 +1619,6 @@ config UACCESS_WITH_MEMCPY
However, if the CPU data cache is using a write-allocate mode,
this option is unlikely to provide any performance gain.
-config SECCOMP
- bool
- prompt "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
config PARAVIRT
bool "Enable paravirtualization code"
help
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 4e877354515f..e589da3c8949 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,10 @@ LDFLAGS_vmlinux += --be8
KBUILD_LDFLAGS_MODULE += --be8
endif
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+
ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
KBUILD_LDS_MODULE += $(srctree)/arch/arm/kernel/module.lds
endif
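
The Makefile change above asks the linker to warn about orphan sections, i.e. input sections that no output rule in the linker script claims, instead of placing them heuristically. A minimal userspace sketch, assuming a GNU toolchain; the .version_tag section name and variable below are made up for illustration and are not from this series. Linking this file against a custom script that never mentions .version_tag, with --orphan-handling=warn passed to the linker, would flag the section; the default script names the usual sections, so a normal build stays quiet.

/* orphan_sketch.c - illustrative only */
#include <stdio.h>

/* Place data in a custom section; a linker run with --orphan-handling=warn
 * reports the section as an orphan if the script does not name it. */
static const char version_tag[] __attribute__((section(".version_tag"))) =
	"demo-1.0";

int main(void)
{
	printf("%s\n", version_tag);
	return 0;
}
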
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index b1147b7f2c8d..58028abd05d9 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -123,6 +123,8 @@ endif
LDFLAGS_vmlinux += --no-undefined
# Delete all temporary local symbols
LDFLAGS_vmlinux += -X
+# Report orphan sections
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
# Next argument is a linker script
LDFLAGS_vmlinux += -T
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 09ac33f52814..b914be3a207b 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2000 Russell King
*/
+#include <asm/vmlinux.lds.h>
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
@@ -17,8 +18,11 @@ ENTRY(_start)
SECTIONS
{
/DISCARD/ : {
+ COMMON_DISCARDS
*(.ARM.exidx*)
*(.ARM.extab*)
+ *(.note.*)
+ *(.rel.*)
/*
* Discard any r/w data - this produces a link error if we have any,
* which is required for PIC decompression. Local data generates
@@ -36,9 +40,7 @@ SECTIONS
*(.start)
*(.text)
*(.text.*)
- *(.gnu.warning)
- *(.glue_7t)
- *(.glue_7)
+ ARM_STUBS_TEXT
}
.table : ALIGN(4) {
_table_start = .;
@@ -128,12 +130,10 @@ SECTIONS
PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data));
PROVIDE(__pecoff_end = ALIGN(512));
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
+ STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
}
ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 8d19925fc09e..6783cf16ff81 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -116,7 +116,6 @@
switch0: ksz8563@0 {
compatible = "microchip,ksz8563";
reg = <0>;
- phy-mode = "mii";
reset-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_LOW>;
spi-max-frequency = <500000>;
@@ -140,6 +139,7 @@
reg = <2>;
label = "cpu";
ethernet = <&macb0>;
+ phy-mode = "mii";
fixed-link {
speed = <100>;
full-duplex;
diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
index cbebed5f050e..e8df458aad39 100644
--- a/arch/arm/boot/dts/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/bcm-hr2.dtsi
@@ -217,7 +217,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 0346ea621f0f..c846fa3c244d 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -284,7 +284,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index f7ae5a4530b8..d94357b21f7e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -13,7 +13,7 @@
soc {
firmware: firmware {
- compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
+ compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 2d9b4dd05830..0016720ce530 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -488,7 +488,7 @@
};
spi@18029200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18029200 0x184>,
<0x18029000 0x124>,
<0x1811b408 0x004>,
diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts
index 7a3d1d3e54a9..8f94364ba484 100644
--- a/arch/arm/boot/dts/imx6q-logicpd.dts
+++ b/arch/arm/boot/dts/imx6q-logicpd.dts
@@ -13,7 +13,7 @@
backlight: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 20000>;
+ pwms = <&pwm3 0 20000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
power-supply = <&reg_lcd>;
diff --git a/arch/arm/boot/dts/imx6q-prtwd2.dts b/arch/arm/boot/dts/imx6q-prtwd2.dts
index dffafbcaa7af..349959d38020 100644
--- a/arch/arm/boot/dts/imx6q-prtwd2.dts
+++ b/arch/arm/boot/dts/imx6q-prtwd2.dts
@@ -30,7 +30,7 @@
};
/* PRTWD2 rev 1 bitbang I2C for Ethernet Switch */
- i2c@4 {
+ i2c {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4>;
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 7705285d9e3c..4d01c3300b97 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -22,8 +22,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h
index 0b02c7e60c17..f4dc46207954 100644
--- a/arch/arm/boot/dts/imx6sx-pinfunc.h
+++ b/arch/arm/boot/dts/imx6sx-pinfunc.h
@@ -1026,7 +1026,7 @@
#define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0
#define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4
-#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x1 0x0
#define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1
#define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2
#define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1
diff --git a/arch/arm/boot/dts/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
index e5e20b07f184..7cb6153fc650 100644
--- a/arch/arm/boot/dts/imx7d-zii-rmu2.dts
+++ b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
@@ -58,7 +58,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&fec1_phy>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index 367439639da9..b7ea37ad4e55 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -394,7 +394,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLC>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 0 32>;
+ gpio-ranges = <&iomuxc1 0 0 20>;
};
gpio_ptd: gpio@40af0000 {
@@ -408,7 +408,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLD>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 32 32>;
+ gpio-ranges = <&iomuxc1 0 32 12>;
};
gpio_pte: gpio@40b00000 {
@@ -422,7 +422,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLE>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 64 32>;
+ gpio-ranges = <&iomuxc1 0 64 16>;
};
gpio_ptf: gpio@40b10000 {
@@ -436,7 +436,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLF>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 96 32>;
+ gpio-ranges = <&iomuxc1 0 96 20>;
};
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
index 100396f6c2fe..395e05f10d36 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
@@ -51,6 +51,8 @@
&mcbsp2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
};
&charger {
@@ -102,35 +104,18 @@
regulator-max-microvolt = <3300000>;
};
- lcd0: display@0 {
- compatible = "panel-dpi";
- label = "28";
- status = "okay";
- /* default-on; */
+ lcd0: display {
+ /* This isn't the exact LCD, but the timings meet spec */
+ compatible = "logicpd,type28";
pinctrl-names = "default";
pinctrl-0 = <&lcd_enable_pin>;
- enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
+ backlight = <&bl>;
+ enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
-
- panel-timing {
- clock-frequency = <9000000>;
- hactive = <480>;
- vactive = <272>;
- hfront-porch = <3>;
- hback-porch = <2>;
- hsync-len = <42>;
- vback-porch = <3>;
- vfront-porch = <2>;
- vsync-len = <11>;
- hsync-active = <1>;
- vsync-active = <1>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
};
bl: backlight {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
index 381f0e82bb70..b0f6613e6d54 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
@@ -81,6 +81,8 @@
};
&mcbsp2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 069af9a19bb6..827373ef1a54 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -182,7 +182,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x1550000 0x0 0x10000>,
- <0x0 0x40000000 0x0 0x40000000>;
+ <0x0 0x40000000 0x0 0x20000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 5da9cff7a53c..a82c96258a93 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -488,11 +488,11 @@
};
};
- target-module@5000 {
+ target-module@4000 {
compatible = "ti,sysc-omap2", "ti,sysc";
- reg = <0x5000 0x4>,
- <0x5010 0x4>,
- <0x5014 0x4>;
+ reg = <0x4000 0x4>,
+ <0x4010 0x4>,
+ <0x4014 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
@@ -504,7 +504,7 @@
ti,syss-mask = <1>;
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0 0x5000 0x1000>;
+ ranges = <0 0x4000 0x1000>;
dsi1: encoder@0 {
compatible = "ti,omap5-dsi";
@@ -514,8 +514,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
@@ -545,8 +546,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
diff --git a/arch/arm/boot/dts/r8a7742.dtsi b/arch/arm/boot/dts/r8a7742.dtsi
index 9743b4242801..0240d017c90d 100644
--- a/arch/arm/boot/dts/r8a7742.dtsi
+++ b/arch/arm/boot/dts/r8a7742.dtsi
@@ -386,6 +386,54 @@
#thermal-sensor-cells = <0>;
};
+ ipmmu_sy0: iommu@e6280000 {
+ compatible = "renesas,ipmmu-r8a7742",
+ "renesas,ipmmu-vmsa";
+ reg = <0 0xe6280000 0 0x1000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_sy1: iommu@e6290000 {
+ compatible = "renesas,ipmmu-r8a7742",
+ "renesas,ipmmu-vmsa";
+ reg = <0 0xe6290000 0 0x1000>;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_ds: iommu@e6740000 {
+ compatible = "renesas,ipmmu-r8a7742",
+ "renesas,ipmmu-vmsa";
+ reg = <0 0xe6740000 0 0x1000>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_mp: iommu@ec680000 {
+ compatible = "renesas,ipmmu-r8a7742",
+ "renesas,ipmmu-vmsa";
+ reg = <0 0xec680000 0 0x1000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_mx: iommu@fe951000 {
+ compatible = "renesas,ipmmu-r8a7742",
+ "renesas,ipmmu-vmsa";
+ reg = <0 0xfe951000 0 0x1000>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
icram0: sram@e63a0000 {
compatible = "mmio-sram";
reg = <0 0xe63a0000 0 0x12000>;
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index fc4abef143a0..0013ec3463c4 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -821,7 +821,7 @@
timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xffd01000 0x100>;
+ reg = <0xffd00100 0x100>;
clocks = <&l4_sys_free_clk>;
clock-names = "timer";
resets = <&rst L4SYSTIMER1_RESET>;
diff --git a/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi b/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi
index dce85d39480d..6f3e8c5fc5f0 100644
--- a/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi
+++ b/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi
@@ -26,14 +26,6 @@
opp-microvolt = <800000 800000 1125000>;
};
- opp@456000000,800,2,2 {
- opp-microvolt = <800000 800000 1125000>;
- };
-
- opp@456000000,800,3,2 {
- opp-microvolt = <800000 800000 1125000>;
- };
-
opp@456000000,825 {
opp-microvolt = <825000 825000 1125000>;
};
@@ -46,10 +38,6 @@
opp-microvolt = <800000 800000 1125000>;
};
- opp@608000000,800,3,2 {
- opp-microvolt = <800000 800000 1125000>;
- };
-
opp@608000000,825 {
opp-microvolt = <825000 825000 1125000>;
};
@@ -78,18 +66,6 @@
opp-microvolt = <875000 875000 1125000>;
};
- opp@760000000,875,1,1 {
- opp-microvolt = <875000 875000 1125000>;
- };
-
- opp@760000000,875,0,2 {
- opp-microvolt = <875000 875000 1125000>;
- };
-
- opp@760000000,875,1,2 {
- opp-microvolt = <875000 875000 1125000>;
- };
-
opp@760000000,900 {
opp-microvolt = <900000 900000 1125000>;
};
@@ -134,14 +110,6 @@
opp-microvolt = <950000 950000 1125000>;
};
- opp@912000000,950,0,2 {
- opp-microvolt = <950000 950000 1125000>;
- };
-
- opp@912000000,950,2,2 {
- opp-microvolt = <950000 950000 1125000>;
- };
-
opp@912000000,1000 {
opp-microvolt = <1000000 1000000 1125000>;
};
@@ -170,10 +138,6 @@
opp-microvolt = <1000000 1000000 1125000>;
};
- opp@1000000000,1000,0,2 {
- opp-microvolt = <1000000 1000000 1125000>;
- };
-
opp@1000000000,1025 {
opp-microvolt = <1025000 1025000 1125000>;
};
diff --git a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi
index 9b8fedb57a1b..702a635e88e7 100644
--- a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi
+++ b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi
@@ -37,19 +37,8 @@
opp@456000000,800 {
clock-latency-ns = <400000>;
- opp-supported-hw = <0x03 0x0006>;
- opp-hz = /bits/ 64 <456000000>;
- };
-
- opp@456000000,800,2,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x04 0x0004>;
- opp-hz = /bits/ 64 <456000000>;
- };
-
- opp@456000000,800,3,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x08 0x0004>;
+ opp-supported-hw = <0x03 0x0006>, <0x04 0x0004>,
+ <0x08 0x0004>;
opp-hz = /bits/ 64 <456000000>;
};
@@ -67,13 +56,7 @@
opp@608000000,800 {
clock-latency-ns = <400000>;
- opp-supported-hw = <0x04 0x0006>;
- opp-hz = /bits/ 64 <608000000>;
- };
-
- opp@608000000,800,3,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x08 0x0004>;
+ opp-supported-hw = <0x04 0x0006>, <0x08 0x0004>;
opp-hz = /bits/ 64 <608000000>;
};
@@ -115,25 +98,8 @@
opp@760000000,875 {
clock-latency-ns = <400000>;
- opp-supported-hw = <0x04 0x0001>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,875,1,1 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x02 0x0002>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,875,0,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x01 0x0004>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,875,1,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x02 0x0004>;
+ opp-supported-hw = <0x04 0x0001>, <0x02 0x0002>,
+ <0x01 0x0004>, <0x02 0x0004>;
opp-hz = /bits/ 64 <760000000>;
};
@@ -199,19 +165,8 @@
opp@912000000,950 {
clock-latency-ns = <400000>;
- opp-supported-hw = <0x02 0x0006>;
- opp-hz = /bits/ 64 <912000000>;
- };
-
- opp@912000000,950,0,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x01 0x0004>;
- opp-hz = /bits/ 64 <912000000>;
- };
-
- opp@912000000,950,2,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x04 0x0004>;
+ opp-supported-hw = <0x02 0x0006>, <0x01 0x0004>,
+ <0x04 0x0004>;
opp-hz = /bits/ 64 <912000000>;
};
@@ -253,13 +208,7 @@
opp@1000000000,1000 {
clock-latency-ns = <400000>;
- opp-supported-hw = <0x02 0x0006>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,1000,0,2 {
- clock-latency-ns = <400000>;
- opp-supported-hw = <0x01 0x0004>;
+ opp-supported-hw = <0x02 0x0006>, <0x01 0x0004>;
opp-hz = /bits/ 64 <1000000000>;
};
diff --git a/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi b/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi
index d682f7437146..1be715d2a442 100644
--- a/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi
+++ b/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi
@@ -74,22 +74,6 @@
opp-microvolt = <850000 850000 1250000>;
};
- opp@475000000,850,0,1 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@475000000,850,0,4 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@475000000,850,0,7 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@475000000,850,0,8 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
opp@608000000,850 {
opp-microvolt = <850000 850000 1250000>;
};
@@ -106,62 +90,6 @@
opp-microvolt = <850000 850000 1250000>;
};
- opp@640000000,850,1,1 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,2,1 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,3,1 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,1,4 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,2,4 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,3,4 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,1,7 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,2,7 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,3,7 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,4,7 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,1,8 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,2,8 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,3,8 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@640000000,850,4,8 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
opp@640000000,900 {
opp-microvolt = <900000 900000 1250000>;
};
@@ -170,94 +98,10 @@
opp-microvolt = <850000 850000 1250000>;
};
- opp@760000000,850,3,1 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,3,2 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,3,3 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,3,4 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,3,7 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,4,7 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,3,8 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,4,8 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
- opp@760000000,850,0,10 {
- opp-microvolt = <850000 850000 1250000>;
- };
-
opp@760000000,900 {
opp-microvolt = <900000 900000 1250000>;
};
- opp@760000000,900,1,1 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,2,1 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,1,2 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,2,2 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,1,3 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,2,3 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,1,4 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,2,4 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,1,7 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,2,7 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,1,8 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@760000000,900,2,8 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
opp@760000000,912 {
opp-microvolt = <912000 912000 1250000>;
};
@@ -282,90 +126,10 @@
opp-microvolt = <900000 900000 1250000>;
};
- opp@860000000,900,2,1 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,3,1 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,2,2 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,3,2 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,2,3 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,3,3 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,2,4 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,3,4 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,2,7 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,3,7 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,4,7 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,2,8 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,3,8 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
- opp@860000000,900,4,8 {
- opp-microvolt = <900000 900000 1250000>;
- };
-
opp@860000000,975 {
opp-microvolt = <975000 975000 1250000>;
};
- opp@860000000,975,1,1 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@860000000,975,1,2 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@860000000,975,1,3 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@860000000,975,1,4 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@860000000,975,1,7 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@860000000,975,1,8 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
opp@860000000,1000 {
opp-microvolt = <1000000 1000000 1250000>;
};
@@ -382,62 +146,6 @@
opp-microvolt = <975000 975000 1250000>;
};
- opp@1000000000,975,2,1 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,3,1 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,2,2 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,3,2 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,2,3 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,3,3 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,2,4 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,3,4 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,2,7 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,3,7 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,4,7 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,2,8 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,3,8 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1000000000,975,4,8 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
opp@1000000000,1000 {
opp-microvolt = <1000000 1000000 1250000>;
};
@@ -454,66 +162,10 @@
opp-microvolt = <975000 975000 1250000>;
};
- opp@1100000000,975,3,1 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1100000000,975,3,2 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1100000000,975,3,3 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1100000000,975,3,4 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1100000000,975,3,7 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1100000000,975,4,7 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1100000000,975,3,8 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
- opp@1100000000,975,4,8 {
- opp-microvolt = <975000 975000 1250000>;
- };
-
opp@1100000000,1000 {
opp-microvolt = <1000000 1000000 1250000>;
};
- opp@1100000000,1000,2,1 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1100000000,1000,2,2 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1100000000,1000,2,3 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1100000000,1000,2,4 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1100000000,1000,2,7 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1100000000,1000,2,8 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
opp@1100000000,1025 {
opp-microvolt = <1025000 1025000 1250000>;
};
@@ -534,66 +186,10 @@
opp-microvolt = <1000000 1000000 1250000>;
};
- opp@1200000000,1000,3,1 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1200000000,1000,3,2 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1200000000,1000,3,3 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1200000000,1000,3,4 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1200000000,1000,3,7 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1200000000,1000,4,7 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1200000000,1000,3,8 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1200000000,1000,4,8 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
opp@1200000000,1025 {
opp-microvolt = <1025000 1025000 1250000>;
};
- opp@1200000000,1025,2,1 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
- opp@1200000000,1025,2,2 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
- opp@1200000000,1025,2,3 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
- opp@1200000000,1025,2,4 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
- opp@1200000000,1025,2,7 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
- opp@1200000000,1025,2,8 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
opp@1200000000,1050 {
opp-microvolt = <1050000 1050000 1250000>;
};
@@ -610,90 +206,18 @@
opp-microvolt = <1000000 1000000 1250000>;
};
- opp@1300000000,1000,4,7 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
- opp@1300000000,1000,4,8 {
- opp-microvolt = <1000000 1000000 1250000>;
- };
-
opp@1300000000,1025 {
opp-microvolt = <1025000 1025000 1250000>;
};
- opp@1300000000,1025,3,1 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
- opp@1300000000,1025,3,7 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
- opp@1300000000,1025,3,8 {
- opp-microvolt = <1025000 1025000 1250000>;
- };
-
opp@1300000000,1050 {
opp-microvolt = <1050000 1050000 1250000>;
};
- opp@1300000000,1050,2,1 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,3,2 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,3,3 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,3,4 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,3,5 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,3,6 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,2,7 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,2,8 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,3,12 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
- opp@1300000000,1050,3,13 {
- opp-microvolt = <1050000 1050000 1250000>;
- };
-
opp@1300000000,1075 {
opp-microvolt = <1075000 1075000 1250000>;
};
- opp@1300000000,1075,2,2 {
- opp-microvolt = <1075000 1075000 1250000>;
- };
-
- opp@1300000000,1075,2,3 {
- opp-microvolt = <1075000 1075000 1250000>;
- };
-
- opp@1300000000,1075,2,4 {
- opp-microvolt = <1075000 1075000 1250000>;
- };
-
opp@1300000000,1100 {
opp-microvolt = <1100000 1100000 1250000>;
};
@@ -722,10 +246,6 @@
opp-microvolt = <1150000 1150000 1250000>;
};
- opp@1400000000,1150,2,4 {
- opp-microvolt = <1150000 1150000 1250000>;
- };
-
opp@1400000000,1175 {
opp-microvolt = <1175000 1175000 1250000>;
};
@@ -738,42 +258,10 @@
opp-microvolt = <1125000 1125000 1250000>;
};
- opp@1500000000,1125,4,5 {
- opp-microvolt = <1125000 1125000 1250000>;
- };
-
- opp@1500000000,1125,4,6 {
- opp-microvolt = <1125000 1125000 1250000>;
- };
-
- opp@1500000000,1125,4,12 {
- opp-microvolt = <1125000 1125000 1250000>;
- };
-
- opp@1500000000,1125,4,13 {
- opp-microvolt = <1125000 1125000 1250000>;
- };
-
opp@1500000000,1150 {
opp-microvolt = <1150000 1150000 1250000>;
};
- opp@1500000000,1150,3,5 {
- opp-microvolt = <1150000 1150000 1250000>;
- };
-
- opp@1500000000,1150,3,6 {
- opp-microvolt = <1150000 1150000 1250000>;
- };
-
- opp@1500000000,1150,3,12 {
- opp-microvolt = <1150000 1150000 1250000>;
- };
-
- opp@1500000000,1150,3,13 {
- opp-microvolt = <1150000 1150000 1250000>;
- };
-
opp@1500000000,1200 {
opp-microvolt = <1200000 1200000 1250000>;
};
diff --git a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi
index 8e434f6713cd..0f7135006d19 100644
--- a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi
+++ b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi
@@ -109,31 +109,9 @@
opp@475000000,850 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x0F 0x0001>;
- opp-hz = /bits/ 64 <475000000>;
- };
-
- opp@475000000,850,0,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0002>;
- opp-hz = /bits/ 64 <475000000>;
- };
-
- opp@475000000,850,0,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0010>;
- opp-hz = /bits/ 64 <475000000>;
- };
-
- opp@475000000,850,0,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0080>;
- opp-hz = /bits/ 64 <475000000>;
- };
-
- opp@475000000,850,0,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0100>;
+ opp-supported-hw = <0x0F 0x0001>, <0x01 0x0002>,
+ <0x01 0x0010>, <0x01 0x0080>,
+ <0x01 0x0100>;
opp-hz = /bits/ 64 <475000000>;
};
@@ -157,91 +135,14 @@
opp@640000000,850 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x0F 0x0001>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,1,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0002>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,2,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0002>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,3,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0002>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,1,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0010>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,3,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,1,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0080>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,2,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0080>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,3,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0080>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,4,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0080>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,1,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0100>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,2,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0100>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,3,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0100>;
- opp-hz = /bits/ 64 <640000000>;
- };
-
- opp@640000000,850,4,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0100>;
+ opp-supported-hw = <0x0F 0x0001>, <0x02 0x0002>,
+ <0x04 0x0002>, <0x08 0x0002>,
+ <0x02 0x0010>, <0x04 0x0010>,
+ <0x08 0x0010>, <0x02 0x0080>,
+ <0x04 0x0080>, <0x08 0x0080>,
+ <0x10 0x0080>, <0x02 0x0100>,
+ <0x04 0x0100>, <0x08 0x0100>,
+ <0x10 0x0100>;
opp-hz = /bits/ 64 <640000000>;
};
@@ -253,139 +154,23 @@
opp@760000000,850 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x1E 0x3461>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,3,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0002>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,3,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0004>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,3,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0008>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,3,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,3,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0080>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,4,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0080>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,3,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0100>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,4,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0100>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,850,0,10 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0400>;
+ opp-supported-hw = <0x1E 0x3461>, <0x08 0x0002>,
+ <0x08 0x0004>, <0x08 0x0008>,
+ <0x08 0x0010>, <0x08 0x0080>,
+ <0x10 0x0080>, <0x08 0x0100>,
+ <0x10 0x0100>, <0x01 0x0400>;
opp-hz = /bits/ 64 <760000000>;
};
opp@760000000,900 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0001>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,1,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0002>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,2,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0002>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,1,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0004>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,2,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0004>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,1,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0008>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,2,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0008>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,1,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0010>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,1,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0080>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,2,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0080>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,1,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0100>;
- opp-hz = /bits/ 64 <760000000>;
- };
-
- opp@760000000,900,2,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0100>;
+ opp-supported-hw = <0x01 0x0001>, <0x02 0x0002>,
+ <0x04 0x0002>, <0x02 0x0004>,
+ <0x04 0x0004>, <0x02 0x0008>,
+ <0x04 0x0008>, <0x02 0x0010>,
+ <0x04 0x0010>, <0x02 0x0080>,
+ <0x04 0x0080>, <0x02 0x0100>,
+ <0x04 0x0100>;
opp-hz = /bits/ 64 <760000000>;
};
@@ -421,133 +206,23 @@
opp@860000000,900 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0001>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,2,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0002>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,3,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0002>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,2,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0004>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,3,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0004>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,2,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0008>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,3,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0008>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,3,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,2,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0080>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,3,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0080>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,4,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0080>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,2,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0100>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,3,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0100>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,900,4,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0100>;
+ opp-supported-hw = <0x02 0x0001>, <0x04 0x0002>,
+ <0x08 0x0002>, <0x04 0x0004>,
+ <0x08 0x0004>, <0x04 0x0008>,
+ <0x08 0x0008>, <0x04 0x0010>,
+ <0x08 0x0010>, <0x04 0x0080>,
+ <0x08 0x0080>, <0x10 0x0080>,
+ <0x04 0x0100>, <0x08 0x0100>,
+ <0x10 0x0100>;
opp-hz = /bits/ 64 <860000000>;
};
opp@860000000,975 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0001>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,975,1,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0002>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,975,1,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0004>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,975,1,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0008>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,975,1,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0010>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,975,1,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0080>;
- opp-hz = /bits/ 64 <860000000>;
- };
-
- opp@860000000,975,1,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0100>;
+ opp-supported-hw = <0x01 0x0001>, <0x02 0x0002>,
+ <0x02 0x0004>, <0x02 0x0008>,
+ <0x02 0x0010>, <0x02 0x0080>,
+ <0x02 0x0100>;
opp-hz = /bits/ 64 <860000000>;
};
@@ -571,91 +246,14 @@
opp@1000000000,975 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x03 0x0001>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,2,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0002>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,3,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0002>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,2,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0004>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,3,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0004>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,2,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0008>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,3,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0008>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,3,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,2,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0080>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,3,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0080>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,4,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0080>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,2,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0100>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,3,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0100>;
- opp-hz = /bits/ 64 <1000000000>;
- };
-
- opp@1000000000,975,4,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0100>;
+ opp-supported-hw = <0x03 0x0001>, <0x04 0x0002>,
+ <0x08 0x0002>, <0x04 0x0004>,
+ <0x08 0x0004>, <0x04 0x0008>,
+ <0x08 0x0008>, <0x04 0x0010>,
+ <0x08 0x0010>, <0x04 0x0080>,
+ <0x08 0x0080>, <0x10 0x0080>,
+ <0x04 0x0100>, <0x08 0x0100>,
+ <0x10 0x0100>;
opp-hz = /bits/ 64 <1000000000>;
};
@@ -679,97 +277,20 @@
opp@1100000000,975 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x06 0x0001>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,3,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0002>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,3,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0004>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,3,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0008>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,3,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,3,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0080>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,4,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0080>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,3,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0100>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,975,4,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0100>;
+ opp-supported-hw = <0x06 0x0001>, <0x08 0x0002>,
+ <0x08 0x0004>, <0x08 0x0008>,
+ <0x08 0x0010>, <0x08 0x0080>,
+ <0x10 0x0080>, <0x08 0x0100>,
+ <0x10 0x0100>;
opp-hz = /bits/ 64 <1100000000>;
};
opp@1100000000,1000 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x01 0x0001>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,1000,2,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0002>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,1000,2,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0004>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,1000,2,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0008>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,1000,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,1000,2,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0080>;
- opp-hz = /bits/ 64 <1100000000>;
- };
-
- opp@1100000000,1000,2,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0100>;
+ opp-supported-hw = <0x01 0x0001>, <0x04 0x0002>,
+ <0x04 0x0004>, <0x04 0x0008>,
+ <0x04 0x0010>, <0x04 0x0080>,
+ <0x04 0x0100>;
opp-hz = /bits/ 64 <1100000000>;
};
@@ -799,97 +320,20 @@
opp@1200000000,1000 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0001>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,3,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0002>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,3,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0004>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,3,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0008>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,3,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,3,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0080>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,4,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0080>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,3,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0100>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1000,4,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0100>;
+ opp-supported-hw = <0x04 0x0001>, <0x08 0x0002>,
+ <0x08 0x0004>, <0x08 0x0008>,
+ <0x08 0x0010>, <0x08 0x0080>,
+ <0x10 0x0080>, <0x08 0x0100>,
+ <0x10 0x0100>;
opp-hz = /bits/ 64 <1200000000>;
};
opp@1200000000,1025 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0001>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1025,2,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0002>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1025,2,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0004>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1025,2,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0008>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1025,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1025,2,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0080>;
- opp-hz = /bits/ 64 <1200000000>;
- };
-
- opp@1200000000,1025,2,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0100>;
+ opp-supported-hw = <0x02 0x0001>, <0x04 0x0002>,
+ <0x04 0x0004>, <0x04 0x0008>,
+ <0x04 0x0010>, <0x04 0x0080>,
+ <0x04 0x0100>;
opp-hz = /bits/ 64 <1200000000>;
};
@@ -913,133 +357,33 @@
opp@1300000000,1000 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0001>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1000,4,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0080>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1000,4,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0100>;
+ opp-supported-hw = <0x08 0x0001>, <0x10 0x0080>,
+ <0x10 0x0100>;
opp-hz = /bits/ 64 <1300000000>;
};
opp@1300000000,1025 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0001>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1025,3,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0002>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1025,3,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0080>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1025,3,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0100>;
+ opp-supported-hw = <0x04 0x0001>, <0x08 0x0002>,
+ <0x08 0x0080>, <0x08 0x0100>;
opp-hz = /bits/ 64 <1300000000>;
};
opp@1300000000,1050 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x12 0x3061>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,2,1 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0002>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,3,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0004>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,3,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0008>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,3,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,3,5 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0020>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,3,6 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0040>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,2,7 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0080>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,2,8 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0100>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,3,12 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x1000>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1050,3,13 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x2000>;
+ opp-supported-hw = <0x12 0x3061>, <0x04 0x0002>,
+ <0x08 0x0004>, <0x08 0x0008>,
+ <0x08 0x0010>, <0x08 0x0020>,
+ <0x08 0x0040>, <0x04 0x0080>,
+ <0x04 0x0100>, <0x08 0x1000>,
+ <0x08 0x2000>;
opp-hz = /bits/ 64 <1300000000>;
};
opp@1300000000,1075 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x0182>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1075,2,2 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0004>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1075,2,3 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0008>;
- opp-hz = /bits/ 64 <1300000000>;
- };
-
- opp@1300000000,1075,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
+ opp-supported-hw = <0x02 0x0182>, <0x04 0x0004>,
+ <0x04 0x0008>, <0x04 0x0010>;
opp-hz = /bits/ 64 <1300000000>;
};
@@ -1081,13 +425,7 @@
opp@1400000000,1150 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x02 0x000C>;
- opp-hz = /bits/ 64 <1400000000>;
- };
-
- opp@1400000000,1150,2,4 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
+ opp-supported-hw = <0x02 0x000C>, <0x04 0x0010>;
opp-hz = /bits/ 64 <1400000000>;
};
@@ -1105,61 +443,17 @@
opp@1500000000,1125 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0010>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1125,4,5 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0020>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1125,4,6 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x0040>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1125,4,12 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x1000>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1125,4,13 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x10 0x2000>;
+ opp-supported-hw = <0x08 0x0010>, <0x10 0x0020>,
+ <0x10 0x0040>, <0x10 0x1000>,
+ <0x10 0x2000>;
opp-hz = /bits/ 64 <1500000000>;
};
opp@1500000000,1150 {
clock-latency-ns = <100000>;
- opp-supported-hw = <0x04 0x0010>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1150,3,5 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0020>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1150,3,6 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x0040>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1150,3,12 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x1000>;
- opp-hz = /bits/ 64 <1500000000>;
- };
-
- opp@1500000000,1150,3,13 {
- clock-latency-ns = <100000>;
- opp-supported-hw = <0x08 0x2000>;
+ opp-supported-hw = <0x04 0x0010>, <0x08 0x0020>,
+ <0x08 0x0040>, <0x08 0x1000>,
+ <0x08 0x2000>;
opp-hz = /bits/ 64 <1500000000>;
};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 0fe03aa0367f..2259d11af721 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -495,7 +495,7 @@
};
ocotp: ocotp@400a5000 {
- compatible = "fsl,vf610-ocotp";
+ compatible = "fsl,vf610-ocotp", "syscon";
reg = <0x400a5000 0x1000>;
clocks = <&clks VF610_CLK_OCOTP>;
};
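
With "syscon" added to the OCOTP compatible list, other drivers can reach the fuse block through the generic syscon regmap. A minimal sketch of a hypothetical consumer (function name and offset are made up; assumes CONFIG_MFD_SYSCON):

    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    /* Hypothetical consumer: read one OCOTP word through the syscon regmap. */
    static int example_read_ocotp_word(u32 offset, u32 *val)
    {
            struct regmap *map;

            map = syscon_regmap_lookup_by_compatible("fsl,vf610-ocotp");
            if (IS_ERR(map))
                    return PTR_ERR(map);

            return regmap_read(map, offset, val);
    }
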
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index a9755c501bec..b06e537d5149 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -1,13 +1,11 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MULTI_V4T=y
CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
@@ -15,19 +13,17 @@ CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
CONFIG_INTEGRATOR_IMPD1=y
CONFIG_ARCH_INTEGRATOR_CP=y
-CONFIG_PCI=y
-CONFIG_PREEMPT=y
CONFIG_AEABI=y
# CONFIG_ATAGS is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPUFREQ_DT=y
-CONFIG_CMA=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,6 +33,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
+CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
@@ -52,9 +49,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
CONFIG_E100=y
CONFIG_SMC91X=y
+CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_DRM=y
+CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
diff --git a/arch/arm/crypto/aes-neonbs-core.S b/arch/arm/crypto/aes-neonbs-core.S
index cfaed4e67535..7d0cc7f226a5 100644
--- a/arch/arm/crypto/aes-neonbs-core.S
+++ b/arch/arm/crypto/aes-neonbs-core.S
@@ -77,11 +77,6 @@
vldr \out\()h, \sym + 8
.endm
- .macro __adr, reg, lbl
- adr \reg, \lbl
-THUMB( orr \reg, \reg, #1 )
- .endm
-
.macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
veor \b2, \b2, \b1
veor \b5, \b5, \b6
@@ -629,11 +624,11 @@ ENDPROC(aesbs_decrypt8)
push {r4-r6, lr}
ldr r5, [sp, #16] // number of blocks
-99: __adr ip, 0f
+99: adr ip, 0f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
vld1.8 {q0}, [r1]!
vld1.8 {q1}, [r1]!
@@ -648,11 +643,11 @@ ENDPROC(aesbs_decrypt8)
mov rounds, r3
bl \do8
- __adr ip, 1f
+ adr ip, 1f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
vst1.8 {\o0}, [r0]!
vst1.8 {\o1}, [r0]!
@@ -689,12 +684,12 @@ ENTRY(aesbs_cbc_decrypt)
push {r4-r6, lr}
ldm ip, {r5-r6} // load args 4-5
-99: __adr ip, 0f
+99: adr ip, 0f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
mov lr, r1
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
vld1.8 {q0}, [lr]!
vld1.8 {q1}, [lr]!
@@ -718,11 +713,11 @@ ENTRY(aesbs_cbc_decrypt)
vmov q14, q8
vmov q15, q8
- __adr ip, 1f
+ adr ip, 1f
and lr, r5, #7
cmp r5, #8
sub ip, ip, lr, lsl #2
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
vld1.8 {q9}, [r1]!
vld1.8 {q10}, [r1]!
@@ -733,9 +728,9 @@ ENTRY(aesbs_cbc_decrypt)
vld1.8 {q15}, [r1]!
W(nop)
-1: __adr ip, 2f
+1: adr ip, 2f
sub ip, ip, lr, lsl #3
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
veor q0, q0, q8
vst1.8 {q0}, [r0]!
@@ -804,13 +799,13 @@ ENTRY(aesbs_ctr_encrypt)
vmov q6, q0
vmov q7, q0
- __adr ip, 0f
+ adr ip, 0f
sub lr, r5, #1
and lr, lr, #7
cmp r5, #8
sub ip, ip, lr, lsl #5
sub ip, ip, lr, lsl #2
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
next_ctr q1
next_ctr q2
@@ -824,13 +819,13 @@ ENTRY(aesbs_ctr_encrypt)
mov rounds, r3
bl aesbs_encrypt8
- __adr ip, 1f
+ adr ip, 1f
and lr, r5, #7
cmp r5, #8
movgt r4, #0
ldrle r4, [sp, #40] // load final in the last round
sub ip, ip, lr, lsl #2
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
vld1.8 {q8}, [r1]!
vld1.8 {q9}, [r1]!
@@ -843,10 +838,10 @@ ENTRY(aesbs_ctr_encrypt)
1: bne 2f
vld1.8 {q15}, [r1]!
-2: __adr ip, 3f
+2: adr ip, 3f
cmp r5, #8
sub ip, ip, lr, lsl #3
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
veor q0, q0, q8
vst1.8 {q0}, [r0]!
@@ -900,12 +895,12 @@ __xts_prepare8:
vshr.u64 d30, d31, #7
vmov q12, q14
- __adr ip, 0f
+ adr ip, 0f
and r4, r6, #7
cmp r6, #8
sub ip, ip, r4, lsl #5
mov r4, sp
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
vld1.8 {q0}, [r1]!
next_tweak q12, q14, q15, q13
@@ -961,8 +956,7 @@ ENDPROC(__xts_prepare8)
push {r4-r8, lr}
mov r5, sp // preserve sp
ldrd r6, r7, [sp, #24] // get blocks and iv args
- ldr r8, [sp, #32] // reorder final tweak?
- rsb r8, r8, #1
+ rsb r8, ip, #1
sub ip, sp, #128 // make room for 8x tweak
bic ip, ip, #0xf // align sp to 16 bytes
mov sp, ip
@@ -973,12 +967,12 @@ ENDPROC(__xts_prepare8)
mov rounds, r3
bl \do8
- __adr ip, 0f
+ adr ip, 0f
and lr, r6, #7
cmp r6, #8
sub ip, ip, lr, lsl #2
mov r4, sp
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
vld1.8 {q8}, [r4, :128]!
vld1.8 {q9}, [r4, :128]!
@@ -989,9 +983,9 @@ ENDPROC(__xts_prepare8)
vld1.8 {q14}, [r4, :128]!
vld1.8 {q15}, [r4, :128]
-0: __adr ip, 1f
+0: adr ip, 1f
sub ip, ip, lr, lsl #3
- bxlt ip // computed goto if blocks < 8
+ movlt pc, ip // computed goto if blocks < 8
veor \o0, \o0, q8
vst1.8 {\o0}, [r0]!
@@ -1018,9 +1012,11 @@ ENDPROC(__xts_prepare8)
.endm
ENTRY(aesbs_xts_encrypt)
+ mov ip, #0 // never reorder final tweak
__xts_crypt aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5
ENDPROC(aesbs_xts_encrypt)
ENTRY(aesbs_xts_decrypt)
+ ldr ip, [sp, #8] // reorder final tweak?
__xts_crypt aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5
ENDPROC(aesbs_xts_decrypt)
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index e6fd32919c81..bda8bf17631e 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -8,7 +8,6 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
-#include <crypto/cbc.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
@@ -49,7 +48,7 @@ struct aesbs_ctx {
struct aesbs_cbc_ctx {
struct aesbs_ctx key;
- struct crypto_cipher *enc_tfm;
+ struct crypto_skcipher *enc_tfm;
};
struct aesbs_xts_ctx {
@@ -140,19 +139,23 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
kernel_neon_end();
memzero_explicit(&rk, sizeof(rk));
- return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
+ return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
}
-static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
+static int cbc_encrypt(struct skcipher_request *req)
{
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
-}
+ skcipher_request_set_tfm(subreq, ctx->enc_tfm);
+ skcipher_request_set_callback(subreq,
+ skcipher_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
-static int cbc_encrypt(struct skcipher_request *req)
-{
- return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
+ return crypto_skcipher_encrypt(subreq);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -183,20 +186,27 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int cbc_init(struct crypto_tfm *tfm)
+static int cbc_init(struct crypto_skcipher *tfm)
{
- struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned int reqsize;
- ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
+ ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->enc_tfm))
+ return PTR_ERR(ctx->enc_tfm);
- return PTR_ERR_OR_ZERO(ctx->enc_tfm);
+ reqsize = sizeof(struct skcipher_request);
+ reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
+ crypto_skcipher_set_reqsize(tfm, reqsize);
+
+ return 0;
}
-static void cbc_exit(struct crypto_tfm *tfm)
+static void cbc_exit(struct crypto_skcipher *tfm)
{
- struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_cipher(ctx->enc_tfm);
+ crypto_free_skcipher(ctx->enc_tfm);
}
static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -304,9 +314,9 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return aesbs_setkey(tfm, in_key, key_len);
}
-static int xts_init(struct crypto_tfm *tfm)
+static int xts_init(struct crypto_skcipher *tfm)
{
- struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->cts_tfm))
@@ -319,9 +329,9 @@ static int xts_init(struct crypto_tfm *tfm)
return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
}
-static void xts_exit(struct crypto_tfm *tfm)
+static void xts_exit(struct crypto_skcipher *tfm)
{
- struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->tweak_tfm);
crypto_free_cipher(ctx->cts_tfm);
@@ -432,8 +442,6 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_init = cbc_init,
- .base.cra_exit = cbc_exit,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -442,6 +450,8 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_cbc_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
+ .init = cbc_init,
+ .exit = cbc_exit,
}, {
.base.cra_name = "__ctr(aes)",
.base.cra_driver_name = "__ctr-aes-neonbs",
@@ -483,8 +493,6 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_init = xts_init,
- .base.cra_exit = xts_exit,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -493,6 +501,8 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_xts_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
+ .init = xts_init,
+ .exit = xts_exit,
} };
static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
diff --git a/arch/arm/crypto/curve25519-glue.c b/arch/arm/crypto/curve25519-glue.c
index 776ae07e0469..31eb75b6002f 100644
--- a/arch/arm/crypto/curve25519-glue.c
+++ b/arch/arm/crypto/curve25519-glue.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jump_label.h>
+#include <linux/scatterlist.h>
#include <crypto/curve25519.h>
asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE],
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
index 13cfef4ae22e..3023c1acfa19 100644
--- a/arch/arm/crypto/poly1305-glue.c
+++ b/arch/arm/crypto/poly1305-glue.c
@@ -20,6 +20,7 @@
void poly1305_init_arm(void *state, const u8 *key);
void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
+void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce);
void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index 9f96ff48e4a8..f3a2b54efd4e 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -175,7 +175,6 @@ $code=<<___;
#else
.syntax unified
# ifdef __thumb2__
-# define adrl adr
.thumb
# else
.code 32
@@ -471,7 +470,8 @@ sha256_block_data_order_neon:
stmdb sp!,{r4-r12,lr}
sub $H,sp,#16*4+16
- adrl $Ktbl,K256
+ adr $Ktbl,.Lsha256_block_data_order
+ sub $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256
bic $H,$H,#15 @ align for 128-bit stores
mov $t2,sp
mov sp,$H @ alloca
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index ea04b2ab0c33..6363014a50d7 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -56,7 +56,6 @@
#else
.syntax unified
# ifdef __thumb2__
-# define adrl adr
.thumb
# else
.code 32
@@ -1885,7 +1884,8 @@ sha256_block_data_order_neon:
stmdb sp!,{r4-r12,lr}
sub r11,sp,#16*4+16
- adrl r14,K256
+ adr r14,.Lsha256_block_data_order
+ sub r14,r14,#.Lsha256_block_data_order-K256
bic r11,r11,#15 @ align for 128-bit stores
mov r12,sp
mov sp,r11 @ alloca
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index 69df68981acd..2fc3516912fa 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -212,7 +212,6 @@ $code=<<___;
#else
.syntax unified
# ifdef __thumb2__
-# define adrl adr
.thumb
# else
.code 32
@@ -602,7 +601,8 @@ sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add $len,$inp,$len,lsl#7 @ len to point at the end of inp
VFP_ABI_PUSH
- adrl $Ktbl,K512
+ adr $Ktbl,.Lsha512_block_data_order
+ sub $Ktbl,$Ktbl,.Lsha512_block_data_order-K512
vldmia $ctx,{$A-$H} @ load context
.Loop_neon:
___
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index cb147db5cbfe..03014624f2ab 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -79,7 +79,6 @@
#else
.syntax unified
# ifdef __thumb2__
-# define adrl adr
.thumb
# else
.code 32
@@ -543,7 +542,8 @@ sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add r2,r1,r2,lsl#7 @ len to point at the end of inp
VFP_ABI_PUSH
- adrl r3,K512
+ adr r3,.Lsha512_block_data_order
+ sub r3,r3,.Lsha512_block_data_order-K512
vldmia r0,{d16-d23} @ load context
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index ed6073fee338..f0f54aef3724 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -35,23 +35,20 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+csum_partial_copy_from_user(const void __user *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
static inline
-__wsum csum_and_copy_from_user (const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ if (!access_ok(src, len))
+ return 0;
- if (len)
- *err_ptr = -EFAULT;
-
- return sum;
+ return csum_partial_copy_from_user(src, dst, len);
}
/*
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 5dcf3c6011b7..3ee4f4381985 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -66,25 +66,24 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
/*
- * The kernel zImage should preferably be located between 32 MB and 128 MB
- * from the base of DRAM. The min address leaves space for a maximal size
- * uncompressed image, and the max address is due to how the zImage decompressor
- * picks a destination address.
+ * phys-to-virt patching requires that the physical to virtual offset fits
+ * into the immediate field of an add/sub instruction, which comes down to the
+ * 24 least significant bits being zero, and so the offset should be a multiple
+ * of 16 MB. Since PAGE_OFFSET itself is a multiple of 16 MB, the physical
+ * base should be aligned to 16 MB as well.
*/
-#define ZIMAGE_OFFSET_LIMIT SZ_128M
-#define MIN_ZIMAGE_OFFSET MAX_UNCOMP_KERNEL_SIZE
+#define EFI_PHYS_ALIGN SZ_16M
-/* on ARM, the FDT should be located in the first 128 MB of RAM */
-static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
+/* on ARM, the FDT should be located in a lowmem region */
+static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr)
{
- return dram_base + ZIMAGE_OFFSET_LIMIT;
+ return round_down(image_addr, EFI_PHYS_ALIGN) + SZ_512M;
}
/* on ARM, the initrd should be loaded in a lowmem region */
-static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
- unsigned long image_addr)
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
- return dram_base + SZ_512M;
+ return round_down(image_addr, EFI_PHYS_ALIGN) + SZ_512M;
}
struct efi_arm_entry_state {
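
The new limits are derived from the image address instead of a DRAM base. A minimal sketch of the arithmetic, assuming a made-up zImage load address (round_down() and the SZ_* constants are the kernel helpers used above):

    /* Hypothetical load address of the EFI-booted zImage. */
    unsigned long image_addr = 0x42340000;
    unsigned long base    = round_down(image_addr, SZ_16M);  /* 0x42000000 */
    unsigned long fdt_max = base + SZ_512M;                  /* 0x62000000 */

Both efi_get_max_fdt_addr() and efi_get_max_initrd_addr() now resolve to the same 512 MB window above the 16 MB-aligned physical base.
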
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 7a88f160b1fb..b95848ed2bc7 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -6,29 +6,12 @@
#include <linux/threads.h>
#include <asm/irq.h>
-/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI 7
-
typedef struct {
unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
- unsigned int ipi_irqs[NR_IPI];
-#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#else
-#define smp_irq_stat_cpu(cpu) 0
-#endif
-
-#define arch_irq_stat_cpu smp_irq_stat_cpu
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a91f21e3c5b5..0ca55a607d0a 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -39,11 +39,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
*/
extern void smp_init_cpus(void);
-
/*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
/*
* Called from platform specific assembly code, this is the
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 9415222b49ad..b8cbe03ad260 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -59,6 +59,7 @@ __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
#ifdef CONFIG_ARM_LPAE
struct page *page = virt_to_page(pmdp);
+ pgtable_pmd_page_dtor(page);
tlb_remove_table(tlb, page);
#endif
}
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index e0593cf095d0..470299ee2fba 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -7,8 +7,13 @@
#include <linux/cpumask.h>
#include <linux/arch_topology.h>
+/* big.LITTLE switcher is incompatible with frequency invariance */
+#ifndef CONFIG_BL_SWITCHER
/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_set_freq_scale topology_set_freq_scale
#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+#endif
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index 381a8e105fa5..4a91428c324d 100644
--- a/arch/arm/kernel/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/vmlinux.lds.h>
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
@@ -49,8 +50,29 @@
EXIT_CALL \
ARM_MMU_DISCARD(*(.text.fixup)) \
ARM_MMU_DISCARD(*(__ex_table)) \
- *(.discard) \
- *(.discard.*)
+ COMMON_DISCARDS
+
+/*
+ * Sections that should stay zero sized, which is safer to explicitly
+ * check instead of blindly discarding.
+ */
+#define ARM_ASSERTS \
+ .plt : { \
+ *(.iplt) *(.rel.iplt) *(.iplt) *(.igot.plt) \
+ } \
+ ASSERT(SIZEOF(.plt) == 0, \
+ "Unexpected run-time procedure linkages detected!")
+
+#define ARM_DETAILS \
+ ELF_DETAILS \
+ .ARM.attributes 0 : { *(.ARM.attributes) }
+
+#define ARM_STUBS_TEXT \
+ *(.gnu.warning) \
+ *(.glue_7) \
+ *(.glue_7t) \
+ *(.vfp11_veneer) \
+ *(.v4_bx)
#define ARM_TEXT \
IDMAP_TEXT \
@@ -64,9 +86,7 @@
CPUIDLE_TEXT \
LOCK_TEXT \
KPROBES_TEXT \
- *(.gnu.warning) \
- *(.glue_7) \
- *(.glue_7t) \
+ ARM_STUBS_TEXT \
. = ALIGN(4); \
*(.got) /* Global offset table */ \
ARM_CPU_KEEP(PROC_INFO)
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 31bbc803cecb..dc7f6e91aafa 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1 +1,6 @@
#include <xen/arm/page.h>
+
+static inline bool xen_kernel_unmapped_at_usr(void)
+{
+ return false;
+}
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fff88e61252..7a4853b1213a 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -547,7 +547,7 @@ static int arch_build_bp_info(struct perf_event *bp,
if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
- /* Else, fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -612,12 +612,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Else, fall through */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Else, fall through */
+ fallthrough;
default:
ret = -EINVAL;
goto out;
@@ -884,7 +884,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
break;
case ARM_ENTRY_ASYNC_WATCHPOINT:
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
- /* Fall through */
+ fallthrough;
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs);
break;
@@ -933,7 +933,7 @@ static bool core_has_os_save_restore(void)
ARM_DBG_READ(c1, c1, 4, oslsr);
if (oslsr & ARM_OSLSR_OSLM0)
return true;
- /* Else, fall through */
+ fallthrough;
default:
return false;
}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ee514034c0a1..698b6f636156 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -18,7 +18,6 @@
* IRQ's are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
-#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d8e18cdd96d3..3f65d0ac9f63 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -843,20 +843,26 @@ early_param("mem", early_mem);
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
- struct memblock_region *region;
+ phys_addr_t start, end, res_end;
struct resource *res;
+ u64 i;
kernel_code.start = virt_to_phys(_text);
kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
- for_each_memblock(memory, region) {
- phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
- phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ for_each_mem_range(i, &start, &end) {
unsigned long boot_alias_start;
/*
+ * In memblock, end points to the first byte after the
+ * range while in resources, end points to the last byte in
+ * the range.
+ */
+ res_end = end - 1;
+
+ /*
* Some systems have a special memory alias which is only
* used for booting. We need to advertise this region to
* kexec-tools so they know where bootable RAM is located.
@@ -869,7 +875,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
__func__, sizeof(*res));
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
- res->end = phys_to_idmap(end);
+ res->end = phys_to_idmap(res_end);
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
@@ -880,7 +886,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
sizeof(*res));
res->name = "System RAM";
res->start = start;
- res->end = end;
+ res->end = res_end;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
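
The comment added above spells out the subtlety of this conversion: for_each_mem_range() yields half-open [start, end) intervals, while struct resource stores an inclusive end. A small sketch with made-up addresses:

    /* Made-up range from for_each_mem_range(): [0x80000000, 0x90000000) */
    phys_addr_t start = 0x80000000, end = 0x90000000;

    struct resource res = {
            .name  = "System RAM",
            .start = start,
            .end   = end - 1,  /* 0x8fffffff, the last byte of the range */
            .flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
    };
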
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index c9dc912b83f0..c1892f733f20 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -596,7 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall)
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart -= 2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5d9da61eff62..48099c6e1e4a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/bugs.h>
@@ -65,18 +66,26 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
+ NR_IPI,
/*
* CPU_BACKTRACE is special and not included in NR_IPI
* or tracable with trace_ipi_*
*/
- IPI_CPU_BACKTRACE,
+ IPI_CPU_BACKTRACE = NR_IPI,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
* not be usable by the kernel. Please keep the above limited
* to at most 8 entries.
*/
+ MAX_IPI
};
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops __ro_after_init;
@@ -226,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu)
return cpu != 0;
}
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ disable_percpu_irq(ipi_irq_base + i);
+}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
@@ -247,6 +267,7 @@ int __cpu_disable(void)
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
/*
* OK - migrate IRQs away from this CPU
@@ -422,6 +443,8 @@ asmlinkage void secondary_start_kernel(void)
notify_cpu_starting(cpu);
+ ipi_setup(cpu);
+
calibrate_delay();
smp_store_cpu_info(cpu);
@@ -500,14 +523,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
- if (!__smp_cross_call)
- __smp_cross_call = fn;
-}
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -519,38 +534,28 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_COMPLETION, "completion interrupts"),
};
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
- __smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
+ unsigned int irq;
+
+ if (!ipi_desc[i])
+ continue;
+
+ irq = irq_desc_get_irq(ipi_desc[i]);
seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
}
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
- u64 sum = 0;
- int i;
-
- for (i = 0; i < NR_IPI; i++)
- sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
- return sum;
-}
-
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
@@ -627,15 +632,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
handle_IPI(ipinr, regs);
}
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
- struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
+ if ((unsigned)ipinr < NR_IPI)
trace_ipi_entry_rcuidle(ipi_types[ipinr]);
- __inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
switch (ipinr) {
case IPI_WAKEUP:
@@ -643,9 +645,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
tick_receive_broadcast();
- irq_exit();
break;
#endif
@@ -654,36 +654,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
ipi_cpu_stop(cpu);
- irq_exit();
break;
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
- irq_enter();
irq_work_run();
- irq_exit();
break;
#endif
case IPI_COMPLETION:
- irq_enter();
ipi_complete(cpu);
- irq_exit();
break;
case IPI_CPU_BACKTRACE:
printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
+ nmi_cpu_backtrace(get_irq_regs());
printk_nmi_exit();
break;
@@ -695,9 +685,67 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ do_handle_IPI(ipinr);
+ irq_exit();
+
set_irq_regs(old_regs);
}
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &irq_stat);
+ WARN_ON(err);
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
+}
+
void smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
@@ -805,7 +853,7 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- __smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
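
With set_smp_ipi_range(), the interrupt controller driver owns IPI allocation and hands a linear block of per-CPU IRQs to the SMP code. A hedged sketch of a hypothetical caller (a real irqchip passes a firmware-specific fwspec rather than NULL as the allocation argument):

    /*
     * Hypothetical irqchip bring-up: allocate 8 per-CPU SGIs as Linux
     * interrupts and register them as the IPI range. MAX_IPI above is 8,
     * and set_smp_ipi_range() warns if fewer are provided.
     */
    static void __init example_smp_ipi_init(struct irq_domain *d)
    {
            int base = irq_domain_alloc_irqs(d, 8, NUMA_NO_NODE, NULL);

            if (WARN_ON(base <= 0))
                    return;

            set_smp_ipi_range(base, 8);
    }
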
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index b5adaf744630..ef0058de432b 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -178,15 +178,6 @@ static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif
/*
- * The current assumption is that we can power gate each core independently.
- * This will be superseded by DT binding once available.
- */
-const struct cpumask *cpu_corepower_mask(int cpu)
-{
- return &cpu_topology[cpu].thread_sibling;
-}
-
-/*
* store_cpu_topology is called at boot when only one cpu is running
* and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
* which prevents simultaneous write access to cpu_topology array
@@ -241,20 +232,6 @@ topology_populated:
update_siblings_masks(cpuid);
}
-static inline int cpu_corepower_flags(void)
-{
- return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
-}
-
-static struct sched_domain_topology_level arm_topology[] = {
-#ifdef CONFIG_SCHED_MC
- { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
- { NULL, },
-};
-
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
@@ -265,7 +242,4 @@ void __init init_cpu_topology(void)
smp_wmb();
parse_dt_topology();
-
- /* Set scheduler topology descriptor */
- set_sched_topology(arm_topology);
}
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 6d2be994ae58..50136828f5b5 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -9,15 +9,13 @@
#include <linux/sizes.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
-#include "vmlinux.lds.h"
-
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -152,6 +150,10 @@ SECTIONS
_end = .;
STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
}
/*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7f24bc08403e..5f4922e858d0 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -9,15 +9,13 @@
#else
#include <linux/pgtable.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
-#include "vmlinux.lds.h"
-
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -151,6 +149,10 @@ SECTIONS
_end = .;
STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
}
#ifdef CONFIG_STRICT_KERNEL_RWX
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 184d97254a7a..1ca6aadd649c 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -9,8 +9,8 @@
.text
-/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum)
- * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum
+/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len)
+ * Params : r0 = src, r1 = dst, r2 = len
* Returns : r0 = new checksum
*/
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 0b706a39a677..0fd5c10e90a7 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -86,6 +86,7 @@ sum .req r3
FN_ENTRY
save_regs
+ mov sum, #-1
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6bd3a93eaa3c..6928781e6bee 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -62,9 +62,9 @@
/*
* unsigned int
- * csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr)
- * r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr
- * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ * csum_partial_copy_from_user(const char *src, char *dst, int len)
+ * r0 = src, r1 = dst, r2 = len
+ * Returns : r0 = checksum or 0
*/
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
@@ -73,25 +73,11 @@
#include "csumpartialcopygeneric.S"
/*
- * FIXME: minor buglet here
- * We don't return the checksum for the data present in the buffer. To do
- * so properly, we would have to add in whatever registers were loaded before
- * the fault, which, with the current asm above is not predictable.
+ * We report a fault by returning a 0 csum - impossible in the normal case,
+ * since we start with 0xffffffff for the initial sum.
*/
.pushsection .text.fixup,"ax"
.align 4
-9001: mov r4, #-EFAULT
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r5, [sp, #9*4] @ *err_ptr
-#else
- ldr r5, [sp, #8*4] @ *err_ptr
-#endif
- str r4, [r5]
- ldmia sp, {r1, r2} @ retrieve dst, len
- add r2, r2, r1
- mov r0, #0 @ zero the buffer
-9002: teq r2, r1
- strbne r0, [r1], #1
- bne 9002b
+9001: mov r0, #0
load_regs
.popsection
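
The fixup path now relies on the convention spelled out in the comment above: the copy routine primes the running sum with 0xffffffff (the mov sum, #-1 added to csumpartialcopygeneric.S), so a successful copy can never return 0 and callers detect a fault from the return value alone. A minimal caller-side sketch with made-up buffer names:

    __wsum csum = csum_and_copy_from_user(user_buf, kernel_buf, len);
    if (!csum)
            return -EFAULT;  /* 0 is only returned when the user access fails */
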
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 1c05c5bf7e5c..757032d82f63 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -49,7 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
- /* Fall through */
+ fallthrough;
case THREAD_NOTIFY_EXIT:
crunch_task_release(thread);
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 24dd5bbe60e4..094337dc1bc7 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -24,7 +24,9 @@ static int imx6q_enter_wait(struct cpuidle_device *dev,
imx6_set_lpm(WAIT_UNCLOCKED);
raw_spin_unlock(&cpuidle_lock);
+ rcu_idle_enter();
cpu_do_idle();
+ rcu_idle_exit();
raw_spin_lock(&cpuidle_lock);
if (num_idle_cpus-- == num_online_cpus())
@@ -44,7 +46,7 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
{
.exit_latency = 50,
.target_residency = 75,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE,
.enter = imx6q_enter_wait,
.name = "WAIT",
.desc = "Clock off",
diff --git a/arch/arm/mach-mmp/pm-mmp2.c b/arch/arm/mach-mmp/pm-mmp2.c
index 2d86381e152d..7a6f74c32d42 100644
--- a/arch/arm/mach-mmp/pm-mmp2.c
+++ b/arch/arm/mach-mmp/pm-mmp2.c
@@ -123,19 +123,19 @@ void mmp2_pm_enter_lowpower_mode(int state)
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_PCR_PJ_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_CHIP_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN;
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_PCR_PJ_APBSD; /* set APBSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_PCR_PJ_AXISD; /* set AXISDD bit */
apcr |= MPMU_PCR_PJ_DDRCORSD; /* set DDRCORSD bit */
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_PWRDWN; /* PJ power down */
apcr |= MPMU_PCR_PJ_SPSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_IDLE; /* set the IDLE bit */
idle_cfg &= ~APMU_PJ_IDLE_CFG_ISO_MODE_CNTRL_MASK;
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 69ebe18ff209..1d71d73c1862 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -145,23 +145,23 @@ void pxa910_pm_enter_lowpower_mode(int state)
case POWER_MODE_UDR:
/* only shutdown APB in UDR */
apcr |= MPMU_APCR_STBYEN | MPMU_APCR_APBSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_APCR_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_APCR_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_APCR_DDRCORSD; /* set DDRCORSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_APCR_AXISD; /* set AXISDD bit */
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_IDLE;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWRDWN;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWR_SW(3)
| APMU_MOH_IDLE_CFG_MOH_L2_PWR_SW(3);
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_INTIDLE:
break;
}
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 1d119b974f5f..59755b5a1ad7 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -396,7 +396,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "3.1";
break;
case 7:
- /* FALLTHROUGH */
default:
/* Use the latest known revision as default */
omap_revision = OMAP3430_REV_ES3_1_2;
@@ -416,7 +415,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.0";
break;
case 1:
- /* FALLTHROUGH */
default:
omap_revision = AM35XX_REV_ES1_1;
cpu_rev = "1.1";
@@ -435,7 +433,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = OMAP3630_REV_ES1_2;
cpu_rev = "1.2";
@@ -456,7 +453,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8168_REV_ES2_1;
cpu_rev = "2.1";
@@ -473,7 +469,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM335X_REV_ES2_1;
cpu_rev = "2.1";
@@ -491,7 +486,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM437X_REV_ES1_2;
cpu_rev = "1.2";
@@ -502,7 +496,6 @@ void __init omap3xxx_check_revision(void)
case 0xb968:
switch (rev) {
case 0:
- /* FALLTHROUGH */
case 1:
omap_revision = TI8148_REV_ES1_0;
cpu_rev = "1.0";
@@ -512,7 +505,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8148_REV_ES2_1;
cpu_rev = "2.1";
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index 54aff33e55e6..bfa5e1b8dba7 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -74,7 +74,7 @@ static struct powerdomain *_get_pwrdm(struct device *dev)
return pwrdm;
clk = of_clk_get(dev->of_node->parent, 0);
- if (!clk) {
+ if (IS_ERR(clk)) {
dev_err(dev, "no fck found\n");
return NULL;
}
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 6b4548f3b57f..fc7bb2ca1672 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -240,7 +240,7 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
omap_auxdata_legacy_init(dev);
- /* fall through */
+ fallthrough;
default:
od = to_omap_device(pdev);
if (od)
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 6df395fff971..f5dfddf492e2 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
if (omap_irq_pending())
return;
- trace_cpu_idle_rcuidle(1, smp_processor_id());
-
omap_sram_idle();
-
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index d13344b2ddcd..87cb47220e82 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -624,7 +624,7 @@ static void __init dns323_init(void)
dns323ab_leds[0].active_low = 1;
gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable");
gpio_direction_output(DNS323_GPIO_LED_POWER1, 0);
- /* Fall through */
+ fallthrough;
case DNS323_REV_B1:
i2c_register_board_info(0, dns323ab_i2c_devices,
ARRAY_SIZE(dns323ab_i2c_devices));
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index ea2c84214bac..d23970bd638d 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,7 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
switch (tag->u.acorn.vram_pages) {
case 512:
vram_size += PAGE_SIZE * 256;
- /* Fall through - ??? */
+ fallthrough; /* ??? */
case 256:
vram_size += PAGE_SIZE * 256;
default:
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 76a65df42d10..d5c805adf7a8 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void)
switch (err) {
case -ENOSYS:
tegra_cpu_reset_handler_set(reset_address);
- /* fall through */
+ fallthrough;
case 0:
is_enabled = true;
break;
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index f4bfc1cac91a..ea81e89e7740 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -694,7 +694,7 @@ thumb2arm(u16 tinstr)
return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
(tinstr & 255); /* register_list */
}
- /* Else, fall through - for illegal instruction case */
+ fallthrough; /* for illegal instruction case */
default:
return BAD_INSTR;
@@ -750,7 +750,7 @@ do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
case 0xe8e0:
case 0xe9e0:
poffset->un = (tinst2 & 0xff) << 2;
- /* Fall through */
+ fallthrough;
case 0xe940:
case 0xe9c0:
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 000c1b48e973..45f9d5ec2360 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -299,16 +299,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
*/
static void __init free_unused_memmap(void)
{
- unsigned long start, prev_end = 0;
- struct memblock_region *reg;
+ unsigned long start, end, prev_end = 0;
+ int i;
/*
* This relies on each bank being in address order.
* The banks are sorted previously in bootmem_init().
*/
- for_each_memblock(memory, reg) {
- start = memblock_region_memory_base_pfn(reg);
-
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
/*
* Take care not to free memmap entries that don't exist
@@ -336,8 +334,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES.
*/
- prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
- MAX_ORDER_NR_PAGES);
+ prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
}
#ifdef CONFIG_SPARSEMEM
@@ -347,61 +344,29 @@ static void __init free_unused_memmap(void)
#endif
}
-#ifdef CONFIG_HIGHMEM
-static inline void free_area_high(unsigned long pfn, unsigned long end)
-{
- for (; pfn < end; pfn++)
- free_highmem_page(pfn_to_page(pfn));
-}
-#endif
-
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long max_low = max_low_pfn;
- struct memblock_region *mem, *res;
+ phys_addr_t range_start, range_end;
+ u64 i;
/* set highmem page free */
- for_each_memblock(memory, mem) {
- unsigned long start = memblock_region_memory_base_pfn(mem);
- unsigned long end = memblock_region_memory_end_pfn(mem);
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &range_start, &range_end, NULL) {
+ unsigned long start = PHYS_PFN(range_start);
+ unsigned long end = PHYS_PFN(range_end);
/* Ignore complete lowmem entries */
if (end <= max_low)
continue;
- if (memblock_is_nomap(mem))
- continue;
-
/* Truncate partial highmem entries */
if (start < max_low)
start = max_low;
- /* Find and exclude any reserved regions */
- for_each_memblock(reserved, res) {
- unsigned long res_start, res_end;
-
- res_start = memblock_region_reserved_base_pfn(res);
- res_end = memblock_region_reserved_end_pfn(res);
-
- if (res_end < start)
- continue;
- if (res_start < start)
- res_start = start;
- if (res_start > end)
- res_start = end;
- if (res_end > end)
- res_end = end;
- if (res_start != start)
- free_area_high(start, res_start);
- start = res_end;
- if (start == end)
- break;
- }
-
- /* And now free anything which remains */
- if (start < end)
- free_area_high(start, end);
+ for (; start < end; start++)
+ free_highmem_page(pfn_to_page(start));
}
#endif
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c36f977b2ccb..698cc740c6b8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1154,9 +1154,8 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
void __init adjust_lowmem_bounds(void)
{
- phys_addr_t memblock_limit = 0;
- u64 vmalloc_limit;
- struct memblock_region *reg;
+ phys_addr_t block_start, block_end, memblock_limit = 0;
+ u64 vmalloc_limit, i;
phys_addr_t lowmem_limit = 0;
/*
@@ -1172,26 +1171,18 @@ void __init adjust_lowmem_bounds(void)
* The first usable region must be PMD aligned. Mark its start
* as MEMBLOCK_NOMAP if it isn't
*/
- for_each_memblock(memory, reg) {
- if (!memblock_is_nomap(reg)) {
- if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
- phys_addr_t len;
+ for_each_mem_range(i, &block_start, &block_end) {
+ if (!IS_ALIGNED(block_start, PMD_SIZE)) {
+ phys_addr_t len;
- len = round_up(reg->base, PMD_SIZE) - reg->base;
- memblock_mark_nomap(reg->base, len);
- }
- break;
+ len = round_up(block_start, PMD_SIZE) - block_start;
+ memblock_mark_nomap(block_start, len);
}
+ break;
}
- for_each_memblock(memory, reg) {
- phys_addr_t block_start = reg->base;
- phys_addr_t block_end = reg->base + reg->size;
-
- if (memblock_is_nomap(reg))
- continue;
-
- if (reg->base < vmalloc_limit) {
+ for_each_mem_range(i, &block_start, &block_end) {
+ if (block_start < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
* Compare as u64 to ensure vmalloc_limit does
@@ -1440,19 +1431,15 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
- struct memblock_region *reg;
phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t start, end;
+ u64 i;
/* Map all the lowmem memory banks. */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
+ for_each_mem_range(i, &start, &end) {
struct map_desc map;
- if (memblock_is_nomap(reg))
- continue;
-
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index 699fa2e88725..88950e41a3a9 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -231,12 +231,12 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
void __init pmsav7_adjust_lowmem_bounds(void)
{
phys_addr_t specified_mem_size = 0, total_mem_size = 0;
- struct memblock_region *reg;
- bool first = true;
phys_addr_t mem_start;
phys_addr_t mem_end;
+ phys_addr_t reg_start, reg_end;
unsigned int mem_max_regions;
- int num, i;
+ int num;
+ u64 i;
/* Free-up PMSAv7_PROBE_REGION */
mpu_min_region_order = __mpu_min_region_order();
@@ -262,20 +262,19 @@ void __init pmsav7_adjust_lowmem_bounds(void)
mem_max_regions -= num;
#endif
- for_each_memblock(memory, reg) {
- if (first) {
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ if (i == 0) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
* Initially only use memory continuous from
* PHYS_OFFSET */
- if (reg->base != phys_offset)
+ if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
- mem_start = reg->base;
- mem_end = reg->base + reg->size;
- specified_mem_size = reg->size;
- first = false;
+ mem_start = reg_start;
+ mem_end = reg_end;
+ specified_mem_size = mem_end - mem_start;
} else {
/*
* memblock auto merges contiguous blocks, remove
@@ -283,8 +282,8 @@ void __init pmsav7_adjust_lowmem_bounds(void)
* blocks separately while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
- &mem_end, &reg->base);
- memblock_remove(reg->base, 0 - reg->base);
+ &mem_end, &reg_start);
+ memblock_remove(reg_start, 0 - reg_start);
break;
}
}
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
index 0d7d5fb59247..2de019f7503e 100644
--- a/arch/arm/mm/pmsa-v8.c
+++ b/arch/arm/mm/pmsa-v8.c
@@ -94,20 +94,19 @@ static __init bool is_region_fixed(int number)
void __init pmsav8_adjust_lowmem_bounds(void)
{
phys_addr_t mem_end;
- struct memblock_region *reg;
- bool first = true;
+ phys_addr_t reg_start, reg_end;
+ u64 i;
- for_each_memblock(memory, reg) {
- if (first) {
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ if (i == 0) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
* Initially only use memory continuous from
* PHYS_OFFSET */
- if (reg->base != phys_offset)
+ if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
- mem_end = reg->base + reg->size;
- first = false;
+ mem_end = reg_end;
} else {
/*
* memblock auto merges contiguous blocks, remove
@@ -115,8 +114,8 @@ void __init pmsav8_adjust_lowmem_bounds(void)
* blocks separately while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
- &mem_end, &reg->base);
- memblock_remove(reg->base, 0 - reg->base);
+ &mem_end, &reg_start);
+ memblock_remove(reg_start, 0 - reg_start);
break;
}
}
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index c0fbfca5da8b..114c05ab4dd9 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -71,7 +71,7 @@ static void cpu_v7_spectre_init(void)
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
break;
- /* fallthrough */
+ fallthrough;
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72: {
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index b2e9e822426f..1eb59003bdec 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -309,14 +309,14 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
* not supported by current hardware on OMAP1
* w |= (0x03 << 7);
*/
- /* fall through */
+ fallthrough;
case OMAP_DMA_DATA_BURST_16:
if (dma_omap2plus()) {
burst = 0x3;
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
BUG();
}
@@ -393,7 +393,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
diff --git a/arch/arm/probes/decode.c b/arch/arm/probes/decode.c
index fe81a9c21f2d..c84053a81358 100644
--- a/arch/arm/probes/decode.c
+++ b/arch/arm/probes/decode.c
@@ -307,7 +307,7 @@ static bool __kprobes decode_regs(probes_opcode_t *pinsn, u32 regs, bool modify)
case REG_TYPE_NOPCWB:
if (!is_writeback(insn))
break; /* No writeback, so any register is OK */
- /* fall through... */
+ fallthrough;
case REG_TYPE_NOPC:
case REG_TYPE_NOPCX:
/* Reject PC (R15) */
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 90b5bc723c83..a9653117ca0d 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -280,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
- /* fall through */
+ fallthrough;
default:
/* impossible cases */
BUG();
@@ -413,87 +413,15 @@ void __naked __kprobes kretprobe_trampoline(void)
/* Called from kretprobe_trampoline */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
- kprobe_opcode_t *correct_ret_addr = NULL;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * a return probe installed on them, and/or more than one return
- * probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (ri->rp && ri->rp->handler) {
- __this_cpu_write(current_kprobe, &ri->rp->kp);
- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
- ri->ret_addr = correct_ret_addr;
- ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, NULL);
- }
-
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
-
- return (void *)orig_ret_address;
+ return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline,
+ (void *)regs->ARM_fp);
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
+ ri->fp = (void *)regs->ARM_fp;
/* Replace the return addr with trampoline addr. */
regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index e93145d72c26..60e901cd0de6 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -150,7 +150,7 @@ static int xen_starting_cpu(unsigned int cpu)
pr_info("Xen: initializing cpu%d\n", cpu);
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
- info.mfn = virt_to_gfn(vcpup);
+ info.mfn = percpu_to_gfn(vcpup);
info.offset = xen_offset_in_page(vcpup);
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
@@ -158,7 +158,8 @@ static int xen_starting_cpu(unsigned int cpu)
BUG_ON(err);
per_cpu(xen_vcpu, cpu) = vcpup;
- xen_setup_runstate_info(cpu);
+ if (!xen_kernel_unmapped_at_usr())
+ xen_setup_runstate_info(cpu);
after_register_vcpu_info:
enable_percpu_irq(xen_events_irq, 0);
@@ -387,7 +388,8 @@ static int __init xen_guest_init(void)
return -EINVAL;
}
- xen_time_setup_guest();
+ if (!xen_kernel_unmapped_at_usr())
+ xen_time_setup_guest();
if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 396797ffe2b1..d3ef975a0965 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -25,11 +25,12 @@
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
- struct memblock_region *reg;
+ phys_addr_t base;
gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
+ u64 i;
- for_each_memblock(memory, reg) {
- if (reg->base < (phys_addr_t)0xffffffff) {
+ for_each_mem_range(i, &base, NULL) {
+ if (base < (phys_addr_t)0xffffffff) {
if (IS_ENABLED(CONFIG_ZONE_DMA32))
flags |= __GFP_DMA32;
else
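
The hunk above (like the pmsav8_adjust_lowmem_bounds hunk earlier in this patch) stops dereferencing struct memblock_region in the caller and instead receives plain start/end addresses from the iterator; the Xen hunk passes NULL for the end pointer since only the base address is tested. A mock, self-contained illustration of that calling convention, not the kernel API itself:

    #include <stdio.h>

    /* Mock data and iterator, for illustration only. */
    struct range { unsigned long long start, end; };

    static const struct range memory[] = {
        { 0x80000000ULL,  0xc0000000ULL },
        { 0x880000000ULL, 0x900000000ULL },
    };

    #define NR_RANGES (sizeof(memory) / sizeof(memory[0]))

    /* Hands back (start, end) pairs, so callers never see the struct. */
    #define for_each_range(i, p_start, p_end)                              \
        for ((i) = 0; (i) < NR_RANGES &&                                   \
             (*(p_start) = memory[(i)].start,                              \
              *(p_end) = memory[(i)].end, 1);                              \
             (i)++)

    int main(void)
    {
        unsigned long long start, end;
        unsigned int i;

        for_each_range(i, &start, &end)
            if (start < 0xffffffffULL)
                printf("range %u below 4G: %#llx-%#llx\n", i, start, end);
        return 0;
    }
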
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d232837cbee..893130ce1626 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -29,6 +29,7 @@ config ARM64
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
+ select ARCH_STACKWALK
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -106,6 +107,7 @@ config ARM64
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
+ select GENERIC_IRQ_IPI
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@@ -211,12 +213,18 @@ config ARM64_PAGE_SHIFT
default 14 if ARM64_16K_PAGES
default 12
-config ARM64_CONT_SHIFT
+config ARM64_CONT_PTE_SHIFT
int
default 5 if ARM64_64K_PAGES
default 7 if ARM64_16K_PAGES
default 4
+config ARM64_CONT_PMD_SHIFT
+ int
+ default 5 if ARM64_64K_PAGES
+ default 5 if ARM64_16K_PAGES
+ default 4
+
config ARCH_MMAP_RND_BITS_MIN
default 14 if ARM64_64K_PAGES
default 16 if ARM64_16K_PAGES
@@ -1033,19 +1041,6 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
config CC_HAVE_SHADOW_CALL_STACK
def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
config PARAVIRT
bool "Enable paravirtualization code"
help
@@ -1165,32 +1160,6 @@ config UNMAP_KERNEL_AT_EL0
If unsure, say Y.
-config HARDEN_BRANCH_PREDICTOR
- bool "Harden the branch predictor against aliasing attacks" if EXPERT
- default y
- help
- Speculation attacks against some high-performance processors rely on
- being able to manipulate the branch predictor for a victim context by
- executing aliasing branches in the attacker context. Such attacks
- can be partially mitigated against by clearing internal branch
- predictor state and limiting the prediction logic in some situations.
-
- This config option will take CPU-specific actions to harden the
- branch predictor against aliasing attacks and may rely on specific
- instruction sequences or control bits being set by the system
- firmware.
-
- If unsure, say Y.
-
-config ARM64_SSBD
- bool "Speculative Store Bypass Disable" if EXPERT
- default y
- help
- This enables mitigation of the bypassing of previous stores
- by speculative loads.
-
- If unsure, say Y.
-
config RODATA_FULL_DEFAULT_ENABLED
bool "Apply r/o permissions of VM areas also to their linear aliases"
default y
@@ -1630,8 +1599,6 @@ config ARM64_BTI_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
- # https://reviews.llvm.org/rGb8ae3fdfa579dbf366b1bb1cbfdbf8c51db7fa55
- depends on !CC_IS_CLANG || CLANG_VERSION >= 100001
depends on !(CC_IS_CLANG && GCOV_KERNEL)
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
@@ -1664,6 +1631,39 @@ config ARCH_RANDOM
provides a high bandwidth, cryptographically secure
hardware random number generator.
+config ARM64_AS_HAS_MTE
+ # Initial support for MTE went in binutils 2.32.0, checked with
+ # ".arch armv8.5-a+memtag" below. However, this was incomplete
+ # as a late addition to the final architecture spec (LDGM/STGM)
+ # is only supported in the newer 2.32.x and 2.33 binutils
+ # versions, hence the extra "stgm" instruction check below.
+ def_bool $(as-instr,.arch armv8.5-a+memtag\nstgm xzr$(comma)[x0])
+
+config ARM64_MTE
+ bool "Memory Tagging Extension support"
+ default y
+ depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
+ select ARCH_USES_HIGH_VMA_FLAGS
+ help
+ Memory Tagging (part of the ARMv8.5 Extensions) provides
+ architectural support for run-time, always-on detection of
+ various classes of memory error to aid with software debugging
+ to eliminate vulnerabilities arising from memory-unsafe
+ languages.
+
+ This option enables the support for the Memory Tagging
+ Extension at EL0 (i.e. for userspace).
+
+ Selecting this option allows the feature to be detected at
+ runtime. Any secondary CPU not implementing this feature will
+ not be allowed a late bring-up.
+
+ Userspace binaries that want to use this feature must
+ explicitly opt in. The mechanism for the userspace is
+ described in:
+
+ Documentation/arm64/memory-tagging-extension.rst.
+
endmenu
config ARM64_SVE
@@ -1876,6 +1876,10 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION
def_bool y
depends on HUGETLB_PAGE && MIGRATION
+config ARCH_ENABLE_THP_MIGRATION
+ def_bool y
+ depends on TRANSPARENT_HUGEPAGE
+
menu "Power management options"
source "kernel/power/Kconfig"
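
The new ARM64_MTE help text above points at Documentation/arm64/memory-tagging-extension.rst for the userspace opt-in. A rough sketch of that opt-in follows; the fallback PR_MTE_* values are assumptions taken from that documentation and the uapi header should be preferred where available:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/prctl.h>

    /* Assumed values for illustration; use <linux/prctl.h> when the
     * toolchain headers already carry the MTE additions. */
    #ifndef PR_SET_TAGGED_ADDR_CTRL
    #define PR_SET_TAGGED_ADDR_CTRL 55
    #endif
    #ifndef PR_TAGGED_ADDR_ENABLE
    #define PR_TAGGED_ADDR_ENABLE (1UL << 0)
    #endif
    #ifndef PR_MTE_TCF_SYNC
    #define PR_MTE_TCF_SYNC (1UL << 1)
    #endif
    #ifndef PR_MTE_TAG_SHIFT
    #define PR_MTE_TAG_SHIFT 3
    #endif

    int main(void)
    {
        /*
         * Ask for synchronous tag-check faults and allow all non-zero
         * tags in the kernel's random-tag selection. Fails (EINVAL) on
         * kernels or CPUs without MTE support.
         */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL,
                  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                  (0xfffeUL << PR_MTE_TAG_SHIFT),
                  0, 0, 0)) {
            perror("prctl(PR_SET_TAGGED_ADDR_CTRL)");
            return EXIT_FAILURE;
        }
        puts("MTE tag checking enabled for this task");
        return 0;
    }
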
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 55bc8546d9c7..f4717facf31e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -11,7 +11,6 @@
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=--no-undefined -X
-CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -29,6 +28,10 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419
endif
endif
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+
ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)
@@ -47,13 +50,16 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only \
$(compat_vdso) $(cc_has_k_constraint)
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(compat_vdso)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
+# Avoid generating .eh_frame* sections.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
@@ -82,8 +88,8 @@ endif
# compiler to generate them and consequently to break the single image contract
# we pass it only to the assembler. This option is utilized only in case of non
# integrated assemblers.
-ifneq ($(CONFIG_AS_HAS_ARMV8_4), y)
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
endif
endif
@@ -91,7 +97,12 @@ KBUILD_CFLAGS += $(branch-prot-flags-y)
ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS += -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
+endif
+
+ifdef asm-arch
+KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \
+ -DARM64_ASM_ARCH='"$(asm-arch)"'
endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
@@ -127,9 +138,6 @@ endif
# Default value
head-y := arch/arm64/kernel/head.o
-# The byte offset of the kernel image in RAM from the start of RAM.
-TEXT_OFFSET := 0x0
-
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else
@@ -140,8 +148,6 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
-export TEXT_OFFSET
-
core-y += arch/arm64/
libs-y := arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
@@ -165,6 +171,8 @@ zinstall install:
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+ $(if $(CONFIG_COMPAT_VDSO), \
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 15f7b0ed3836..39802066232e 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -745,7 +745,7 @@
};
qspi: spi@66470200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index a39f0a1723e0..903c0eb61290 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 9de2aa1c573c..a5154f13a18e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -702,7 +702,7 @@
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>,
- <&clk IMX8MP_CLK_SDMA1_ROOT>;
+ <&clk IMX8MP_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index f70435cf9ad5..561fa792fe5a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -423,7 +423,7 @@
tmu: tmu@30260000 {
compatible = "fsl,imx8mq-tmu";
reg = <0x30260000 0x10000>;
- interrupt = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MQ_CLK_TMU_ROOT>;
little-endian;
fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 1a39e0ef776b..5b9ec032ce8d 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -686,6 +686,8 @@
clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
<&topckgen CLK_TOP_MSDC50_0_SEL>;
clock-names = "source", "hclk";
+ resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
+ reset-names = "hrst";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 34d249d85da7..8eb61dd9921e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -337,8 +337,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03400000 0x0 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC1>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -366,8 +367,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03420000 0x0 0x10000>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC2>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC2>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAA &emc>,
@@ -390,8 +392,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03440000 0x0 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC3>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCR &emc>,
@@ -416,8 +419,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03460000 0x0 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
<&bpmp TEGRA186_CLK_PLLC4_VCO>;
assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 48160f48003a..ca5cb6aef5ee 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -460,8 +460,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03400000 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC1>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -485,8 +486,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03440000 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC3>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCR &emc>,
@@ -511,8 +513,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03460000 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
<&bpmp TEGRA194_CLK_PLLC4>;
assigned-clock-parents =
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 829f786af133..8cca2166a446 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1194,8 +1194,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC1>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 14>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1222,8 +1223,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0200 0x0 0x200>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC2>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 9>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-1v8-drv";
@@ -1239,8 +1241,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0400 0x0 0x200>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC3>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 69>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1262,8 +1265,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0600 0x0 0x200>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC4>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 15>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv";
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 9edfae5944f7..24ef18fe77df 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -417,10 +417,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x1>;
+ ti,sci-dev-id = <100>;
+ ti,interrupt-ranges = <0 392 32>;
};
main_navss {
@@ -438,10 +438,11 @@
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x0>, <0x2>;
+ ti,sci-dev-id = <182>;
+ ti,interrupt-ranges = <0 64 64>,
+ <64 448 64>;
};
inta_main_udmass: interrupt-controller@33d00000 {
@@ -452,8 +453,7 @@
msi-controller;
ti,sci = <&dmsc>;
ti,sci-dev-id = <179>;
- ti,sci-rm-range-vint = <0x0>;
- ti,sci-rm-range-global-event = <0x1>;
+ ti,interrupt-ranges = <0 0 256>;
};
secure_proxy_main: mailbox@32c00000 {
@@ -589,7 +589,7 @@
<0x0 0x33000000 0x0 0x40000>;
reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
ti,num-rings = <818>;
- ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
ti,dma-ring-reset-quirk;
ti,sci = <&dmsc>;
ti,sci-dev-id = <187>;
@@ -609,11 +609,11 @@
ti,sci-dev-id = <188>;
ti,ringacc = <&ringacc>;
- ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
- <0x2>; /* TX_CHAN */
- ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
- <0x5>; /* RX_CHAN */
- ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+ ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+ <0xd>; /* TX_CHAN */
+ ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+ <0xa>; /* RX_CHAN */
+ ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
};
cpts@310d0000 {
@@ -622,7 +622,7 @@
reg-names = "cpts";
clocks = <&main_cpts_mux>;
clock-names = "cpts";
- interrupts-extended = <&intr_main_navss 163 0>;
+ interrupts-extended = <&intr_main_navss 391>;
interrupt-names = "cpts";
ti,cpts-periodic-outputs = <6>;
ti,cpts-ext-ts-inputs = <8>;
@@ -645,8 +645,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_main_gpio>;
- interrupts = <57 256>, <57 257>, <57 258>, <57 259>, <57 260>,
- <57 261>;
+ interrupts = <192>, <193>, <194>, <195>, <196>, <197>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <96>;
@@ -661,8 +660,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_main_gpio>;
- interrupts = <58 256>, <58 257>, <58 258>, <58 259>, <58 260>,
- <58 261>;
+ interrupts = <200>, <201>, <202>, <203>, <204>, <205>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <90>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 8c1abcfe0860..51ca4b4d4c21 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -134,7 +134,7 @@
<0x0 0x2a500000 0x0 0x40000>;
reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
ti,num-rings = <286>;
- ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
ti,dma-ring-reset-quirk;
ti,sci = <&dmsc>;
ti,sci-dev-id = <195>;
@@ -154,11 +154,11 @@
ti,sci-dev-id = <194>;
ti,ringacc = <&mcu_ringacc>;
- ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
- <0x2>; /* TX_CHAN */
- ti,sci-rm-range-rchan = <0x3>, /* RX_HCHAN */
- <0x4>; /* RX_CHAN */
- ti,sci-rm-range-rflow = <0x5>; /* GP RFLOW */
+ ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+ <0xd>; /* TX_CHAN */
+ ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+ <0xa>; /* RX_CHAN */
+ ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index 5f55b9e82cf1..a1ffe88d9664 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -74,10 +74,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x4>;
+ ti,sci-dev-id = <156>;
+ ti,interrupt-ranges = <0 712 16>;
};
wkup_gpio0: wkup_gpio0@42110000 {
@@ -86,7 +86,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_wkup_gpio>;
- interrupts = <59 128>, <59 129>, <59 130>, <59 131>;
+ interrupts = <60>, <61>, <62>, <63>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <56>;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 611e66207010..b8a8a0fcb8af 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -384,7 +384,7 @@
};
&mailbox0_cluster0 {
- interrupts = <164 0>;
+ interrupts = <436>;
mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
ti,mbox-tx = <1 0 0>;
@@ -393,7 +393,7 @@
};
&mailbox0_cluster1 {
- interrupts = <165 0>;
+ interrupts = <432>;
mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 {
ti,mbox-tx = <1 0 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 8bc1e6ecc50e..e8fc01d97ada 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -287,7 +287,7 @@
};
&mailbox0_cluster0 {
- interrupts = <214 0>;
+ interrupts = <436>;
mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
ti,mbox-rx = <0 0 0>;
@@ -301,7 +301,7 @@
};
&mailbox0_cluster1 {
- interrupts = <215 0>;
+ interrupts = <432>;
mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
ti,mbox-rx = <0 0 0>;
@@ -315,7 +315,7 @@
};
&mailbox0_cluster2 {
- interrupts = <216 0>;
+ interrupts = <428>;
mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
ti,mbox-rx = <0 0 0>;
@@ -329,7 +329,7 @@
};
&mailbox0_cluster3 {
- interrupts = <217 0>;
+ interrupts = <424>;
mbox_c66_0: mbox-c66-0 {
ti,mbox-rx = <0 0 0>;
@@ -343,7 +343,7 @@
};
&mailbox0_cluster4 {
- interrupts = <218 0>;
+ interrupts = <420>;
mbox_c71_0: mbox-c71-0 {
ti,mbox-rx = <0 0 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index d14060207f00..12ceea9b3c9a 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -80,10 +80,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0x1>;
+ ti,sci-dev-id = <131>;
+ ti,interrupt-ranges = <8 392 56>;
};
main_navss {
@@ -101,10 +101,12 @@
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0>, <2>;
+ ti,sci-dev-id = <213>;
+ ti,interrupt-ranges = <0 64 64>,
+ <64 448 64>,
+ <128 672 64>;
};
main_udmass_inta: interrupt-controller@33d00000 {
@@ -115,8 +117,7 @@
msi-controller;
ti,sci = <&dmsc>;
ti,sci-dev-id = <209>;
- ti,sci-rm-range-vint = <0xa>;
- ti,sci-rm-range-global-event = <0xd>;
+ ti,interrupt-ranges = <0 0 256>;
};
secure_proxy_main: mailbox@32c00000 {
@@ -296,7 +297,7 @@
reg-names = "cpts";
clocks = <&k3_clks 201 1>;
clock-names = "cpts";
- interrupts-extended = <&main_navss_intr 201 0>;
+ interrupts-extended = <&main_navss_intr 391>;
interrupt-names = "cpts";
ti,cpts-periodic-outputs = <6>;
ti,cpts-ext-ts-inputs = <8>;
@@ -688,8 +689,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <105 0>, <105 1>, <105 2>, <105 3>,
- <105 4>, <105 5>, <105 6>, <105 7>;
+ interrupts = <256>, <257>, <258>, <259>,
+ <260>, <261>, <262>, <263>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -705,7 +706,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <106 0>, <106 1>, <106 2>;
+ interrupts = <288>, <289>, <290>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
@@ -721,8 +722,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <107 0>, <107 1>, <107 2>, <107 3>,
- <107 4>, <107 5>, <107 6>, <107 7>;
+ interrupts = <264>, <265>, <266>, <267>,
+ <268>, <269>, <270>, <271>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -738,7 +739,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <108 0>, <108 1>, <108 2>;
+ interrupts = <292>, <293>, <294>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
@@ -754,8 +755,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <109 0>, <109 1>, <109 2>, <109 3>,
- <109 4>, <109 5>, <109 6>, <109 7>;
+ interrupts = <272>, <273>, <274>, <275>,
+ <276>, <277>, <278>, <279>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -771,7 +772,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <110 0>, <110 1>, <110 2>;
+ interrupts = <296>, <297>, <298>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
@@ -787,8 +788,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <111 0>, <111 1>, <111 2>, <111 3>,
- <111 4>, <111 5>, <111 6>, <111 7>;
+ interrupts = <280>, <281>, <282>, <283>,
+ <284>, <285>, <286>, <287>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -804,7 +805,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <112 0>, <112 1>, <112 2>;
+ interrupts = <300>, <301>, <302>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 30a735bcd0c8..c4a48e8d420a 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -101,10 +101,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0x5>;
+ ti,sci-dev-id = <137>;
+ ti,interrupt-ranges = <16 960 16>;
};
wkup_gpio0: gpio@42110000 {
@@ -113,8 +113,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&wkup_gpio_intr>;
- interrupts = <113 0>, <113 1>, <113 2>,
- <113 3>, <113 4>, <113 5>;
+ interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <84>;
@@ -130,8 +129,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&wkup_gpio_intr>;
- interrupts = <114 0>, <114 1>, <114 2>,
- <114 3>, <114 4>, <114 5>;
+ interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <84>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 9174ddc76bdc..3ec99f13c259 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -13,6 +13,7 @@
*/
#include <dt-bindings/power/xlnx-zynqmp-power.h>
+#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
/ {
compatible = "xlnx,zynqmp";
@@ -558,6 +559,15 @@
};
};
+ psgtr: phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr-v1.1";
+ status = "disabled";
+ reg = <0x0 0xfd400000 0x0 0x40000>,
+ <0x0 0xfd3d0000 0x0 0x1000>;
+ reg-names = "serdes", "siou";
+ #phy-cells = <4>;
+ };
+
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@@ -601,7 +611,7 @@
power-domains = <&zynqmp_firmware PD_SD_1>;
};
- smmu: smmu@fd800000 {
+ smmu: iommu@fd800000 {
compatible = "arm,mmu-500";
reg = <0x0 0xfd800000 0x0 0x20000>;
status = "disabled";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index e0f33826819f..6d04b9577b0b 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -724,6 +724,17 @@ CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_RENESAS_USB3=m
CONFIG_USB_TEGRA_XUDC=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_FUSB302=m
@@ -914,6 +925,7 @@ CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_K3_AM6_SOC=y
CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
+CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
CONFIG_IIO=y
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index b357164379f6..63a52ad9a75c 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -788,7 +788,7 @@ SYM_FUNC_START_LOCAL(__xts_crypt8)
0: mov bskey, x21
mov rounds, x22
- br x7
+ br x16
SYM_FUNC_END(__xts_crypt8)
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
@@ -806,7 +806,7 @@ SYM_FUNC_END(__xts_crypt8)
uzp1 v30.4s, v30.4s, v25.4s
ld1 {v25.16b}, [x24]
-99: adr x7, \do8
+99: adr x16, \do8
bl __xts_crypt8
ldp q16, q17, [sp, #.Lframe_local_offset]
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index da1034867aaa..8536008e3e35 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -347,7 +347,7 @@ static int gcm_encrypt(struct aead_request *req)
u8 buf[AES_BLOCK_SIZE];
u8 iv[AES_BLOCK_SIZE];
u64 dg[2] = {};
- u128 lengths;
+ be128 lengths;
u8 *tag;
int err;
@@ -461,7 +461,7 @@ static int gcm_decrypt(struct aead_request *req)
u8 buf[AES_BLOCK_SIZE];
u8 iv[AES_BLOCK_SIZE];
u64 dg[2] = {};
- u128 lengths;
+ be128 lengths;
u8 *tag;
int err;
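
The u128 to be128 change above reflects what the GCM glue actually stores in that variable: the final GHASH block is len(AAD) || len(ciphertext), each encoded as a 64-bit big-endian bit count, so be128 is the honest type. A standalone sketch of that block construction, using plain C stand-ins rather than the kernel helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for cpu_to_be64() so the sketch builds anywhere. */
    static uint64_t to_be64(uint64_t x)
    {
        uint8_t b[8];
        uint64_t out;
        int i;

        for (i = 0; i < 8; i++)
            b[i] = (uint8_t)(x >> (56 - 8 * i));
        memcpy(&out, b, sizeof(out));
        return out;
    }

    int main(void)
    {
        uint64_t assoclen = 20, cryptlen = 40;     /* bytes */
        struct { uint64_t a, b; } lengths;         /* models be128 */
        uint8_t block[16];
        int i;

        lengths.a = to_be64(assoclen * 8);         /* bit counts, big-endian */
        lengths.b = to_be64(cryptlen * 8);
        memcpy(block, &lengths, sizeof(block));

        for (i = 0; i < 16; i++)
            printf("%02x", block[i]);
        printf("\n");   /* 00000000000000a00000000000000140 */
        return 0;
    }
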
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 565ef604ca04..c63b99211db3 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -25,6 +25,9 @@ struct sha1_ce_state {
u32 finalize;
};
+extern const u32 sha1_ce_offsetof_count;
+extern const u32 sha1_ce_offsetof_finalize;
+
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 9450d19b9e6e..5e956d7582a5 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -25,6 +25,9 @@ struct sha256_ce_state {
u32 finalize;
};
+extern const u32 sha256_ce_offsetof_count;
+extern const u32 sha256_ce_offsetof_finalize;
+
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 6647ae4f0231..880b9054d75c 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -153,7 +153,7 @@ static inline bool gic_prio_masking_enabled(void)
static inline void gic_pmr_mask_irqs(void)
{
- BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
+ BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF |
GIC_PRIO_PSR_I_SET));
BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
/*
@@ -162,6 +162,12 @@ static inline void gic_pmr_mask_irqs(void)
* are applied to IRQ priorities
*/
BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
+ /*
+ * Same situation as above, but now we make sure that we can mask
+ * regular interrupts.
+ */
+ BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS |
+ GIC_PRIO_PSR_I_SET));
gic_write_pmr(GIC_PRIO_IRQOFF);
}
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 44209f6146aa..ffb1a40d5475 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -79,10 +79,5 @@ arch_get_random_seed_long_early(unsigned long *v)
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early
-#else
-
-static inline bool __arm64_rndr(unsigned long *v) { return false; }
-static inline bool __init __early_cpu_has_rndr(void) { return false; }
-
#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index fb4c27506ef4..c3009b0e5239 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -45,6 +45,7 @@
#define rmb() dsb(ld)
#define wmb() dsb(st)
+#define dma_mb() dmb(osh)
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index c7f67da13cd9..3e7943fd17a4 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -13,8 +13,7 @@
#define MAX_FDT_SIZE SZ_2M
/*
- * arm64 requires the kernel image to placed
- * TEXT_OFFSET bytes beyond a 2 MB aligned base
+ * arm64 requires the kernel image to placed at a 2 MB aligned base address
*/
#define MIN_KIMG_ALIGN SZ_2M
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 935d2aa231bf..23a9fb73c04f 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -35,8 +35,6 @@ typedef s32 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
struct compat_stat {
#ifdef __AARCH64EB__
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 51a7ce87cdfe..6fb2e6bcc392 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -2,6 +2,12 @@
#ifndef __ASM_COMPILER_H
#define __ASM_COMPILER_H
+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
+
/*
* The EL0/EL1 pointer bits used by a pointer authentication code.
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index d28e8f37d3b4..e95c4df83911 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -21,7 +21,7 @@
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
* @cpu_boot: Boots a cpu into the kernel.
- * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
* synchronisation. Called from the cpu being booted.
* @cpu_can_disable: Determines whether a CPU can be disabled based on
* mechanism-specific information.
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 07b643a70710..42868dbd29fd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -31,13 +31,13 @@
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_HARDEN_BRANCH_PREDICTOR 24
+#define ARM64_SPECTRE_V2 24
#define ARM64_HAS_RAS_EXTN 25
#define ARM64_WORKAROUND_843419 26
#define ARM64_HAS_CACHE_IDC 27
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
-#define ARM64_SSBD 30
+#define ARM64_SPECTRE_V4 30
#define ARM64_MISMATCHED_CACHE_TYPE 31
#define ARM64_HAS_STAGE2_FWB 32
#define ARM64_HAS_CRC32 33
@@ -64,7 +64,8 @@
#define ARM64_BTI 54
#define ARM64_HAS_ARMv8_4_TTL 55
#define ARM64_HAS_TLB_RANGE 56
+#define ARM64_MTE 57
-#define ARM64_NCAPS 57
+#define ARM64_NCAPS 58
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 89b4f0142c28..f7e7144af174 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -358,7 +358,7 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
}
/*
- * Generic helper for handling capabilties with multiple (match,enable) pairs
+ * Generic helper for handling capabilities with multiple (match,enable) pairs
* of call backs, sharing the same capability bit.
* Iterate over each entry to see if at least one matches.
*/
@@ -681,6 +681,12 @@ static __always_inline bool system_uses_irq_prio_masking(void)
cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
}
+static inline bool system_supports_mte(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_MTE) &&
+ cpus_have_const_cap(ARM64_MTE);
+}
+
static inline bool system_has_prio_mask_debugging(void)
{
return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
@@ -698,30 +704,6 @@ static inline bool system_supports_tlb_range(void)
cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
}
-#define ARM64_BP_HARDEN_UNKNOWN -1
-#define ARM64_BP_HARDEN_WA_NEEDED 0
-#define ARM64_BP_HARDEN_NOT_REQUIRED 1
-
-int get_spectre_v2_workaround_state(void);
-
-#define ARM64_SSBD_UNKNOWN -1
-#define ARM64_SSBD_FORCE_DISABLE 0
-#define ARM64_SSBD_KERNEL 1
-#define ARM64_SSBD_FORCE_ENABLE 2
-#define ARM64_SSBD_MITIGATED 3
-
-static inline int arm64_get_ssbd_state(void)
-{
-#ifdef CONFIG_ARM64_SSBD
- extern int ssbd_state;
- return ssbd_state;
-#else
- return ARM64_SSBD_UNKNOWN;
-#endif
-}
-
-void arm64_set_ssbd_mitigation(bool state);
-
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index d4ab3f73e7a3..973b14415271 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -65,7 +65,7 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
(SEGMENT_ALIGN > THREAD_ALIGN ? SEGMENT_ALIGN : THREAD_ALIGN)
/* on arm64, the FDT may be located anywhere in system RAM */
-static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
+static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr)
{
return ULONG_MAX;
}
@@ -80,8 +80,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
* apply to other bootloaders, and are required for some kernel
* configurations.
*/
-static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
- unsigned long image_addr)
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
}
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..22c81f1edda2 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -35,7 +35,9 @@
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
-/* Unallocated EC: 0x1b - 0x1E */
+/* Unallocated EC: 0x1B */
+#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
+/* Unallocated EC: 0x1D - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 7577a754d443..99b9383cd036 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -47,4 +47,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
+void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 840a35ed92ec..b15eb4a3e6b2 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -22,6 +22,15 @@ struct exception_table_entry
#define ARCH_HAS_RELATIVE_EXTABLE
+static inline bool in_bpf_jit(struct pt_regs *regs)
+{
+ if (!IS_ENABLED(CONFIG_BPF_JIT))
+ return false;
+
+ return regs->pc >= BPF_JIT_REGION_START &&
+ regs->pc < BPF_JIT_REGION_END;
+}
+
#ifdef CONFIG_BPF_JIT
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 59f10dd13f12..bec5f14b622a 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -69,6 +69,9 @@ static inline void *sve_pffr(struct thread_struct *thread)
extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1);
+extern void sve_flush_live(void);
+extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
+ unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
struct arm64_cpu_capabilities;
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 636e9d9c7929..af43367534c7 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -164,25 +164,59 @@
| ((\np) << 5)
.endm
+/* PFALSE P\np.B */
+.macro _sve_pfalse np
+ _sve_check_preg \np
+ .inst 0x2518e400 \
+ | (\np)
+.endm
+
.macro __for from:req, to:req
.if (\from) == (\to)
- _for__body \from
+ _for__body %\from
.else
- __for \from, (\from) + ((\to) - (\from)) / 2
- __for (\from) + ((\to) - (\from)) / 2 + 1, \to
+ __for %\from, %((\from) + ((\to) - (\from)) / 2)
+ __for %((\from) + ((\to) - (\from)) / 2 + 1), %\to
.endif
.endm
.macro _for var:req, from:req, to:req, insn:vararg
.macro _for__body \var:req
+ .noaltmacro
\insn
+ .altmacro
.endm
+ .altmacro
__for \from, \to
+ .noaltmacro
.purgem _for__body
.endm
+/* Update ZCR_EL1.LEN with the new VQ */
+.macro sve_load_vq xvqminus1, xtmp, xtmp2
+ mrs_s \xtmp, SYS_ZCR_EL1
+ bic \xtmp2, \xtmp, ZCR_ELx_LEN_MASK
+ orr \xtmp2, \xtmp2, \xvqminus1
+ cmp \xtmp2, \xtmp
+ b.eq 921f
+ msr_s SYS_ZCR_EL1, \xtmp2 //self-synchronising
+921:
+.endm
+
+/* Preserve the first 128-bits of Znz and zero the rest. */
+.macro _sve_flush_z nz
+ _sve_check_zreg \nz
+ mov v\nz\().16b, v\nz\().16b
+.endm
+
+.macro sve_flush
+ _for n, 0, 31, _sve_flush_z \n
+ _for n, 0, 15, _sve_pfalse \n
+ _sve_wrffr 0
+.endm
+
.macro sve_save nxbase, xpfpsr, nxtmp
_for n, 0, 31, _sve_str_v \n, \nxbase, \n - 34
_for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
@@ -197,13 +231,7 @@
.endm
.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
- mrs_s x\nxtmp, SYS_ZCR_EL1
- bic \xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK
- orr \xtmp2, \xtmp2, \xvqminus1
- cmp \xtmp2, x\nxtmp
- b.eq 921f
- msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising
-921:
+ sve_load_vq \xvqminus1, x\nxtmp, \xtmp2
_for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
_sve_ldr_p 0, \nxbase
_sve_wrffr 0
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 985493af704b..5ffa4bacdad3 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -13,21 +13,12 @@
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>
-#define NR_IPI 7
-
typedef struct {
unsigned int __softirq_pending;
- unsigned int ipi_irqs[NR_IPI];
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#define arch_irq_stat_cpu smp_irq_stat_cpu
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
struct nmi_ctx {
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 22f73fe09030..9a5498c2c8ee 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -8,18 +8,27 @@
#include <uapi/asm/hwcap.h>
#include <asm/cpufeature.h>
+#define COMPAT_HWCAP_SWP (1 << 0)
#define COMPAT_HWCAP_HALF (1 << 1)
#define COMPAT_HWCAP_THUMB (1 << 2)
+#define COMPAT_HWCAP_26BIT (1 << 3)
#define COMPAT_HWCAP_FAST_MULT (1 << 4)
+#define COMPAT_HWCAP_FPA (1 << 5)
#define COMPAT_HWCAP_VFP (1 << 6)
#define COMPAT_HWCAP_EDSP (1 << 7)
+#define COMPAT_HWCAP_JAVA (1 << 8)
+#define COMPAT_HWCAP_IWMMXT (1 << 9)
+#define COMPAT_HWCAP_CRUNCH (1 << 10)
+#define COMPAT_HWCAP_THUMBEE (1 << 11)
#define COMPAT_HWCAP_NEON (1 << 12)
#define COMPAT_HWCAP_VFPv3 (1 << 13)
+#define COMPAT_HWCAP_VFPV3D16 (1 << 14)
#define COMPAT_HWCAP_TLS (1 << 15)
#define COMPAT_HWCAP_VFPv4 (1 << 16)
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_VFPD32 (1 << 19)
#define COMPAT_HWCAP_LPAE (1 << 20)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
@@ -95,7 +104,7 @@
#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
-/* reserved for KERNEL_HWCAP_MTE __khwcap2_feature(MTE) */
+#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 0bc46149e491..4b39293d0f72 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
__AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
+__AARCH64_INSN_FUNCS(br_auth, 0xFEFFF800, 0xD61F0800)
__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
+__AARCH64_INSN_FUNCS(blr_auth, 0xFEFFF800, 0xD63F0800)
__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
+__AARCH64_INSN_FUNCS(ret_auth, 0xFFFFFBFF, 0xD65F0BFF)
__AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
+__AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
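
Each __AARCH64_INSN_FUNCS(name, mask, value) line above in essence generates an aarch64_insn_is_<name>() predicate that tests (insn & mask) == value; the new *_auth entries widen the decoder to the pointer-authenticated branch and return forms. A standalone sketch of that match rule using the ret_auth pair from the hunk; the RETAA/RETAB opcodes below are given for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Matches an opcode against a (mask, value) classification entry. */
    static int insn_matches(uint32_t insn, uint32_t mask, uint32_t value)
    {
        return (insn & mask) == value;
    }

    int main(void)
    {
        const uint32_t ret_auth_mask  = 0xfffffbffu;   /* from the hunk */
        const uint32_t ret_auth_value = 0xd65f0bffu;
        const uint32_t retaa = 0xd65f0bffu;            /* illustrative opcodes */
        const uint32_t retab = 0xd65f0fffu;
        const uint32_t plain_ret = 0xd65f03c0u;        /* RET x30 */

        printf("RETAA -> %d, RETAB -> %d, RET -> %d\n",
               insn_matches(retaa, ret_auth_mask, ret_auth_value),
               insn_matches(retab, ret_auth_mask, ret_auth_value),
               insn_matches(plain_ret, ret_auth_mask, ret_auth_value));
        return 0;
    }

The masked-out bit is the A/B key selector, which is why a single table entry covers both authenticated forms while still rejecting a plain RET.
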
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index ff50dd731852..fd172c41df90 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -110,6 +110,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define __io_par(v) __iormb(v)
#define __iowmb() dma_wmb()
+#define __iomb() dma_mb()
/*
* Relaxed I/O memory access primitives. These follow the Device memory
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index 8a1ef1907760..a1020285ea75 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -2,11 +2,9 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
-#include <asm/smp.h>
-
static inline bool arch_irq_work_has_interrupt(void)
{
- return !!__smp_cross_call;
+ return true;
}
#endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa4b6521ef14..ff328e5bbb75 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return res;
}
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 329fb15f6bac..19ca76ea60d9 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -86,7 +86,7 @@
+ EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 51c1d9918999..64ce29378467 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,6 +12,7 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_ATA (UL(1) << 56)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
@@ -66,18 +67,19 @@
* TWI: Trap WFI
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
- * FB: Force broadcast of all maintainance operations
+ * FB: Force broadcast of all maintenance operations
* AMO: Override CPSR.A and enable signaling with VA
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO)
+ HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
-#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
/* TCR_EL2 Registers bits */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fb1a922b31ba..7f7072f6cb45 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -9,9 +9,6 @@
#include <asm/virt.h>
-#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
-#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
#define ARM_EXIT_WITH_SERROR_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -102,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
@@ -169,6 +164,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
*__hyp_this_cpu_ptr(sym); \
})
+#define __KVM_EXTABLE(from, to) \
+ " .pushsection __kvm_ex_table, \"a\"\n" \
+ " .align 3\n" \
+ " .long (" #from " - .), (" #to " - .)\n" \
+ " .popsection\n"
+
+
+#define __kvm_at(at_op, addr) \
+( { \
+ int __kvm_at_err = 0; \
+ u64 spsr, elr; \
+ asm volatile( \
+ " mrs %1, spsr_el2\n" \
+ " mrs %2, elr_el2\n" \
+ "1: at "at_op", %3\n" \
+ " isb\n" \
+ " b 9f\n" \
+ "2: msr spsr_el2, %1\n" \
+ " msr elr_el2, %2\n" \
+ " mov %w0, %4\n" \
+ "9:\n" \
+ __KVM_EXTABLE(1b, 2b) \
+ : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+ : "r" (addr), "i" (-EFAULT)); \
+ __kvm_at_err; \
+} )
+
+
#else /* __ASSEMBLY__ */
.macro hyp_adr_this_cpu reg, sym, tmp
@@ -193,6 +216,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+ .pushsection __kvm_ex_table, "a"
+ .align 3
+ .long (\from - .), (\to - .)
+ .popsection
+.endm
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 49a55be2b9a2..5ef2669ccd6c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -298,15 +298,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}
+/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+ return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -335,6 +335,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
@@ -372,6 +377,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
+ if (kvm_vcpu_abt_iss1tw(vcpu))
+ return true;
+
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
@@ -383,20 +391,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
- bool flag)
-{
- if (flag)
- vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
- else
- vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 65568b23868a..bb5e5b88d439 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
/* Guest PV state */
struct {
- u64 steal;
u64 last_steal;
gpa_t base;
} steal;
@@ -473,7 +472,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
@@ -631,46 +631,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
-#define KVM_BP_HARDEN_UNKNOWN -1
-#define KVM_BP_HARDEN_WA_NEEDED 0
-#define KVM_BP_HARDEN_NOT_REQUIRED 1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
- switch (get_spectre_v2_workaround_state()) {
- case ARM64_BP_HARDEN_WA_NEEDED:
- return KVM_BP_HARDEN_WA_NEEDED;
- case ARM64_BP_HARDEN_NOT_REQUIRED:
- return KVM_BP_HARDEN_NOT_REQUIRED;
- case ARM64_BP_HARDEN_UNKNOWN:
- default:
- return KVM_BP_HARDEN_UNKNOWN;
- }
-}
-
-#define KVM_SSBD_UNKNOWN -1
-#define KVM_SSBD_FORCE_DISABLE 0
-#define KVM_SSBD_KERNEL 1
-#define KVM_SSBD_FORCE_ENABLE 2
-#define KVM_SSBD_MITIGATED 3
-
-static inline int kvm_arm_have_ssbd(void)
-{
- switch (arm64_get_ssbd_state()) {
- case ARM64_SSBD_FORCE_DISABLE:
- return KVM_SSBD_FORCE_DISABLE;
- case ARM64_SSBD_KERNEL:
- return KVM_SSBD_KERNEL;
- case ARM64_SSBD_FORCE_ENABLE:
- return KVM_SSBD_FORCE_ENABLE;
- case ARM64_SSBD_MITIGATED:
- return KVM_SSBD_MITIGATED;
- case ARM64_SSBD_UNKNOWN:
- default:
- return KVM_SSBD_UNKNOWN;
- }
-}
-
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 189839c3706a..cff1cebc7590 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -9,6 +9,7 @@
#include <asm/page.h>
#include <asm/memory.h>
+#include <asm/mmu.h>
#include <asm/cpufeature.h>
/*
@@ -430,19 +431,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
return ret;
}
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
* EL2 vectors can be mapped and rerouted in a number of ways,
* depending on the kernel configuration and CPU present:
*
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- * hardening sequence is placed in one of the vector slots, which is
- * executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ * placed in one of the vector slots, which is executed before jumping
+ * to the real vectors.
*
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- * ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- * hardening sequence is mapped next to the idmap page, and executed
- * before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ * containing the hardening sequence is mapped next to the idmap page,
+ * and executed before jumping to the real vectors.
*
* - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
* empty slot is selected, mapped next to the idmap page, and
@@ -452,19 +451,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
* VHE, as we don't have hypervisor-specific mappings. If the system
* is VHE and yet selects this capability, it will be ignored.
*/
-#include <asm/mmu.h>
-
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;
-/* This is called on both VHE and !VHE systems */
static inline void *kvm_get_hyp_vector(void)
{
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
int slot = -1;
- if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+ if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
slot = data->hyp_vectors_slot;
}
@@ -481,76 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
return vect;
}
-/* This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
- /*
- * HBP = ARM64_HARDEN_BRANCH_PREDICTOR
- * HEL2 = ARM64_HARDEN_EL2_VECTORS
- *
- * !HBP + !HEL2 -> use direct vectors
- * HBP + !HEL2 -> use hardened vectors in place
- * !HBP + HEL2 -> allocate one vector slot and use exec mapping
- * HBP + HEL2 -> use hardened vertors and use exec mapping
- */
- if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
- __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
- __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
- }
-
- if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
- phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
- unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
- /*
- * Always allocate a spare vector slot, as we don't
- * know yet which CPUs have a BP hardening slot that
- * we can reuse.
- */
- __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
- BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
- return create_hyp_exec_mappings(vect_pa, size,
- &__kvm_bp_vect_base);
- }
-
- return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
- return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_ARM64_SSBD
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
- int cpu, err;
-
- for_each_possible_cpu(cpu) {
- u64 *ptr;
-
- ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
- err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
- if (err)
- return err;
- }
- return 0;
-}
-#else
-static inline int hyp_map_aux_data(void)
-{
- return 0;
-}
-#endif
-
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
/*
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index afa722504bfd..43640d797455 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -126,13 +126,18 @@
/*
* Memory types available.
+ *
+ * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
+ * the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
+ * that protection_map[] only contains MT_NORMAL attributes.
*/
-#define MT_DEVICE_nGnRnE 0
-#define MT_DEVICE_nGnRE 1
-#define MT_DEVICE_GRE 2
-#define MT_NORMAL_NC 3
-#define MT_NORMAL 4
-#define MT_NORMAL_WT 5
+#define MT_NORMAL 0
+#define MT_NORMAL_TAGGED 1
+#define MT_NORMAL_NC 2
+#define MT_NORMAL_WT 3
+#define MT_DEVICE_nGnRnE 4
+#define MT_DEVICE_nGnRE 5
+#define MT_DEVICE_GRE 6
/*
* Memory types for Stage-2 translation
@@ -169,7 +174,7 @@ extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
-/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+/* the virtual base of the kernel image */
extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */
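A short illustration of why the memory-type reordering above matters, under the assumption stated in the comment that protection_map[] only carries MT_NORMAL attributes: with MT_NORMAL at index 0, the AttrIndx field of such a protection value is all zeroes, so vm_get_page_prot() can OR in the tagged index without masking first. The helper below is a sketch only; PTE_ATTRINDX() and the pgprot accessors come from the pgtable headers, not from memory.h.

	static inline pgprot_t sketch_mark_tagged(pgprot_t prot)
	{
		/*
		 * Safe only because PTE_ATTRINDX(MT_NORMAL) == 0 with the new
		 * ordering; otherwise the stale AttrIndx bits would have to be
		 * cleared before OR-ing in MT_NORMAL_TAGGED.
		 */
		return __pgprot(pgprot_val(prot) | PTE_ATTRINDX(MT_NORMAL_TAGGED));
	}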
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 081ec8de9ea6..e3e28f7daf62 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -9,16 +9,53 @@
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey __always_unused)
{
+ unsigned long ret = 0;
+
if (system_supports_bti() && (prot & PROT_BTI))
- return VM_ARM64_BTI;
+ ret |= VM_ARM64_BTI;
- return 0;
+ if (system_supports_mte() && (prot & PROT_MTE))
+ ret |= VM_MTE;
+
+ return ret;
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+{
+ /*
+ * Only allow MTE on anonymous mappings as these are guaranteed to be
+ * backed by tags-capable memory. The vm_flags may be overridden by a
+ * filesystem supporting MTE (RAM-based).
+ */
+ if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+ return VM_MTE_ALLOWED;
+
+ return 0;
+}
+#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
- return (vm_flags & VM_ARM64_BTI) ? __pgprot(PTE_GP) : __pgprot(0);
+ pteval_t prot = 0;
+
+ if (vm_flags & VM_ARM64_BTI)
+ prot |= PTE_GP;
+
+ /*
+ * There are two conditions required for returning a Normal Tagged
+ * memory type: (1) the user requested it via PROT_MTE passed to
+ * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+ * register (1) as VM_MTE in the vma->vm_flags and (2) as
+ * VM_MTE_ALLOWED. Note that the latter can only be set during the
+ * mmap() call since mprotect() does not accept MAP_* flags.
+ * Checking for VM_MTE only is sufficient since arch_validate_flags()
+ * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+ */
+ if (vm_flags & VM_MTE)
+ prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+ return __pgprot(prot);
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
@@ -30,8 +67,21 @@ static inline bool arch_validate_prot(unsigned long prot,
if (system_supports_bti())
supported |= PROT_BTI;
+ if (system_supports_mte())
+ supported |= PROT_MTE;
+
return (prot & ~supported) == 0;
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
+static inline bool arch_validate_flags(unsigned long vm_flags)
+{
+ if (!system_supports_mte())
+ return true;
+
+ /* only allow VM_MTE if VM_MTE_ALLOWED has been set previously */
+ return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
+}
+#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+
#endif /* ! __ASM_MMAN_H__ */
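A hedged userspace sketch of the mmap() path these hooks validate: PROT_MTE is only honoured on mappings that end up with VM_MTE_ALLOWED, which per arch_calc_vm_flag_bits() above means anonymous mappings (or a RAM-based filesystem that opts in). PROT_MTE itself is added in the uapi mman.h hunk later in this series; the fallback define is only there so the example stands alone.

	#include <stddef.h>
	#include <sys/mman.h>

	#ifndef PROT_MTE
	#define PROT_MTE	0x20	/* arch/arm64/include/uapi/asm/mman.h */
	#endif

	/* Request a tag-checked anonymous mapping; returns NULL on failure. */
	static void *map_tagged(size_t len)
	{
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		return p == MAP_FAILED ? NULL : p;
	}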
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index a7a5ecaa2e83..b2e91c187e2a 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,11 +17,14 @@
#ifndef __ASSEMBLY__
+#include <linux/refcount.h>
+
typedef struct {
atomic64_t id;
#ifdef CONFIG_COMPAT
void *sigpage;
#endif
+ refcount_t pinned;
void *vdso;
unsigned long flags;
} mm_context_t;
@@ -45,7 +48,6 @@ struct bp_hardening_data {
bp_hardening_cb_t fn;
};
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -57,21 +59,13 @@ static inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;
- if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
return;
d = arm64_get_bp_hardening_data();
if (d->fn)
d->fn();
}
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
- return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void) { }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
extern void arm64_memblock_init(void);
extern void paging_init(void);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f2d7537d6f83..0672236e1aea 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -177,7 +177,13 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm);
-#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ atomic64_set(&mm->context.id, 0);
+ refcount_set(&mm->context.pinned, 0);
+ return 0;
+}
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -248,6 +254,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
+unsigned long arm64_mm_context_get(struct mm_struct *mm);
+void arm64_mm_context_put(struct mm_struct *mm);
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_MMU_CONTEXT_H */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
new file mode 100644
index 000000000000..1c99fcadb58c
--- /dev/null
+++ b/arch/arm64/include/asm/mte.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_H
+#define __ASM_MTE_H
+
+#define MTE_GRANULE_SIZE UL(16)
+#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
+#define MTE_TAG_SHIFT 56
+#define MTE_TAG_SIZE 4
+
+#ifndef __ASSEMBLY__
+
+#include <linux/page-flags.h>
+
+#include <asm/pgtable-types.h>
+
+void mte_clear_page_tags(void *addr);
+unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
+ unsigned long n);
+unsigned long mte_copy_tags_to_user(void __user *to, void *from,
+ unsigned long n);
+int mte_save_tags(struct page *page);
+void mte_save_page_tags(const void *page_addr, void *tag_storage);
+bool mte_restore_tags(swp_entry_t entry, struct page *page);
+void mte_restore_page_tags(void *page_addr, const void *tag_storage);
+void mte_invalidate_tags(int type, pgoff_t offset);
+void mte_invalidate_tags_area(int type);
+void *mte_allocate_tag_storage(void);
+void mte_free_tag_storage(char *storage);
+
+#ifdef CONFIG_ARM64_MTE
+
+/* track which pages have valid allocation tags */
+#define PG_mte_tagged PG_arch_2
+
+void mte_sync_tags(pte_t *ptep, pte_t pte);
+void mte_copy_page_tags(void *kto, const void *kfrom);
+void flush_mte_state(void);
+void mte_thread_switch(struct task_struct *next);
+void mte_suspend_exit(void);
+long set_mte_ctrl(struct task_struct *task, unsigned long arg);
+long get_mte_ctrl(struct task_struct *task);
+int mte_ptrace_copy_tags(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
+
+#else
+
+/* unused if !CONFIG_ARM64_MTE, silence the compiler */
+#define PG_mte_tagged 0
+
+static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
+{
+}
+static inline void mte_copy_page_tags(void *kto, const void *kfrom)
+{
+}
+static inline void flush_mte_state(void)
+{
+}
+static inline void mte_thread_switch(struct task_struct *next)
+{
+}
+static inline void mte_suspend_exit(void)
+{
+}
+static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
+{
+ return 0;
+}
+static inline long get_mte_ctrl(struct task_struct *task)
+{
+ return 0;
+}
+static inline int mte_ptrace_copy_tags(struct task_struct *child,
+ long request, unsigned long addr,
+ unsigned long data)
+{
+ return -EIO;
+}
+
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_MTE_H */
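A small illustration of what the MTE_* constants at the top of this header describe: allocation tags are 4 bits wide, cover one 16-byte granule each, and live in bits 59:56 of a pointer. The helper below is illustrative only and is not part of the kernel interface.

	static inline void *sketch_ptr_set_tag(void *ptr, unsigned long tag)
	{
		unsigned long addr = (unsigned long)ptr;
		unsigned long mask = (1UL << MTE_TAG_SIZE) - 1;	/* 4-bit tag */

		addr &= ~(mask << MTE_TAG_SHIFT);	/* clear bits 59:56 */
		addr |= (tag & mask) << MTE_TAG_SHIFT;

		/* the tag is checked per MTE_GRANULE_SIZE-byte (16-byte) granule */
		return (void *)addr;
	}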
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index 626ad01e83bf..dd870390d639 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node);
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
return node_to_cpumask_map[node];
}
#endif
diff --git a/arch/arm64/include/asm/page-def.h b/arch/arm64/include/asm/page-def.h
index f99d48ecbeef..2403f7b4cdbf 100644
--- a/arch/arm64/include/asm/page-def.h
+++ b/arch/arm64/include/asm/page-def.h
@@ -11,13 +11,8 @@
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
-/* CONT_SHIFT determines the number of pages which can be tracked together */
#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
-#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
-#define CONT_SIZE (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT))
-#define CONT_MASK (~(CONT_SIZE-1))
-
#endif /* __ASM_PAGE_DEF_H */
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index c01b52add377..012cffc574e8 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -15,18 +15,25 @@
#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
#include <asm/pgtable-types.h>
-extern void __cpu_clear_user_page(void *p, unsigned long user);
-extern void __cpu_copy_user_page(void *to, const void *from,
- unsigned long user);
+struct page;
+struct vm_area_struct;
+
extern void copy_page(void *to, const void *from);
extern void clear_page(void *to);
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_highpage(struct page *to, struct page *from);
+#define __HAVE_ARCH_COPY_HIGHPAGE
+
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
-#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
typedef struct page *pgtable_t;
@@ -36,7 +43,7 @@ extern int pfn_valid(unsigned long);
#endif /* !__ASSEMBLY__ */
-#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+#define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)
#include <asm-generic/getorder.h>
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 70b323cf8300..b33ca260e3c9 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -17,6 +17,7 @@
#define pcibios_assign_all_busses() \
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
+#define arch_can_pci_mmap_wc() 1
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
extern int isa_dma_bridge_buggy;
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2c2d7dbe8a02..60731f602d3e 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -236,6 +236,9 @@
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* PMMIR_EL1.SLOTS mask */
+#define ARMV8_PMU_SLOTS_MASK 0xff
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d400a4d9aee2..94b3f2ac2e9d 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -81,25 +81,15 @@
/*
* Contiguous page definitions.
*/
-#ifdef CONFIG_ARM64_64K_PAGES
-#define CONT_PTE_SHIFT (5 + PAGE_SHIFT)
-#define CONT_PMD_SHIFT (5 + PMD_SHIFT)
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define CONT_PTE_SHIFT (7 + PAGE_SHIFT)
-#define CONT_PMD_SHIFT (5 + PMD_SHIFT)
-#else
-#define CONT_PTE_SHIFT (4 + PAGE_SHIFT)
-#define CONT_PMD_SHIFT (4 + PMD_SHIFT)
-#endif
-
+#define CONT_PTE_SHIFT (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
#define CONT_PTES (1 << (CONT_PTE_SHIFT - PAGE_SHIFT))
#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
+
+#define CONT_PMD_SHIFT (CONFIG_ARM64_CONT_PMD_SHIFT + PMD_SHIFT)
#define CONT_PMDS (1 << (CONT_PMD_SHIFT - PMD_SHIFT))
#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
-/* the numerical offset of the PTE within a range of CONT_PTES */
-#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
/*
* Hardware page table definitions.
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 4d867c6446c4..4cd0d6ca8aa1 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -19,6 +19,13 @@
#define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
+/*
+ * This bit indicates that the entry is present, i.e. pmd_page()
+ * still points to a valid huge page in memory even if the pmd
+ * has been invalidated.
+ */
+#define PMD_PRESENT_INVALID (_AT(pteval_t, 1) << 59) /* only when !PMD_SECT_VALID */
+
#ifndef __ASSEMBLY__
#include <asm/cpufeature.h>
@@ -50,6 +57,7 @@ extern bool arm64_use_ng_mappings;
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -59,6 +67,7 @@ extern bool arm64_use_ng_mappings;
#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
+#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d5d3fbe73953..a11bf52e0c38 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -9,6 +9,7 @@
#include <asm/proc-fns.h>
#include <asm/memory.h>
+#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>
@@ -35,11 +36,6 @@
extern struct page *vmemmap;
-extern void __pte_error(const char *file, int line, unsigned long val);
-extern void __pmd_error(const char *file, int line, unsigned long val);
-extern void __pud_error(const char *file, int line, unsigned long val);
-extern void __pgd_error(const char *file, int line, unsigned long val);
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
@@ -51,13 +47,22 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
+ * Outside of a few very special situations (e.g. hibernation), we always
+ * use broadcast TLB invalidation instructions, therefore a spurious page
+ * fault on one CPU which has been handled concurrently by another CPU
+ * does not need to perform additional invalidation.
+ */
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+
+/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
-#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
/*
* Macros to convert between a physical address and its placement in a
@@ -90,6 +95,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
+#define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
+ PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pte_cont_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
@@ -145,6 +152,18 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
return pte;
}
+static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
+{
+ pmd_val(pmd) &= ~pgprot_val(prot);
+ return pmd;
+}
+
+static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
+{
+ pmd_val(pmd) |= pgprot_val(prot);
+ return pmd;
+}
+
static inline pte_t pte_wrprotect(pte_t pte)
{
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
@@ -284,6 +303,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte);
+ if (system_supports_mte() &&
+ pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
+ mte_sync_tags(ptep, pte);
+
__check_racy_pte_update(mm, ptep, pte);
set_pte(ptep, pte);
@@ -363,15 +386,24 @@ static inline int pmd_protnone(pmd_t pmd)
}
#endif
+#define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
+}
+
/*
* THP definitions.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#define pmd_present(pmd) pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
@@ -381,7 +413,14 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkinvalid(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
+ pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
+
+ return pmd;
+}
#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
@@ -541,7 +580,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#if CONFIG_PGTABLE_LEVELS > 2
-#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
@@ -608,7 +648,8 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
#if CONFIG_PGTABLE_LEVELS > 3
-#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_none(p4d) (!p4d_val(p4d))
#define p4d_bad(p4d) (!(p4d_val(p4d) & 2))
@@ -667,15 +708,21 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
-#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
+ /*
+ * Normal and Normal-Tagged are two different memory types and indices
+ * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
+ */
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
+ PTE_ATTRINDX_MASK;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
@@ -847,6 +894,11 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp) __pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
/*
* Ensure that there are not more swap files than can be encoded in the kernel
* PTEs.
@@ -855,6 +907,38 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
extern int kern_addr_valid(unsigned long addr);
+#ifdef CONFIG_ARM64_MTE
+
+#define __HAVE_ARCH_PREPARE_TO_SWAP
+static inline int arch_prepare_to_swap(struct page *page)
+{
+ if (system_supports_mte())
+ return mte_save_tags(page);
+ return 0;
+}
+
+#define __HAVE_ARCH_SWAP_INVALIDATE
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+ if (system_supports_mte())
+ mte_invalidate_tags(type, offset);
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+ if (system_supports_mte())
+ mte_invalidate_tags_area(type);
+}
+
+#define __HAVE_ARCH_SWAP_RESTORE
+static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+{
+ if (system_supports_mte() && mte_restore_tags(entry, page))
+ set_bit(PG_mte_tagged, &page->flags);
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
*/
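A sketch of the invariant the PMD_PRESENT_INVALID changes above establish: splitting a THP invalidates the pmd so the hardware stops walking it, but the entry must still read as present so that pmd_page() and friends remain usable. The check function is illustrative only; pmd_valid(), pmd_mkinvalid() and pmd_present() are the helpers from this header.

	static inline void sketch_check_pmd_invalidate(pmd_t pmd)
	{
		pmd_t inv = pmd_mkinvalid(pmd);

		WARN_ON(pmd_valid(inv));	/* no longer walkable by hardware */
		WARN_ON(!pmd_present(inv));	/* but still "present" for the core MM */
	}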
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 240fe5e5b720..fce8cbecd6bc 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -38,6 +38,7 @@
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
+#include <asm/spectre.h>
#include <asm/types.h>
/*
@@ -151,6 +152,10 @@ struct thread_struct {
struct ptrauth_keys_user keys_user;
struct ptrauth_keys_kernel keys_kernel;
#endif
+#ifdef CONFIG_ARM64_MTE
+ u64 sctlr_tcf0;
+ u64 gcr_user_incl;
+#endif
};
static inline void arch_thread_struct_whitelist(unsigned long *offset,
@@ -197,40 +202,15 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
regs->pmr_save = GIC_PRIO_IRQON;
}
-static inline void set_ssbs_bit(struct pt_regs *regs)
-{
- regs->pstate |= PSR_SSBS_BIT;
-}
-
-static inline void set_compat_ssbs_bit(struct pt_regs *regs)
-{
- regs->pstate |= PSR_AA32_SSBS_BIT;
-}
-
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
start_thread_common(regs, pc);
regs->pstate = PSR_MODE_EL0t;
-
- if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- set_ssbs_bit(regs);
-
+ spectre_v4_enable_task_mitigation(current);
regs->sp = sp;
}
-static inline bool is_ttbr0_addr(unsigned long addr)
-{
- /* entry assembly clears tags for TTBR0 addrs */
- return addr < TASK_SIZE;
-}
-
-static inline bool is_ttbr1_addr(unsigned long addr)
-{
- /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
- return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-}
-
#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
@@ -244,13 +224,23 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate |= PSR_AA32_E_BIT;
#endif
- if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- set_compat_ssbs_bit(regs);
-
+ spectre_v4_enable_task_mitigation(current);
regs->compat_sp = sp;
}
#endif
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -315,10 +305,10 @@ extern void __init minsigstksz_setup(void);
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
-long set_tagged_addr_ctrl(unsigned long arg);
-long get_tagged_addr_ctrl(void);
-#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg)
-#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl()
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
+long get_tagged_addr_ctrl(struct task_struct *task);
+#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg)
+#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current)
#endif
/*
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 966ed30ed5f7..997cf8c8cd52 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -31,9 +31,21 @@
* interrupt disabling temporarily does not rely on IRQ priorities.
*/
#define GIC_PRIO_IRQON 0xe0
-#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+#define __GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+#define __GIC_PRIO_IRQOFF_NS 0xa0
#define GIC_PRIO_PSR_I_SET (1 << 4)
+#define GIC_PRIO_IRQOFF \
+ ({ \
+ extern struct static_key_false gic_nonsecure_priorities;\
+ u8 __prio = __GIC_PRIO_IRQOFF; \
+ \
+ if (static_branch_unlikely(&gic_nonsecure_priorities)) \
+ __prio = __GIC_PRIO_IRQOFF_NS; \
+ \
+ __prio; \
+ })
+
/* Additional SPSR bits not exposed in the UABI */
#define PSR_MODE_THREAD_BIT (1 << 0)
#define PSR_IL_BIT (1 << 20)
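Because GIC_PRIO_IRQOFF is now an expression that reads a static key, it can no longer be encoded as an instruction immediate; a caller masking IRQs through the priority mask would evaluate it into a register first. A sketch only, reusing the existing write_sysreg_s() helper and SYS_ICC_PMR_EL1 definition; the function name is illustrative.

	static inline void sketch_pmr_mask_irqs(void)
	{
		/* picks __GIC_PRIO_IRQOFF or __GIC_PRIO_IRQOFF_NS at run time */
		write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
		isb();
	}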
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 0eadbf933e35..2e7f529ec5a6 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -56,27 +56,15 @@ static inline void set_cpu_logical_map(int cpu, u64 hwid)
struct seq_file;
/*
- * generate IPI list text
- */
-extern void show_ipi_list(struct seq_file *p, int prec);
-
-/*
- * Called from C code, this handles an IPI.
- */
-extern void handle_IPI(int ipinr, struct pt_regs *regs);
-
-/*
* Discover the set of possible CPUs and determine their
* SMP operations.
*/
extern void smp_init_cpus(void);
/*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
-
-extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
/*
* Called from the secondary holding pen, this is the secondary CPU entry point.
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
new file mode 100644
index 000000000000..fcdfbce302bd
--- /dev/null
+++ b/arch/arm64/include/asm/spectre.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for Spectre vulnerabilities.
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <will@kernel.org>
+ */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+#include <asm/cpufeature.h>
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+ SPECTRE_UNAFFECTED,
+ SPECTRE_MITIGATED,
+ SPECTRE_VULNERABLE,
+};
+
+struct task_struct;
+
+enum mitigation_state arm64_get_spectre_v2_state(void);
+bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+
+enum mitigation_state arm64_get_spectre_v4_state(void);
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
+
+#endif /* __ASM_SPECTRE_H */
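The "ordering is important" note above presumably allows per-CPU findings to be folded into a system-wide state by keeping the numerically largest value, with SPECTRE_VULNERABLE dominating. A sketch of that assumption; the helper is illustrative and not the patch's implementation.

	static void sketch_update_state(enum mitigation_state *sys,
					enum mitigation_state cpu)
	{
		/* UNAFFECTED < MITIGATED < VULNERABLE: keep the least safe answer */
		if (cpu > *sys)
			*sys = cpu;
	}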
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index fc7613023c19..eb29b1fe8255 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -63,7 +63,7 @@ struct stackframe {
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data);
+ bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl);
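The new walk_stackframe() callback matches the generic stack_trace_consume_fn shape from <linux/stacktrace.h>: it receives an opaque cookie plus a PC value and returns false to stop the walk. A minimal consumer might look like the sketch below; the name and the frame budget passed through the cookie are illustrative.

	static bool sketch_count_frames(void *cookie, unsigned long pc)
	{
		unsigned int *left = cookie;

		pr_info("  pc: %pS\n", (void *)pc);
		return --(*left) > 0;	/* stop once the budget is exhausted */
	}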
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 554a7e8ecb07..d52c1b3ce589 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -91,10 +91,12 @@
#define PSTATE_PAN pstate_field(0, 4)
#define PSTATE_UAO pstate_field(0, 3)
#define PSTATE_SSBS pstate_field(3, 1)
+#define PSTATE_TCO pstate_field(3, 4)
#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
@@ -181,6 +183,8 @@
#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
+#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
@@ -218,6 +222,8 @@
#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
+#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
@@ -321,6 +327,8 @@
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
+#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6)
+
#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
@@ -368,6 +376,7 @@
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
+#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
@@ -460,6 +469,7 @@
#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0)
#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
@@ -516,6 +526,7 @@
#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
+#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
@@ -531,6 +542,15 @@
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_DSSBS (BIT(44))
+#define SCTLR_ELx_ATA (BIT(43))
+
+#define SCTLR_ELx_TCF_SHIFT 40
+#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
+
+#define SCTLR_ELx_ITFSB (BIT(37))
#define SCTLR_ELx_ENIA (BIT(31))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_ENDA (BIT(27))
@@ -559,6 +579,14 @@
#endif
/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_ATA0 (BIT(42))
+
+#define SCTLR_EL1_TCF0_SHIFT 38
+#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
+
#define SCTLR_EL1_BT1 (BIT(36))
#define SCTLR_EL1_BT0 (BIT(35))
#define SCTLR_EL1_UCI (BIT(26))
@@ -587,6 +615,7 @@
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
+SCTLR_ELx_ITFSB | SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
/* MAIR_ELx memory attributes (used by Linux) */
@@ -595,6 +624,7 @@
#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
#define MAIR_ATTR_NORMAL_NC UL(0x44)
#define MAIR_ATTR_NORMAL_WT UL(0xbb)
+#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
#define MAIR_ATTR_NORMAL UL(0xff)
#define MAIR_ATTR_MASK UL(0xff)
@@ -636,14 +666,22 @@
#define ID_AA64ISAR1_APA_SHIFT 4
#define ID_AA64ISAR1_DPB_SHIFT 0
-#define ID_AA64ISAR1_APA_NI 0x0
-#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
-#define ID_AA64ISAR1_API_NI 0x0
-#define ID_AA64ISAR1_API_IMP_DEF 0x1
-#define ID_AA64ISAR1_GPA_NI 0x0
-#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
-#define ID_AA64ISAR1_GPI_NI 0x0
-#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+#define ID_AA64ISAR1_APA_NI 0x0
+#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2
+#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_API_NI 0x0
+#define ID_AA64ISAR1_API_IMP_DEF 0x1
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_GPA_NI 0x0
+#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_GPI_NI 0x0
+#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
@@ -686,6 +724,10 @@
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
#define ID_AA64PFR1_BT_BTI 0x1
+#define ID_AA64PFR1_MTE_NI 0x0
+#define ID_AA64PFR1_MTE_EL0 0x1
+#define ID_AA64PFR1_MTE 0x2
+
/* id_aa64zfr0 */
#define ID_AA64ZFR0_F64MM_SHIFT 56
#define ID_AA64ZFR0_F32MM_SHIFT 52
@@ -920,6 +962,28 @@
#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+/* TCR EL1 Bit Definitions */
+#define SYS_TCR_EL1_TCMA1 (BIT(58))
+#define SYS_TCR_EL1_TCMA0 (BIT(57))
+
+/* GCR_EL1 Definitions */
+#define SYS_GCR_EL1_RRND (BIT(16))
+#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
+
+/* RGSR_EL1 Definitions */
+#define SYS_RGSR_EL1_TAG_MASK 0xfUL
+#define SYS_RGSR_EL1_SEED_SHIFT 8
+#define SYS_RGSR_EL1_SEED_MASK 0xffffUL
+
+/* GMID_EL1 field definitions */
+#define SYS_GMID_EL1_BS_SHIFT 0
+#define SYS_GMID_EL1_BS_SIZE 4
+
+/* TFSR{,E0}_EL1 bit definitions */
+#define SYS_TFSR_EL1_TF0_SHIFT 0
+#define SYS_TFSR_EL1_TF1_SHIFT 1
+#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
+#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
@@ -1024,6 +1088,13 @@
write_sysreg(__scs_new, sysreg); \
} while (0)
+#define sysreg_clear_set_s(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg_s(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_s(__scs_new, sysreg); \
+} while (0)
+
#endif
#endif /* __ASM_SYSREG_H */
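A usage sketch for the new sysreg_clear_set_s() helper, paired with the GCR_EL1 definitions added above: update only the exclude mask and skip the register write when nothing changes. The wrapper name, and the idea of driving it from GCR_EL1, are illustrative rather than taken from this patch.

	static inline void sketch_set_gcr_excl(u64 excl)
	{
		sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK,
				   excl & SYS_GCR_EL1_EXCL_MASK);
	}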
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 5e784e16ee89..1fbab854a51b 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -67,6 +67,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
+#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
@@ -96,10 +97,11 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE)
+#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE | _TIF_FSCHECK)
+ _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index d493174415db..cc3f5a33ff9c 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,14 +28,16 @@
* not. The macro handles invoking the asm with or without the
* register argument as appropriate.
*/
-#define __TLBI_0(op, arg) asm ("tlbi " #op "\n" \
+#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op "\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op, \
ARM64_WORKAROUND_REPEAT_TLBI, \
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : )
-#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
+#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op ", %0\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op ", %0", \
ARM64_WORKAROUND_REPEAT_TLBI, \
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index e042f6527981..11a465243f66 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -26,7 +26,9 @@ void topology_scale_freq_tick(void);
#endif /* CONFIG_ARM64_AMU_EXTN */
/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_set_freq_scale topology_set_freq_scale
#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index cee5928e1b7d..d96dc2c7c09d 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -24,7 +24,7 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
-void force_signal_inject(int signal, int code, unsigned long address);
+void force_signal_inject(int signal, int code, unsigned long address, unsigned int err);
void arm64_notify_segfault(unsigned long addr);
void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str);
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, const char *str);
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 734860ac7cf9..2a3ad9b9accd 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -53,7 +53,7 @@ __SYSCALL(__NR_lseek, compat_sys_lseek)
#define __NR_getpid 20
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_mount 21
-__SYSCALL(__NR_mount, compat_sys_mount)
+__SYSCALL(__NR_mount, sys_mount)
/* 22 was sys_umount */
__SYSCALL(22, sys_ni_syscall)
#define __NR_setuid 23
@@ -301,9 +301,9 @@ __SYSCALL(__NR_flock, sys_flock)
#define __NR_msync 144
__SYSCALL(__NR_msync, sys_msync)
#define __NR_readv 145
-__SYSCALL(__NR_readv, compat_sys_readv)
+__SYSCALL(__NR_readv, sys_readv)
#define __NR_writev 146
-__SYSCALL(__NR_writev, compat_sys_writev)
+__SYSCALL(__NR_writev, sys_writev)
#define __NR_getsid 147
__SYSCALL(__NR_getsid, sys_getsid)
#define __NR_fdatasync 148
@@ -697,7 +697,7 @@ __SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2)
#define __NR_tee 342
__SYSCALL(__NR_tee, sys_tee)
#define __NR_vmsplice 343
-__SYSCALL(__NR_vmsplice, compat_sys_vmsplice)
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_move_pages 344
__SYSCALL(__NR_move_pages, compat_sys_move_pages)
#define __NR_getcpu 345
@@ -763,9 +763,9 @@ __SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg)
#define __NR_setns 375
__SYSCALL(__NR_setns, sys_setns)
#define __NR_process_vm_readv 376
-__SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv)
+__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
#define __NR_process_vm_writev 377
-__SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev)
+__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
#define __NR_kcmp 378
__SYSCALL(__NR_kcmp, sys_kcmp)
#define __NR_finit_module 379
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h
index 31bbc803cecb..dffdc773221b 100644
--- a/arch/arm64/include/asm/xen/page.h
+++ b/arch/arm64/include/asm/xen/page.h
@@ -1 +1,7 @@
#include <xen/arm/page.h>
+#include <asm/mmu.h>
+
+static inline bool xen_kernel_unmapped_at_usr(void)
+{
+ return arm64_kernel_unmapped_at_el0();
+}
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 912162f73529..b8f41aa234ee 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -74,6 +74,6 @@
#define HWCAP2_DGH (1 << 15)
#define HWCAP2_RNG (1 << 16)
#define HWCAP2_BTI (1 << 17)
-/* reserved for HWCAP2_MTE (1 << 18) */
+#define HWCAP2_MTE (1 << 18)
#endif /* _UAPI__ASM_HWCAP_H */
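With HWCAP2_MTE now a real hwcap rather than a reserved slot, userspace can probe for it in the usual way through the auxiliary vector. A minimal check; the fallback defines only keep the example self-contained against older headers.

	#include <sys/auxv.h>

	#ifndef AT_HWCAP2
	#define AT_HWCAP2	26
	#endif
	#ifndef HWCAP2_MTE
	#define HWCAP2_MTE	(1 << 18)
	#endif

	static int cpu_has_mte(void)
	{
		return (getauxval(AT_HWCAP2) & HWCAP2_MTE) != 0;
	}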
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index ba85bb23f060..7d804fd0a682 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -242,6 +242,15 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
diff --git a/arch/arm64/include/uapi/asm/mman.h b/arch/arm64/include/uapi/asm/mman.h
index 6fdd71eb644f..1e6482a838e1 100644
--- a/arch/arm64/include/uapi/asm/mman.h
+++ b/arch/arm64/include/uapi/asm/mman.h
@@ -5,5 +5,6 @@
#include <asm-generic/mman.h>
#define PROT_BTI 0x10 /* BTI guarded page */
+#define PROT_MTE 0x20 /* Normal Tagged mapping */
#endif /* ! _UAPI__ASM_MMAN_H */
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 42cbe34d95ce..758ae984ff97 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -51,6 +51,7 @@
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
#define PSR_DIT_BIT 0x01000000
+#define PSR_TCO_BIT 0x02000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
@@ -75,6 +76,9 @@
/* syscall emulation path in ptrace */
#define PTRACE_SYSEMU 31
#define PTRACE_SYSEMU_SINGLESTEP 32
+/* MTE allocation tag access */
+#define PTRACE_PEEKMTETAGS 33
+#define PTRACE_POKEMTETAGS 34
#ifndef __ASSEMBLY__
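The MTE ptrace requests added above are expected to follow the ABI described in the accompanying memory-tagging documentation: addr is the (tagged) address in the tracee and data points to a struct iovec whose buffer holds one tag byte per 16-byte granule. Treat that calling convention as an assumption here; the helper below is a sketch only.

	#include <stdint.h>
	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/uio.h>

	#ifndef PTRACE_PEEKMTETAGS
	#define PTRACE_PEEKMTETAGS	33
	#endif

	/* Read up to 'count' allocation tags starting at 'addr' in the tracee. */
	static long sketch_peek_tags(pid_t pid, void *addr, uint8_t *tags, size_t count)
	{
		struct iovec iov = { .iov_base = tags, .iov_len = count };

		return ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
	}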
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index a561cbb91d4d..bbaf0bc4ad60 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -3,8 +3,6 @@
# Makefile for the linux kernel.
#
-CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
-AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_armv8_deprecated.o := -I$(src)
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
@@ -19,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o
+ syscall.o proton-pack.o
targets += efi-entry.o
@@ -59,9 +57,9 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
+obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 455966401102..cada0b816c8a 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -298,8 +298,21 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
- pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
- return NULL;
+ if (memblock_is_map_memory(phys) ||
+ !memblock_is_region_memory(phys, size)) {
+ pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
+ return NULL;
+ }
+ /*
+ * Mapping kernel memory is permitted if the region in
+ * question is covered by a single memblock with the
+ * NOMAP attribute set: this enables the use of ACPI
+ * table overrides passed via initramfs, which are
+ * reserved in memory using arch_reserve_mem_area()
+ * below. As this particular use case only requires
+ * read access, fall through to the R/O mapping case.
+ */
+ fallthrough;
case EFI_RUNTIME_SERVICES_CODE:
/*
@@ -322,7 +335,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
*/
if (memblock_is_map_memory(phys))
return (void __iomem *)__phys_to_virt(phys);
- /* fall through */
+ fallthrough;
default:
if (region->attribute & EFI_MEMORY_WB)
@@ -388,3 +401,8 @@ int apei_claim_sea(struct pt_regs *regs)
return err;
}
+
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+ memblock_mark_nomap(addr, size);
+}
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 4a18055b2ff9..37721eb6f9a1 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -35,6 +35,10 @@ SYM_CODE_START(__cpu_soft_restart)
mov_q x13, SCTLR_ELx_FLAGS
bic x12, x12, x13
pre_disable_mmu_workaround
+ /*
+ * Either disable the EL1&0 translation regime or, if HCR_EL2.E2H == 1,
+ * disable the EL2&0 translation regime.
+ */
msr sctlr_el1, x12
isb
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6bd1d3ad037a..24d75af344b1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -106,365 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
- int i;
-
- for (i = 0; i < SZ_2K; i += 0x80)
- memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
- __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- static DEFINE_RAW_SPINLOCK(bp_lock);
- int cpu, slot = -1;
-
- /*
- * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
- * we're a guest. Skip the hyp-vectors work.
- */
- if (!hyp_vecs_start) {
- __this_cpu_write(bp_hardening_data.fn, fn);
- return;
- }
-
- raw_spin_lock(&bp_lock);
- for_each_possible_cpu(cpu) {
- if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
- slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
- break;
- }
- }
-
- if (slot == -1) {
- slot = atomic_inc_return(&arm64_el2_vector_last_slot);
- BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
- __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
- }
-
- __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
- __this_cpu_write(bp_hardening_data.fn, fn);
- raw_spin_unlock(&bp_lock);
-}
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- __this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-
-#include <linux/arm-smccc.h>
-
-static void __maybe_unused call_smc_arch_workaround_1(void)
-{
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
- arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
- u64 tmp;
-
- asm volatile("mov %0, x30 \n"
- ".rept 16 \n"
- "bl . + 4 \n"
- ".endr \n"
- "mov x30, %0 \n"
- : "=&r" (tmp));
-}
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
- __nospectre_v2 = true;
- return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- * 0: No workaround required
- * 1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
- bp_hardening_cb_t cb;
- void *smccc_start, *smccc_end;
- struct arm_smccc_res res;
- u32 midr = read_cpuid_id();
-
- arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-
- switch ((int)res.a0) {
- case 1:
- /* Firmware says we're just fine */
- return 0;
- case 0:
- break;
- default:
- return -1;
- }
-
- switch (arm_smccc_1_1_get_conduit()) {
- case SMCCC_CONDUIT_HVC:
- cb = call_hvc_arch_workaround_1;
- /* This is a guest, no need to patch KVM vectors */
- smccc_start = NULL;
- smccc_end = NULL;
- break;
-
-#if IS_ENABLED(CONFIG_KVM)
- case SMCCC_CONDUIT_SMC:
- cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc;
- smccc_end = __smccc_workaround_1_smc +
- __SMCCC_WORKAROUND_1_SMC_SZ;
- break;
-#endif
-
- default:
- return -1;
- }
-
- if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
- ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
- cb = qcom_link_stack_sanitization;
-
- if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
- install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
- return 1;
-}
-
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
- const char *str;
- int state;
-} ssbd_options[] = {
- { "force-on", ARM64_SSBD_FORCE_ENABLE, },
- { "force-off", ARM64_SSBD_FORCE_DISABLE, },
- { "kernel", ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
- int i;
-
- if (!buf || !buf[0])
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
- int len = strlen(ssbd_options[i].str);
-
- if (strncmp(buf, ssbd_options[i].str, len))
- continue;
-
- ssbd_state = ssbd_options[i].state;
- return 0;
- }
-
- return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
- __le32 *origptr, __le32 *updptr,
- int nr_inst)
-{
- u32 insn;
-
- BUG_ON(nr_inst != 1);
-
- switch (arm_smccc_1_1_get_conduit()) {
- case SMCCC_CONDUIT_HVC:
- insn = aarch64_insn_get_hvc_value();
- break;
- case SMCCC_CONDUIT_SMC:
- insn = aarch64_insn_get_smc_value();
- break;
- default:
- return;
- }
-
- *updptr = cpu_to_le32(insn);
-}
-
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
- __le32 *origptr, __le32 *updptr,
- int nr_inst)
-{
- BUG_ON(nr_inst != 1);
- /*
- * Only allow mitigation on EL1 entry/exit and guest
- * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
- * be flipped.
- */
- if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
- *updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
-void arm64_set_ssbd_mitigation(bool state)
-{
- int conduit;
-
- if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
- pr_info_once("SSBD disabled by kernel configuration\n");
- return;
- }
-
- if (this_cpu_has_cap(ARM64_SSBS)) {
- if (state)
- asm volatile(SET_PSTATE_SSBS(0));
- else
- asm volatile(SET_PSTATE_SSBS(1));
- return;
- }
-
- conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
- NULL);
-
- WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
- int scope)
-{
- struct arm_smccc_res res;
- bool required = true;
- s32 val;
- bool this_cpu_safe = false;
- int conduit;
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
- if (cpu_mitigations_off())
- ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
- /* delay setting __ssb_safe until we get a firmware response */
- if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
- this_cpu_safe = true;
-
- if (this_cpu_has_cap(ARM64_SSBS)) {
- if (!this_cpu_safe)
- __ssb_safe = false;
- required = false;
- goto out_printmsg;
- }
-
- conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-
- if (conduit == SMCCC_CONDUIT_NONE) {
- ssbd_state = ARM64_SSBD_UNKNOWN;
- if (!this_cpu_safe)
- __ssb_safe = false;
- return false;
- }
-
- val = (s32)res.a0;
-
- switch (val) {
- case SMCCC_RET_NOT_SUPPORTED:
- ssbd_state = ARM64_SSBD_UNKNOWN;
- if (!this_cpu_safe)
- __ssb_safe = false;
- return false;
-
- /* machines with mixed mitigation requirements must not return this */
- case SMCCC_RET_NOT_REQUIRED:
- pr_info_once("%s mitigation not required\n", entry->desc);
- ssbd_state = ARM64_SSBD_MITIGATED;
- return false;
-
- case SMCCC_RET_SUCCESS:
- __ssb_safe = false;
- required = true;
- break;
-
- case 1: /* Mitigation not required on this CPU */
- required = false;
- break;
-
- default:
- WARN_ON(1);
- if (!this_cpu_safe)
- __ssb_safe = false;
- return false;
- }
-
- switch (ssbd_state) {
- case ARM64_SSBD_FORCE_DISABLE:
- arm64_set_ssbd_mitigation(false);
- required = false;
- break;
-
- case ARM64_SSBD_KERNEL:
- if (required) {
- __this_cpu_write(arm64_ssbd_callback_required, 1);
- arm64_set_ssbd_mitigation(true);
- }
- break;
-
- case ARM64_SSBD_FORCE_ENABLE:
- arm64_set_ssbd_mitigation(true);
- required = true;
- break;
-
- default:
- WARN_ON(1);
- break;
- }
-
-out_printmsg:
- switch (ssbd_state) {
- case ARM64_SSBD_FORCE_DISABLE:
- pr_info_once("%s disabled from command-line\n", entry->desc);
- break;
-
- case ARM64_SSBD_FORCE_ENABLE:
- pr_info_once("%s forced from command-line\n", entry->desc);
- break;
- }
-
- return required;
-}
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
- MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
- {},
-};
-
#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -519,83 +160,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-int get_spectre_v2_workaround_state(void)
-{
- if (__spectrev2_safe)
- return ARM64_BP_HARDEN_NOT_REQUIRED;
-
- if (!__hardenbp_enab)
- return ARM64_BP_HARDEN_UNKNOWN;
-
- return ARM64_BP_HARDEN_WA_NEEDED;
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
- MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
- MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
- { /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
- int need_wa;
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
- /* If the CPU has CSV2 set, we're safe */
- if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
- ID_AA64PFR0_CSV2_SHIFT))
- return false;
-
- /* Alternatively, we have a list of unaffected CPUs */
- if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
- return false;
-
- /* Fallback to firmware detection */
- need_wa = detect_harden_bp_fw();
- if (!need_wa)
- return false;
-
- __spectrev2_safe = false;
-
- if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
- pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
- __hardenbp_enab = false;
- return false;
- }
-
- /* forced off */
- if (__nospectre_v2 || cpu_mitigations_off()) {
- pr_info_once("spectrev2 mitigation disabled by command line option\n");
- __hardenbp_enab = false;
- return false;
- }
-
- if (need_wa < 0) {
- pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
- __hardenbp_enab = false;
- }
-
- return (need_wa > 0);
-}
-
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
@@ -887,9 +451,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ .desc = "Spectre-v2",
+ .capability = ARM64_SPECTRE_V2,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .matches = check_branch_predictor,
+ .matches = has_spectre_v2,
+ .cpu_enable = spectre_v2_enable_mitigation,
},
#ifdef CONFIG_RANDOMIZE_BASE
{
@@ -899,17 +465,23 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
- .desc = "Speculative Store Bypass Disable",
- .capability = ARM64_SSBD,
+ .desc = "Spectre-v4",
+ .capability = ARM64_SPECTRE_V4,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .matches = has_ssbd_mitigation,
- .midr_range_list = arm64_ssb_cpus,
+ .matches = has_spectre_v4,
+ .cpu_enable = spectre_v4_enable_mitigation,
},
#ifdef CONFIG_ARM64_ERRATUM_1418040
{
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ /*
+ * We need to allow affected CPUs to come in late, but
+ * also need the non-affected CPUs to be able to come
+ * in at any point in time. Wonderful.
+ */
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
@@ -954,40 +526,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
}
};
-
-ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- switch (get_spectre_v2_workaround_state()) {
- case ARM64_BP_HARDEN_NOT_REQUIRED:
- return sprintf(buf, "Not affected\n");
- case ARM64_BP_HARDEN_WA_NEEDED:
- return sprintf(buf, "Mitigation: Branch predictor hardening\n");
- case ARM64_BP_HARDEN_UNKNOWN:
- default:
- return sprintf(buf, "Vulnerable\n");
- }
-}
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (__ssb_safe)
- return sprintf(buf, "Not affected\n");
-
- switch (ssbd_state) {
- case ARM64_SSBD_KERNEL:
- case ARM64_SSBD_FORCE_ENABLE:
- if (IS_ENABLED(CONFIG_ARM64_SSBD))
- return sprintf(buf,
- "Mitigation: Speculative Store Bypass disabled via prctl\n");
- }
-
- return sprintf(buf, "Vulnerable\n");
-}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a389b999482e..dcc165b3fc04 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -75,6 +75,7 @@
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
+#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
@@ -197,9 +198,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+ FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
+ FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -227,7 +228,9 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
ARM64_FTR_END,
@@ -487,7 +490,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
};
static const struct arm64_ftr_bits ftr_id_pfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -686,7 +689,7 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_HIGHER_OR_ZERO_SAFE:
if (!cur || !new)
break;
- /* Fallthrough */
+ fallthrough;
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
@@ -1111,6 +1114,7 @@ u64 read_sanitised_ftr_reg(u32 id)
return 0;
return regp->sys_val;
}
+EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
#define read_sysreg_case(r) \
case r: return read_sysreg_s(r)
@@ -1443,6 +1447,7 @@ static inline void __cpu_enable_hw_dbm(void)
write_sysreg(tcr, tcr_el1);
isb();
+ local_flush_tlb_all();
}
static bool cpu_has_broken_dbm(void)
@@ -1583,48 +1588,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
WARN_ON(val & (7 << 27 | 7 << 21));
}
-#ifdef CONFIG_ARM64_SSBD
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
-{
- if (user_mode(regs))
- return 1;
-
- if (instr & BIT(PSTATE_Imm_shift))
- regs->pstate |= PSR_SSBS_BIT;
- else
- regs->pstate &= ~PSR_SSBS_BIT;
-
- arm64_skip_faulting_instruction(regs, 4);
- return 0;
-}
-
-static struct undef_hook ssbs_emulation_hook = {
- .instr_mask = ~(1U << PSTATE_Imm_shift),
- .instr_val = 0xd500401f | PSTATE_SSBS,
- .fn = ssbs_emulation_handler,
-};
-
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
-{
- static bool undef_hook_registered = false;
- static DEFINE_RAW_SPINLOCK(hook_lock);
-
- raw_spin_lock(&hook_lock);
- if (!undef_hook_registered) {
- register_undef_hook(&ssbs_emulation_hook);
- undef_hook_registered = true;
- }
- raw_spin_unlock(&hook_lock);
-
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
- sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
- arm64_set_ssbd_mitigation(false);
- } else {
- arm64_set_ssbd_mitigation(true);
- }
-}
-#endif /* CONFIG_ARM64_SSBD */
-
#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
@@ -1648,11 +1611,37 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_PTR_AUTH
-static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
- int __unused)
+static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
{
- return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
- __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+ int boot_val, sec_val;
+
+ /* We don't expect to be called with SCOPE_SYSTEM */
+ WARN_ON(scope == SCOPE_SYSTEM);
+ /*
+ * A higher ptr-auth feature level is not compatible with a lower one,
+ * so the ptr-auth feature level of each secondary CPU must match that
+ * of the boot CPU. The boot CPU's level is taken from the sanitised
+ * register, whereas the secondary CPUs read their register directly.
+ * The sanitised feature state is guaranteed to match that of the boot
+ * CPU because a mismatched secondary CPU is parked before it gets a
+ * chance to update the state with its capability.
+ */
+ boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
+ entry->field_pos, entry->sign);
+ if (scope & SCOPE_BOOT_CPU)
+ return boot_val >= entry->min_field_value;
+ /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */
+ sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
+ entry->field_pos, entry->sign);
+ return sec_val == boot_val;
+}
+
+static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
+ has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
}
static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
@@ -1702,6 +1691,22 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_BTI */
+#ifdef CONFIG_ARM64_MTE
+static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+{
+ static bool cleared_zero_page = false;
+
+ /*
+ * Clear the tags in the zero page. This needs to be done via the
+ * linear map which has the Tagged attribute.
+ */
+ if (!cleared_zero_page) {
+ cleared_zero_page = true;
+ mte_clear_page_tags(lm_alias(empty_zero_page));
+ }
+}
+#endif /* CONFIG_ARM64_MTE */
+
/* Internal helper functions to match cpu capability type */
static bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -1976,19 +1981,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
.min_field_value = 1,
},
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypassing Safe (SSBS)",
.capability = ARM64_SSBS,
- .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_SSBS_SHIFT,
.sign = FTR_UNSIGNED,
.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
- .cpu_enable = cpu_enable_ssbs,
},
-#endif
#ifdef CONFIG_ARM64_CNP
{
.desc = "Common not Private translations",
@@ -2021,7 +2023,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64ISAR1_APA_SHIFT,
.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
- .matches = has_cpuid_feature,
+ .matches = has_address_auth_cpucap,
},
{
.desc = "Address authentication (IMP DEF algorithm)",
@@ -2031,12 +2033,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64ISAR1_API_SHIFT,
.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
- .matches = has_cpuid_feature,
+ .matches = has_address_auth_cpucap,
},
{
.capability = ARM64_HAS_ADDRESS_AUTH,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
- .matches = has_address_auth,
+ .matches = has_address_auth_metacap,
},
{
.desc = "Generic authentication (architected algorithm)",
@@ -2121,6 +2123,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
},
#endif
+#ifdef CONFIG_ARM64_MTE
+ {
+ .desc = "Memory Tagging Extension",
+ .capability = ARM64_MTE,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_MTE_SHIFT,
+ .min_field_value = ID_AA64PFR1_MTE,
+ .sign = FTR_UNSIGNED,
+ .cpu_enable = cpu_enable_mte,
+ },
+#endif /* CONFIG_ARM64_MTE */
{},
};
@@ -2237,6 +2252,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
+#ifdef CONFIG_ARM64_MTE
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+#endif /* CONFIG_ARM64_MTE */
{},
};
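
The new KERNEL_HWCAP_MTE entry surfaces to user space as an AT_HWCAP2 bit. A minimal user-space probe is sketched below; HWCAP2_MTE should come from the uapi headers of a kernel with this series, and the fallback value is an assumption.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_MTE
#define HWCAP2_MTE	(1UL << 18)	/* assumed value; take it from <asm/hwcap.h> */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("mte: %s\n", (hwcap2 & HWCAP2_MTE) ? "present" : "absent");
	return 0;
}
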
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 393c6fb1f1cb..6a7bb3729d60 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -43,94 +43,93 @@ static const char *icache_policy_str[] = {
unsigned long __icache_flags;
static const char *const hwcap_str[] = {
- "fp",
- "asimd",
- "evtstrm",
- "aes",
- "pmull",
- "sha1",
- "sha2",
- "crc32",
- "atomics",
- "fphp",
- "asimdhp",
- "cpuid",
- "asimdrdm",
- "jscvt",
- "fcma",
- "lrcpc",
- "dcpop",
- "sha3",
- "sm3",
- "sm4",
- "asimddp",
- "sha512",
- "sve",
- "asimdfhm",
- "dit",
- "uscat",
- "ilrcpc",
- "flagm",
- "ssbs",
- "sb",
- "paca",
- "pacg",
- "dcpodp",
- "sve2",
- "sveaes",
- "svepmull",
- "svebitperm",
- "svesha3",
- "svesm4",
- "flagm2",
- "frint",
- "svei8mm",
- "svef32mm",
- "svef64mm",
- "svebf16",
- "i8mm",
- "bf16",
- "dgh",
- "rng",
- "bti",
- /* reserved for "mte" */
- NULL
+ [KERNEL_HWCAP_FP] = "fp",
+ [KERNEL_HWCAP_ASIMD] = "asimd",
+ [KERNEL_HWCAP_EVTSTRM] = "evtstrm",
+ [KERNEL_HWCAP_AES] = "aes",
+ [KERNEL_HWCAP_PMULL] = "pmull",
+ [KERNEL_HWCAP_SHA1] = "sha1",
+ [KERNEL_HWCAP_SHA2] = "sha2",
+ [KERNEL_HWCAP_CRC32] = "crc32",
+ [KERNEL_HWCAP_ATOMICS] = "atomics",
+ [KERNEL_HWCAP_FPHP] = "fphp",
+ [KERNEL_HWCAP_ASIMDHP] = "asimdhp",
+ [KERNEL_HWCAP_CPUID] = "cpuid",
+ [KERNEL_HWCAP_ASIMDRDM] = "asimdrdm",
+ [KERNEL_HWCAP_JSCVT] = "jscvt",
+ [KERNEL_HWCAP_FCMA] = "fcma",
+ [KERNEL_HWCAP_LRCPC] = "lrcpc",
+ [KERNEL_HWCAP_DCPOP] = "dcpop",
+ [KERNEL_HWCAP_SHA3] = "sha3",
+ [KERNEL_HWCAP_SM3] = "sm3",
+ [KERNEL_HWCAP_SM4] = "sm4",
+ [KERNEL_HWCAP_ASIMDDP] = "asimddp",
+ [KERNEL_HWCAP_SHA512] = "sha512",
+ [KERNEL_HWCAP_SVE] = "sve",
+ [KERNEL_HWCAP_ASIMDFHM] = "asimdfhm",
+ [KERNEL_HWCAP_DIT] = "dit",
+ [KERNEL_HWCAP_USCAT] = "uscat",
+ [KERNEL_HWCAP_ILRCPC] = "ilrcpc",
+ [KERNEL_HWCAP_FLAGM] = "flagm",
+ [KERNEL_HWCAP_SSBS] = "ssbs",
+ [KERNEL_HWCAP_SB] = "sb",
+ [KERNEL_HWCAP_PACA] = "paca",
+ [KERNEL_HWCAP_PACG] = "pacg",
+ [KERNEL_HWCAP_DCPODP] = "dcpodp",
+ [KERNEL_HWCAP_SVE2] = "sve2",
+ [KERNEL_HWCAP_SVEAES] = "sveaes",
+ [KERNEL_HWCAP_SVEPMULL] = "svepmull",
+ [KERNEL_HWCAP_SVEBITPERM] = "svebitperm",
+ [KERNEL_HWCAP_SVESHA3] = "svesha3",
+ [KERNEL_HWCAP_SVESM4] = "svesm4",
+ [KERNEL_HWCAP_FLAGM2] = "flagm2",
+ [KERNEL_HWCAP_FRINT] = "frint",
+ [KERNEL_HWCAP_SVEI8MM] = "svei8mm",
+ [KERNEL_HWCAP_SVEF32MM] = "svef32mm",
+ [KERNEL_HWCAP_SVEF64MM] = "svef64mm",
+ [KERNEL_HWCAP_SVEBF16] = "svebf16",
+ [KERNEL_HWCAP_I8MM] = "i8mm",
+ [KERNEL_HWCAP_BF16] = "bf16",
+ [KERNEL_HWCAP_DGH] = "dgh",
+ [KERNEL_HWCAP_RNG] = "rng",
+ [KERNEL_HWCAP_BTI] = "bti",
+ [KERNEL_HWCAP_MTE] = "mte",
};
#ifdef CONFIG_COMPAT
+#define COMPAT_KERNEL_HWCAP(x) const_ilog2(COMPAT_HWCAP_ ## x)
static const char *const compat_hwcap_str[] = {
- "swp",
- "half",
- "thumb",
- "26bit",
- "fastmult",
- "fpa",
- "vfp",
- "edsp",
- "java",
- "iwmmxt",
- "crunch",
- "thumbee",
- "neon",
- "vfpv3",
- "vfpv3d16",
- "tls",
- "vfpv4",
- "idiva",
- "idivt",
- "vfpd32",
- "lpae",
- "evtstrm",
- NULL
+ [COMPAT_KERNEL_HWCAP(SWP)] = "swp",
+ [COMPAT_KERNEL_HWCAP(HALF)] = "half",
+ [COMPAT_KERNEL_HWCAP(THUMB)] = "thumb",
+ [COMPAT_KERNEL_HWCAP(26BIT)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(FAST_MULT)] = "fastmult",
+ [COMPAT_KERNEL_HWCAP(FPA)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(VFP)] = "vfp",
+ [COMPAT_KERNEL_HWCAP(EDSP)] = "edsp",
+ [COMPAT_KERNEL_HWCAP(JAVA)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(IWMMXT)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(CRUNCH)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(THUMBEE)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(NEON)] = "neon",
+ [COMPAT_KERNEL_HWCAP(VFPv3)] = "vfpv3",
+ [COMPAT_KERNEL_HWCAP(VFPV3D16)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(TLS)] = "tls",
+ [COMPAT_KERNEL_HWCAP(VFPv4)] = "vfpv4",
+ [COMPAT_KERNEL_HWCAP(IDIVA)] = "idiva",
+ [COMPAT_KERNEL_HWCAP(IDIVT)] = "idivt",
+ [COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(LPAE)] = "lpae",
+ [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm",
};
+#define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x)
static const char *const compat_hwcap2_str[] = {
- "aes",
- "pmull",
- "sha1",
- "sha2",
- "crc32",
- NULL
+ [COMPAT_KERNEL_HWCAP2(AES)] = "aes",
+ [COMPAT_KERNEL_HWCAP2(PMULL)] = "pmull",
+ [COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1",
+ [COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2",
+ [COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32",
};
#endif /* CONFIG_COMPAT */
@@ -166,16 +165,25 @@ static int c_show(struct seq_file *m, void *v)
seq_puts(m, "Features\t:");
if (compat) {
#ifdef CONFIG_COMPAT
- for (j = 0; compat_hwcap_str[j]; j++)
- if (compat_elf_hwcap & (1 << j))
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap_str); j++) {
+ if (compat_elf_hwcap & (1 << j)) {
+ /*
+ * Warn once if any feature should not
+ * have been present on arm64 platform.
+ */
+ if (WARN_ON_ONCE(!compat_hwcap_str[j]))
+ continue;
+
seq_printf(m, " %s", compat_hwcap_str[j]);
+ }
+ }
- for (j = 0; compat_hwcap2_str[j]; j++)
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap2_str); j++)
if (compat_elf_hwcap2 & (1 << j))
seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
} else {
- for (j = 0; hwcap_str[j]; j++)
+ for (j = 0; j < ARRAY_SIZE(hwcap_str); j++)
if (cpu_have_feature(j))
seq_printf(m, " %s", hwcap_str[j]);
}
@@ -327,7 +335,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
set_bit(ICACHEF_VPIPT, &__icache_flags);
break;
default:
- /* Fallthrough */
case ICACHE_POLICY_VIPT:
/* Assume aliasing */
set_bit(ICACHEF_ALIASING, &__icache_flags);
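
The COMPAT_KERNEL_HWCAP() indexing above relies on const_ilog2() collapsing a single-bit mask to its bit number, so each string lands at the index tested against compat_elf_hwcap. A standalone sketch of the same trick, with illustrative names:

#include <linux/log2.h>

#define EX_HWCAP_NEON	(1 << 12)
#define EX_IDX(mask)	const_ilog2(mask)	/* 1 << n  ->  n */

static const char *const ex_hwcap_str[] = {
	[EX_IDX(EX_HWCAP_NEON)] = "neon",	/* lands at index 12 */
};
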
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 7310a4f7f993..fa76151de6ff 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -384,7 +384,7 @@ void __init debug_traps_init(void)
hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
TRAP_TRACE, "single-step handler");
hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
- TRAP_BRKPT, "ptrace BRK handler");
+ TRAP_BRKPT, "BRK handler");
}
/* Re-enable single step for syscall restarting. */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index d3be9dbf5490..43d4c329775f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -66,6 +66,13 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
}
NOKPROBE_SYMBOL(el1_dbg);
+static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ local_daif_inherit(regs);
+ do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el1_fpac);
+
asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -92,6 +99,9 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_BRK64:
el1_dbg(regs, esr);
break;
+ case ESR_ELx_EC_FPAC:
+ el1_fpac(regs, esr);
+ break;
default:
el1_inv(regs, esr);
}
@@ -227,6 +237,14 @@ static void notrace el0_svc(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(el0_svc);
+static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el0_fpac);
+
asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_BRK64:
el0_dbg(regs, esr);
break;
+ case ESR_ELx_EC_FPAC:
+ el0_fpac(regs, esr);
+ break;
default:
el0_inv(regs, esr);
}
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index f880dd63ddc3..2ca395c25448 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -32,6 +32,7 @@ SYM_FUNC_START(fpsimd_load_state)
SYM_FUNC_END(fpsimd_load_state)
#ifdef CONFIG_ARM64_SVE
+
SYM_FUNC_START(sve_save_state)
sve_save 0, x1, 2
ret
@@ -46,4 +47,28 @@ SYM_FUNC_START(sve_get_vl)
_sve_rdvl 0, 1
ret
SYM_FUNC_END(sve_get_vl)
+
+/*
+ * Load SVE state from FPSIMD state.
+ *
+ * x0 = pointer to struct fpsimd_state
+ * x1 = VQ - 1
+ *
+ * Each SVE vector will be loaded with the first 128-bits taken from FPSIMD
+ * and the rest zeroed. All the other SVE registers will be zeroed.
+ */
+SYM_FUNC_START(sve_load_from_fpsimd_state)
+ sve_load_vq x1, x2, x3
+ fpsimd_restore x0, 8
+ _for n, 0, 15, _sve_pfalse \n
+ _sve_wrffr 0
+ ret
+SYM_FUNC_END(sve_load_from_fpsimd_state)
+
+/* Zero all SVE registers but the first 128-bits of each vector */
+SYM_FUNC_START(sve_flush_live)
+ sve_flush
+ ret
+SYM_FUNC_END(sve_flush_live)
+
#endif /* CONFIG_ARM64_SVE */
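
The two new SVE helpers are consumed from C; their prototypes, as this series declares them in asm/fpsimd.h (quoted from memory, so treat as an assumption), look like:

/* asm/fpsimd.h (sketch) */
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
				       unsigned long vq_minus_1);
extern void sve_flush_live(void);
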
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2646178c8329..f30007dff35f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -132,9 +132,8 @@ alternative_else_nop_endif
* them if required.
*/
.macro apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
- b .L__asm_ssbd_skip\@
+alternative_cb spectre_v4_patch_fw_mitigation_enable
+ b .L__asm_ssbd_skip\@ // Patched to NOP
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
cbz \tmp2, .L__asm_ssbd_skip\@
@@ -142,10 +141,35 @@ alternative_cb_end
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
-alternative_cb arm64_update_smccc_conduit
+alternative_cb spectre_v4_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
+ .endm
+
+ /* Check for MTE asynchronous tag check faults */
+ .macro check_mte_async_tcf, flgs, tmp
+#ifdef CONFIG_ARM64_MTE
+alternative_if_not ARM64_MTE
+ b 1f
+alternative_else_nop_endif
+ mrs_s \tmp, SYS_TFSRE0_EL1
+ tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+ /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+ orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
+ str \flgs, [tsk, #TSK_TI_FLAGS]
+ msr_s SYS_TFSRE0_EL1, xzr
+1:
+#endif
+ .endm
+
+ /* Clear the MTE asynchronous tag check faults */
+ .macro clear_mte_async_tcf
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ dsb ish
+ msr_s SYS_TFSRE0_EL1, xzr
+alternative_else_nop_endif
#endif
.endm
@@ -170,19 +194,6 @@ alternative_cb_end
stp x28, x29, [sp, #16 * 14]
.if \el == 0
- .if \regsize == 32
- /*
- * If we're returning from a 32-bit task on a system affected by
- * 1418040 then re-enable userspace access to the virtual counter.
- */
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
- .endif
clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20
@@ -195,6 +206,8 @@ alternative_else_nop_endif
ldr x19, [tsk, #TSK_TI_FLAGS]
disable_step_tsk x19, x20
+ /* Check for asynchronous tag check faults in user space */
+ check_mte_async_tcf x19, x22
apply_ssbd 1, x22, x23
ptrauth_keys_install_kernel tsk, x20, x22, x23
@@ -246,6 +259,13 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
str x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif
+ /* Re-enable tag checking (TCO set on exception entry) */
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ SET_PSTATE_TCO(0)
+alternative_else_nop_endif
+#endif
+
/*
* Registers that may be useful after this macro is invoked:
*
@@ -294,14 +314,6 @@ alternative_else_nop_endif
tst x22, #PSR_MODE32_BIT // native task?
b.eq 3f
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -718,11 +730,9 @@ el0_irq_naked:
bl trace_hardirqs_off
#endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
-#endif
irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -765,6 +775,8 @@ SYM_CODE_START_LOCAL(ret_to_user)
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
+ /* Ignore asynchronous tag check faults in the uaccess routines */
+ clear_mte_async_tcf
enable_step_tsk x1, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase
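
For readers more comfortable in C, a rough equivalent of the check_mte_async_tcf logic added above (a sketch only; the SYS_* and TIF_* names are the ones introduced by this series):

/* Sketch: what check_mte_async_tcf does on kernel entry from EL0 */
if (system_supports_mte()) {
	u64 tfsr = read_sysreg_s(SYS_TFSRE0_EL1);

	if (tfsr & SYS_TFSR_EL1_TF0) {		/* async tag check fault on a TTBR0 access */
		set_thread_flag(TIF_MTE_ASYNC_FAULT);
		write_sysreg_s(0, SYS_TFSRE0_EL1);
	}
}
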
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 55c8f3ec6705..a6d688c10745 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -32,9 +32,11 @@
#include <linux/swab.h>
#include <asm/esr.h>
+#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
+#include <asm/neon.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
@@ -312,7 +314,7 @@ static void fpsimd_save(void)
* re-enter user with corrupt state.
* There's no way to recover, so kill it:
*/
- force_signal_inject(SIGKILL, SI_KERNEL, 0);
+ force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
return;
}
@@ -928,7 +930,7 @@ void fpsimd_release_task(struct task_struct *dead_task)
* the SVE access trap will be disabled the next time this task
* reaches ret_to_user.
*
- * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
+ * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
* would have disabled the SVE access trap for userspace during
* ret_to_user, making an SVE access trap impossible in that case.
*/
@@ -936,7 +938,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
/* Even if we chose not to use SVE, the hardware could still trap: */
if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
return;
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 037421c66b14..d8d9caf02834 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -36,14 +36,10 @@
#include "efi-header.S"
-#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
+#define __PHYS_OFFSET KERNEL_START
-#if (TEXT_OFFSET & 0xfff) != 0
-#error TEXT_OFFSET must be at least 4KB aligned
-#elif (PAGE_OFFSET & 0x1fffff) != 0
+#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
-#elif TEXT_OFFSET > 0x1fffff
-#error TEXT_OFFSET must be less than 2MB
#endif
/*
@@ -55,7 +51,7 @@
* x0 = physical address to the FDT blob.
*
* This code is mostly position independent so you call this at
- * __pa(PAGE_OFFSET + TEXT_OFFSET).
+ * __pa(PAGE_OFFSET).
*
* Note that the callee-saved registers are used for storing variables
* that are useful before the MMU is enabled. The allocations are described
@@ -77,7 +73,7 @@ _head:
b primary_entry // branch to kernel start, magic
.long 0 // reserved
#endif
- le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian
+ .quad 0 // Image load offset from start of RAM, little-endian
le64sym _kernel_size_le // Effective size of kernel image, little-endian
le64sym _kernel_flags_le // Informative flags, little-endian
.quad 0 // reserved
@@ -382,7 +378,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
* Map the kernel image (starting with PHYS_OFFSET).
*/
adrp x0, init_pg_dir
- mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
+ mov_q x5, KIMAGE_VADDR // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
mov x4, PTRS_PER_PGD
adrp x6, _end // runtime __pa(_end)
@@ -474,7 +470,7 @@ SYM_FUNC_END(__primary_switched)
.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
- .quad _text - TEXT_OFFSET
+ .quad _text
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
.popsection
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 68e14152d6e9..42003774d261 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
-#include <linux/version.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
@@ -31,6 +30,7 @@
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
+#include <asm/mte.h>
#include <asm/pgalloc.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
@@ -285,6 +285,117 @@ static int create_safe_exec_page(void *src_start, size_t length,
#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
+#ifdef CONFIG_ARM64_MTE
+
+static DEFINE_XARRAY(mte_pages);
+
+static int save_tags(struct page *page, unsigned long pfn)
+{
+ void *tag_storage, *ret;
+
+ tag_storage = mte_allocate_tag_storage();
+ if (!tag_storage)
+ return -ENOMEM;
+
+ mte_save_page_tags(page_address(page), tag_storage);
+
+ ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
+ if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+ mte_free_tag_storage(tag_storage);
+ return xa_err(ret);
+ } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
+ mte_free_tag_storage(ret);
+ }
+
+ return 0;
+}
+
+static void swsusp_mte_free_storage(void)
+{
+ XA_STATE(xa_state, &mte_pages, 0);
+ void *tags;
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, ULONG_MAX) {
+ mte_free_tag_storage(tags);
+ }
+ xa_unlock(&mte_pages);
+
+ xa_destroy(&mte_pages);
+}
+
+static int swsusp_mte_save_tags(void)
+{
+ struct zone *zone;
+ unsigned long pfn, max_zone_pfn;
+ int ret = 0;
+ int n = 0;
+
+ if (!system_supports_mte())
+ return 0;
+
+ for_each_populated_zone(zone) {
+ max_zone_pfn = zone_end_pfn(zone);
+ for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
+ struct page *page = pfn_to_online_page(pfn);
+
+ if (!page)
+ continue;
+
+ if (!test_bit(PG_mte_tagged, &page->flags))
+ continue;
+
+ ret = save_tags(page, pfn);
+ if (ret) {
+ swsusp_mte_free_storage();
+ goto out;
+ }
+
+ n++;
+ }
+ }
+ pr_info("Saved %d MTE pages\n", n);
+
+out:
+ return ret;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+ XA_STATE(xa_state, &mte_pages, 0);
+ int n = 0;
+ void *tags;
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, ULONG_MAX) {
+ unsigned long pfn = xa_state.xa_index;
+ struct page *page = pfn_to_online_page(pfn);
+
+ mte_restore_page_tags(page_address(page), tags);
+
+ mte_free_tag_storage(tags);
+ n++;
+ }
+ xa_unlock(&mte_pages);
+
+ pr_info("Restored %d MTE pages\n", n);
+
+ xa_destroy(&mte_pages);
+}
+
+#else /* CONFIG_ARM64_MTE */
+
+static int swsusp_mte_save_tags(void)
+{
+ return 0;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
int swsusp_arch_suspend(void)
{
int ret = 0;
@@ -302,6 +413,10 @@ int swsusp_arch_suspend(void)
/* make the crash dump kernel image visible/saveable */
crash_prepare_suspend();
+ ret = swsusp_mte_save_tags();
+ if (ret)
+ return ret;
+
sleep_cpu = smp_processor_id();
ret = swsusp_save();
} else {
@@ -315,6 +430,8 @@ int swsusp_arch_suspend(void)
dcache_clean_range(__hyp_text_start, __hyp_text_end);
}
+ swsusp_mte_restore_tags();
+
/* make the crash dump kernel image protected again */
crash_post_resume();
@@ -332,11 +449,7 @@ int swsusp_arch_suspend(void)
* mitigation off behind our back, let's set the state
* to what we expect it to be.
*/
- switch (arm64_get_ssbd_state()) {
- case ARM64_SSBD_FORCE_ENABLE:
- case ARM64_SSBD_KERNEL:
- arm64_set_ssbd_mitigation(true);
- }
+ spectre_v4_enable_mitigation(NULL);
}
local_daif_restore(flags);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index af234a1e08b7..712e97c03e54 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -257,7 +257,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
* level.
*/
enable_debug_monitors(dbg_el);
- /* Fall through */
+ fallthrough;
case HW_BREAKPOINT_RESTORE:
/* Setup the address register. */
write_wb_reg(val_reg, i, info->address);
@@ -541,13 +541,13 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Fallthrough */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Fallthrough */
+ fallthrough;
default:
return -EINVAL;
}
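
The /* Fall through */ comments converted above use the kernel's fallthrough pseudo-keyword, which <linux/compiler_attributes.h> defines roughly as follows (quoted from memory):

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif
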
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9e897c500237..843ecfb16a69 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -64,12 +64,10 @@ __efistub__ctype = _ctype;
#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
/* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -101,8 +99,14 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
/* Static key checked in pmr_sync(). */
#ifdef CONFIG_ARM64_PSEUDO_NMI
KVM_NVHE_ALIAS(gic_pmr_sync);
+/* Static key checked in GIC_PRIO_IRQOFF. */
+KVM_NVHE_ALIAS(gic_nonsecure_priorities);
#endif
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index c7d38c660372..7bc3ba897901 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -62,7 +62,6 @@
*/
#define HEAD_SYMBOLS \
DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \
- DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
#endif /* __ARM64_KERNEL_IMAGE_H */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index a107375005bc..6c0de2f60ea9 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -60,16 +60,10 @@ bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
case AARCH64_INSN_HINT_XPACLRI:
case AARCH64_INSN_HINT_PACIA_1716:
case AARCH64_INSN_HINT_PACIB_1716:
- case AARCH64_INSN_HINT_AUTIA_1716:
- case AARCH64_INSN_HINT_AUTIB_1716:
case AARCH64_INSN_HINT_PACIAZ:
case AARCH64_INSN_HINT_PACIASP:
case AARCH64_INSN_HINT_PACIBZ:
case AARCH64_INSN_HINT_PACIBSP:
- case AARCH64_INSN_HINT_AUTIAZ:
- case AARCH64_INSN_HINT_AUTIASP:
- case AARCH64_INSN_HINT_AUTIBZ:
- case AARCH64_INSN_HINT_AUTIBSP:
case AARCH64_INSN_HINT_BTI:
case AARCH64_INSN_HINT_BTIC:
case AARCH64_INSN_HINT_BTIJ:
@@ -176,7 +170,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn)
bool __kprobes aarch64_insn_is_branch(u32 insn)
{
- /* b, bl, cb*, tb*, b.cond, br, blr */
+ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
return aarch64_insn_is_b(insn) ||
aarch64_insn_is_bl(insn) ||
@@ -185,8 +179,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
aarch64_insn_is_tbz(insn) ||
aarch64_insn_is_tbnz(insn) ||
aarch64_insn_is_ret(insn) ||
+ aarch64_insn_is_ret_auth(insn) ||
aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_br_auth(insn) ||
aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_blr_auth(insn) ||
aarch64_insn_is_bcond(insn);
}
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 04a327ccf84d..9cf2fb87584a 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -10,10 +10,10 @@
* Copyright (C) 2012 ARM Ltd.
*/
-#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/memory.h>
#include <linux/smp.h>
+#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/kprobes.h>
@@ -22,20 +22,11 @@
#include <asm/daifflags.h>
#include <asm/vmap_stack.h>
-unsigned long irq_err_count;
-
/* Only access this in an NMI enter/exit */
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
- show_ipi_list(p, prec);
- seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
- return 0;
-}
-
#ifdef CONFIG_VMAP_STACK
static void init_irq_stacks(void)
{
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 361a1143e09e..5b0e67b93cdc 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -215,8 +215,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
phys_addr_t start, end;
nr_ranges = 1; /* for exclusion of crashkernel region */
- for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
- MEMBLOCK_NONE, &start, &end, NULL)
+ for_each_mem_range(i, &start, &end)
nr_ranges++;
cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
@@ -225,8 +224,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
cmem->max_nr_ranges = nr_ranges;
cmem->nr_ranges = 0;
- for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
- MEMBLOCK_NONE, &start, &end, NULL) {
+ for_each_mem_range(i, &start, &end) {
cmem->ranges[cmem->nr_ranges].start = start;
cmem->ranges[cmem->nr_ranges].end = end - 1;
cmem->nr_ranges++;
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 0ce3a28e3347..2e224435c024 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -305,8 +305,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt_shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
mod->arch.init.plt_shndx = i;
- else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
- !strcmp(secstrings + sechdrs[i].sh_name,
+ else if (!strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline"))
tramp = sechdrs + i;
else if (sechdrs[i].sh_type == SHT_SYMTAB)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1cd1a4d0ed30..2a1ad95d9b2c 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -315,21 +315,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
@@ -397,7 +397,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
new file mode 100644
index 000000000000..52a0638ed967
--- /dev/null
+++ b/arch/arm64/kernel/mte.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/prctl.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/string.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/thread_info.h>
+#include <linux/uio.h>
+
+#include <asm/cpufeature.h>
+#include <asm/mte.h>
+#include <asm/ptrace.h>
+#include <asm/sysreg.h>
+
+static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+{
+ pte_t old_pte = READ_ONCE(*ptep);
+
+ if (check_swap && is_swap_pte(old_pte)) {
+ swp_entry_t entry = pte_to_swp_entry(old_pte);
+
+ if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
+ return;
+ }
+
+ mte_clear_page_tags(page_address(page));
+}
+
+void mte_sync_tags(pte_t *ptep, pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ long i, nr_pages = compound_nr(page);
+ bool check_swap = nr_pages == 1;
+
+ /* if PG_mte_tagged is set, tags have already been initialised */
+ for (i = 0; i < nr_pages; i++, page++) {
+ if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ mte_sync_page_tags(page, ptep, check_swap);
+ }
+}
+
+int memcmp_pages(struct page *page1, struct page *page2)
+{
+ char *addr1, *addr2;
+ int ret;
+
+ addr1 = page_address(page1);
+ addr2 = page_address(page2);
+ ret = memcmp(addr1, addr2, PAGE_SIZE);
+
+ if (!system_supports_mte() || ret)
+ return ret;
+
+ /*
+ * If the page content is identical but at least one of the pages is
+ * tagged, return non-zero to avoid KSM merging. If only one of the
+ * pages is tagged, set_pte_at() may zero or change the tags of the
+ * other page via mte_sync_tags().
+ */
+ if (test_bit(PG_mte_tagged, &page1->flags) ||
+ test_bit(PG_mte_tagged, &page2->flags))
+ return addr1 != addr2;
+
+ return ret;
+}
+
+static void update_sctlr_el1_tcf0(u64 tcf0)
+{
+ /* ISB required for the kernel uaccess routines */
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
+ isb();
+}
+
+static void set_sctlr_el1_tcf0(u64 tcf0)
+{
+ /*
+ * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
+ * optimisation. Disable preemption so that it does not see
+ * the variable update before the SCTLR_EL1.TCF0 one.
+ */
+ preempt_disable();
+ current->thread.sctlr_tcf0 = tcf0;
+ update_sctlr_el1_tcf0(tcf0);
+ preempt_enable();
+}
+
+static void update_gcr_el1_excl(u64 incl)
+{
+ u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+
+ /*
+ * Note that 'incl' is an include mask (controlled by the user via
+ * prctl()) while GCR_EL1 accepts an exclude mask.
+ * No need for ISB since this only affects EL0 currently, implicit
+ * with ERET.
+ */
+ sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
+}
+
+static void set_gcr_el1_excl(u64 incl)
+{
+ current->thread.gcr_user_incl = incl;
+ update_gcr_el1_excl(incl);
+}
+
+void flush_mte_state(void)
+{
+ if (!system_supports_mte())
+ return;
+
+ /* clear any pending asynchronous tag fault */
+ dsb(ish);
+ write_sysreg_s(0, SYS_TFSRE0_EL1);
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ /* disable tag checking */
+ set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+ /* reset tag generation mask */
+ set_gcr_el1_excl(0);
+}
+
+void mte_thread_switch(struct task_struct *next)
+{
+ if (!system_supports_mte())
+ return;
+
+ /* avoid expensive SCTLR_EL1 accesses if no change */
+ if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
+ update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+ update_gcr_el1_excl(next->thread.gcr_user_incl);
+}
+
+void mte_suspend_exit(void)
+{
+ if (!system_supports_mte())
+ return;
+
+ update_gcr_el1_excl(current->thread.gcr_user_incl);
+}
+
+long set_mte_ctrl(struct task_struct *task, unsigned long arg)
+{
+ u64 tcf0;
+ u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+
+ if (!system_supports_mte())
+ return 0;
+
+ switch (arg & PR_MTE_TCF_MASK) {
+ case PR_MTE_TCF_NONE:
+ tcf0 = SCTLR_EL1_TCF0_NONE;
+ break;
+ case PR_MTE_TCF_SYNC:
+ tcf0 = SCTLR_EL1_TCF0_SYNC;
+ break;
+ case PR_MTE_TCF_ASYNC:
+ tcf0 = SCTLR_EL1_TCF0_ASYNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (task != current) {
+ task->thread.sctlr_tcf0 = tcf0;
+ task->thread.gcr_user_incl = gcr_incl;
+ } else {
+ set_sctlr_el1_tcf0(tcf0);
+ set_gcr_el1_excl(gcr_incl);
+ }
+
+ return 0;
+}
+
+long get_mte_ctrl(struct task_struct *task)
+{
+ unsigned long ret;
+
+ if (!system_supports_mte())
+ return 0;
+
+ ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+
+ switch (task->thread.sctlr_tcf0) {
+ case SCTLR_EL1_TCF0_NONE:
+ return PR_MTE_TCF_NONE;
+ case SCTLR_EL1_TCF0_SYNC:
+ ret |= PR_MTE_TCF_SYNC;
+ break;
+ case SCTLR_EL1_TCF0_ASYNC:
+ ret |= PR_MTE_TCF_ASYNC;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Access MTE tags in another process' address space as given in mm. Update
+ * the number of tags copied. Return 0 if any tags copied, error otherwise.
+ * Inspired by __access_remote_vm().
+ */
+static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
+ struct iovec *kiov, unsigned int gup_flags)
+{
+ struct vm_area_struct *vma;
+ void __user *buf = kiov->iov_base;
+ size_t len = kiov->iov_len;
+ int ret;
+ int write = gup_flags & FOLL_WRITE;
+
+ if (!access_ok(buf, len))
+ return -EFAULT;
+
+ if (mmap_read_lock_killable(mm))
+ return -EIO;
+
+ while (len) {
+ unsigned long tags, offset;
+ void *maddr;
+ struct page *page = NULL;
+
+ ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
+ &vma, NULL);
+ if (ret <= 0)
+ break;
+
+ /*
+ * Only copy tags if the page has been mapped as PROT_MTE
+ * (PG_mte_tagged set). Otherwise the tags are not valid and
+ * not accessible to user. Moreover, an mprotect(PROT_MTE)
+ * would cause the existing tags to be cleared if the page
+ * was never mapped with PROT_MTE.
+ */
+ if (!test_bit(PG_mte_tagged, &page->flags)) {
+ ret = -EOPNOTSUPP;
+ put_page(page);
+ break;
+ }
+
+ /* limit access to the end of the page */
+ offset = offset_in_page(addr);
+ tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);
+
+ maddr = page_address(page);
+ if (write) {
+ tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
+ set_page_dirty_lock(page);
+ } else {
+ tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
+ }
+ put_page(page);
+
+ /* error accessing the tracer's buffer */
+ if (!tags)
+ break;
+
+ len -= tags;
+ buf += tags;
+ addr += tags * MTE_GRANULE_SIZE;
+ }
+ mmap_read_unlock(mm);
+
+ /* return an error if no tags copied */
+ kiov->iov_len = buf - kiov->iov_base;
+ if (!kiov->iov_len) {
+ /* check for error accessing the tracee's address space */
+ if (ret <= 0)
+ return -EIO;
+ else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Copy MTE tags in another process' address space at 'addr' to/from tracer's
+ * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
+ */
+static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
+ struct iovec *kiov, unsigned int gup_flags)
+{
+ struct mm_struct *mm;
+ int ret;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return -EPERM;
+
+ if (!tsk->ptrace || (current != tsk->parent) ||
+ ((get_dumpable(mm) != SUID_DUMP_USER) &&
+ !ptracer_capable(tsk, mm->user_ns))) {
+ mmput(mm);
+ return -EPERM;
+ }
+
+ ret = __access_remote_tags(mm, addr, kiov, gup_flags);
+ mmput(mm);
+
+ return ret;
+}
+
+int mte_ptrace_copy_tags(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ struct iovec kiov;
+ struct iovec __user *uiov = (void __user *)data;
+ unsigned int gup_flags = FOLL_FORCE;
+
+ if (!system_supports_mte())
+ return -EIO;
+
+ if (get_user(kiov.iov_base, &uiov->iov_base) ||
+ get_user(kiov.iov_len, &uiov->iov_len))
+ return -EFAULT;
+
+ if (request == PTRACE_POKEMTETAGS)
+ gup_flags |= FOLL_WRITE;
+
+ /* align addr to the MTE tag granule */
+ addr &= MTE_GRANULE_MASK;
+
+ ret = access_remote_tags(child, addr, &kiov, gup_flags);
+ if (!ret)
+ ret = put_user(kiov.iov_len, &uiov->iov_len);
+
+ return ret;
+}
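
set_mte_ctrl()/get_mte_ctrl() above back the PR_SET/GET_TAGGED_ADDR_CTRL prctl() interface. A minimal user-space sketch of enabling synchronous tag checking follows; the PR_* constants come from <linux/prctl.h> on a kernel with this series, and the 0xfffe include mask (all tags except 0) follows the MTE documentation example.

#include <sys/prctl.h>

/* Sketch: enable tagged addressing plus synchronous MTE tag checking */
static int enable_mte_sync(void)
{
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
			     (0xfffeUL << PR_MTE_TAG_SHIFT);

	return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
}
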
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 295d66490584..c07d7a034941 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
struct pv_time_stolen_time_region *reg;
reg = per_cpu_ptr(&stolen_time_region, cpu);
- if (!reg->kaddr) {
- pr_warn_once("stolen time enabled but not configured for cpu %d\n",
- cpu);
+
+ /*
+ * paravirt_steal_clock() may be called before the CPU
+ * online notification callback runs. Until the callback
+ * has run we just return zero.
+ */
+ if (!reg->kaddr)
return 0;
- }
return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
}
-static int stolen_time_dying_cpu(unsigned int cpu)
+static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;
@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
return 0;
}
-static int init_stolen_time_cpu(unsigned int cpu)
+static int stolen_time_cpu_online(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;
@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
return 0;
}
-static int pv_time_init_stolen_time(void)
+static int __init pv_time_init_stolen_time(void)
{
int ret;
- ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
- "hypervisor/arm/pvtime:starting",
- init_stolen_time_cpu, stolen_time_dying_cpu);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "hypervisor/arm/pvtime:online",
+ stolen_time_cpu_online,
+ stolen_time_cpu_down_prepare);
if (ret < 0)
return ret;
return 0;
}
-static bool has_pv_steal_clock(void)
+static bool __init has_pv_steal_clock(void)
{
struct arm_smccc_res res;
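
Unlike the STARTING state used before, CPUHP_AP_ONLINE_DYN callbacks run in a context that may sleep, and the state number is allocated dynamically. The generic registration pattern, with illustrative names:

/* Generic shape of a dynamically allocated hotplug state (illustrative names) */
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/example:online",
			example_cpu_online,		/* called as each CPU comes up */
			example_cpu_down_prepare);	/* called before a CPU goes down */
if (ret < 0)
	return ret;	/* for DYN states, a return >= 0 is the allocated state number */
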
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index b0e03e052dd1..88ff471b0bce 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -137,11 +137,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
* whilst unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
-static int callchain_trace(struct stackframe *frame, void *data)
+static bool callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
- perf_callchain_store(entry, frame->pc);
- return 0;
+ perf_callchain_store(entry, pc);
+ return true;
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 462f9a9cc44b..3605f77ad4df 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -69,6 +69,9 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,
+
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
@@ -302,13 +305,33 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
.attrs = armv8_pmuv3_format_attrs,
};
+static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
+
+ return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
+}
+
+static DEVICE_ATTR_RO(slots);
+
+static struct attribute *armv8_pmuv3_caps_attrs[] = {
+ &dev_attr_slots.attr,
+ NULL,
+};
+
+static struct attribute_group armv8_pmuv3_caps_attr_group = {
+ .name = "caps",
+ .attrs = armv8_pmuv3_caps_attrs,
+};
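The new "caps" directory sits alongside "events" and "format" under the PMU's sysfs node. A small sketch of reading it from userspace, assuming the PMU instance is registered as armv8_pmuv3_0 (the instance name varies by platform):

#include <stdio.h>

int main(void)
{
	unsigned int slots = 0;
	FILE *f = fopen("/sys/bus/event_source/devices/armv8_pmuv3_0/caps/slots", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%x", &slots) == 1)	/* file contents look like 0x00000005 */
		printf("PMMIR_EL1.SLOTS = %u\n", slots);
	fclose(f);
	return 0;
}

A value of zero simply means the CPU does not expose PMMIR_EL1 (PMUv3 prior to ARMv8.4), matching the probe logic added further down.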
+
/*
* Perf Events' indices
*/
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
-#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
- (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
/*
@@ -348,6 +371,73 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
+/*
+ * Accessors for the PMEVCNTR<n>_EL0/PMEVTYPER<n>_EL0 registers: the register
+ * name has to be a compile-time token, so a runtime counter index is
+ * dispatched through the switch generated below.
+ */
+
+#define PMEVN_CASE(n, case_macro) \
+ case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro) \
+ do { \
+ switch (x) { \
+ PMEVN_CASE(0, case_macro); \
+ PMEVN_CASE(1, case_macro); \
+ PMEVN_CASE(2, case_macro); \
+ PMEVN_CASE(3, case_macro); \
+ PMEVN_CASE(4, case_macro); \
+ PMEVN_CASE(5, case_macro); \
+ PMEVN_CASE(6, case_macro); \
+ PMEVN_CASE(7, case_macro); \
+ PMEVN_CASE(8, case_macro); \
+ PMEVN_CASE(9, case_macro); \
+ PMEVN_CASE(10, case_macro); \
+ PMEVN_CASE(11, case_macro); \
+ PMEVN_CASE(12, case_macro); \
+ PMEVN_CASE(13, case_macro); \
+ PMEVN_CASE(14, case_macro); \
+ PMEVN_CASE(15, case_macro); \
+ PMEVN_CASE(16, case_macro); \
+ PMEVN_CASE(17, case_macro); \
+ PMEVN_CASE(18, case_macro); \
+ PMEVN_CASE(19, case_macro); \
+ PMEVN_CASE(20, case_macro); \
+ PMEVN_CASE(21, case_macro); \
+ PMEVN_CASE(22, case_macro); \
+ PMEVN_CASE(23, case_macro); \
+ PMEVN_CASE(24, case_macro); \
+ PMEVN_CASE(25, case_macro); \
+ PMEVN_CASE(26, case_macro); \
+ PMEVN_CASE(27, case_macro); \
+ PMEVN_CASE(28, case_macro); \
+ PMEVN_CASE(29, case_macro); \
+ PMEVN_CASE(30, case_macro); \
+ default: WARN(1, "Invalid PMEV* index\n"); \
+ } \
+ } while (0)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
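To make the switch machinery concrete: after preprocessing, a call such as read_pmevcntrn(3) behaves as if it had been written out by hand as in the sketch below (a single case arm is shown, not the full 31-way expansion):

static unsigned long read_pmevcntrn_sketch(int n)
{
	switch (n) {
	/* ...cases 0-2... */
	case 3:
		return read_sysreg(pmevcntr3_el0);	/* PMEVN_CASE(3, RETURN_READ_PMEVCNTRN) */
	/* ...cases 4-30... */
	default:
		WARN(1, "Invalid PMEV* index\n");
	}
	return 0;
}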
+
static inline u32 armv8pmu_pmcr_read(void)
{
return read_sysreg(pmcr_el0);
@@ -365,28 +455,16 @@ static inline int armv8pmu_has_overflowed(u32 pmovsr)
return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
-static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
-{
- return idx >= ARMV8_IDX_CYCLE_COUNTER &&
- idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
-}
-
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline void armv8pmu_select_counter(int idx)
+static inline u32 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- write_sysreg(counter, pmselr_el0);
- isb();
-}
-static inline u64 armv8pmu_read_evcntr(int idx)
-{
- armv8pmu_select_counter(idx);
- return read_sysreg(pmxevcntr_el0);
+ return read_pmevcntrn(counter);
}
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
@@ -440,15 +518,11 @@ static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
static u64 armv8pmu_read_counter(struct perf_event *event)
{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u64 value = 0;
- if (!armv8pmu_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u reading wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_sysreg(pmccntr_el0);
else
value = armv8pmu_read_hw_counter(event);
@@ -458,8 +532,9 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
- armv8pmu_select_counter(idx);
- write_sysreg(value, pmxevcntr_el0);
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
+ write_pmevcntrn(counter, value);
}
static inline void armv8pmu_write_hw_counter(struct perf_event *event,
@@ -477,16 +552,12 @@ static inline void armv8pmu_write_hw_counter(struct perf_event *event,
static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
value = armv8pmu_bias_long_counter(event, value);
- if (!armv8pmu_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u writing wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
write_sysreg(value, pmccntr_el0);
else
armv8pmu_write_hw_counter(event, value);
@@ -494,9 +565,10 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
- armv8pmu_select_counter(idx);
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
val &= ARMV8_PMU_EVTYPE_MASK;
- write_sysreg(val, pmxevtyper_el0);
+ write_pmevtypern(counter, val);
}
static inline void armv8pmu_write_event_type(struct perf_event *event)
@@ -516,7 +588,10 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
armv8pmu_write_evtype(idx - 1, hwc->config_base);
armv8pmu_write_evtype(idx, chain_evt);
} else {
- armv8pmu_write_evtype(idx, hwc->config_base);
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ write_sysreg(hwc->config_base, pmccfiltr_el0);
+ else
+ armv8pmu_write_evtype(idx, hwc->config_base);
}
}
@@ -532,6 +607,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
static inline void armv8pmu_enable_counter(u32 mask)
{
+ /*
+ * Make sure event configuration register writes are visible before we
+ * enable the counter.
+ */
+ isb();
write_sysreg(mask, pmcntenset_el0);
}
@@ -550,6 +630,11 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
static inline void armv8pmu_disable_counter(u32 mask)
{
write_sysreg(mask, pmcntenclr_el0);
+ /*
+ * Make sure the effects of disabling the counter are visible before we
+ * start configuring the event.
+ */
+ isb();
}
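The two barriers pair up across armv8pmu_enable_event(): the disable path issues its isb() after clearing the enable bit, the enable path issues one before setting it, so a reprogramming sequence is ordered roughly as in this annotated sketch (kernel-internal helpers from this file, shown only to illustrate the ordering):

	/* armv8pmu_enable_event(), with the barriers spelled out: */
	armv8pmu_disable_event_counter(event);	/* pmcntenclr_el0 write, then isb() */
	armv8pmu_write_event_type(event);	/* safe: counter observed as disabled */
	armv8pmu_enable_event_irq(event);
	armv8pmu_enable_event_counter(event);	/* isb(), then pmcntenset_el0 write */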
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
@@ -606,15 +691,10 @@ static inline u32 armv8pmu_getreset_flags(void)
static void armv8pmu_enable_event(struct perf_event *event)
{
- unsigned long flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
@@ -622,7 +702,7 @@ static void armv8pmu_enable_event(struct perf_event *event)
armv8pmu_disable_event_counter(event);
/*
- * Set event (if destined for PMNx counters).
+ * Set event.
*/
armv8pmu_write_event_type(event);
@@ -635,21 +715,10 @@ static void armv8pmu_enable_event(struct perf_event *event)
* Enable counter
*/
armv8pmu_enable_event_counter(event);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
- unsigned long flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- /*
- * Disable counter and interrupt
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
/*
* Disable counter
*/
@@ -659,30 +728,18 @@ static void armv8pmu_disable_event(struct perf_event *event)
* Disable interrupt for this counter
*/
armv8pmu_disable_event_irq(event);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
@@ -735,20 +792,16 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
+ /*
+ * Perf event overflow will queue the processing of the event as
+ * an irq_work which will be taken care of in the handling of
+ * IPI_IRQ_WORK.
+ */
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
armv8pmu_start(cpu_pmu);
- /*
- * Handle the pending perf events.
- *
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
- * will not work.
- */
- irq_work_run();
-
return IRQ_HANDLED;
}
@@ -997,6 +1050,12 @@ static void __armv8pmu_probe_pmu(void *info)
bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+
+ /* store PMMIR_EL1 register for sysfs */
+ if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
+ cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
+ else
+ cpu_pmu->reg_pmmir = 0;
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
@@ -1019,7 +1078,8 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
int (*map_event)(struct perf_event *event),
const struct attribute_group *events,
- const struct attribute_group *format)
+ const struct attribute_group *format,
+ const struct attribute_group *caps)
{
int ret = armv8pmu_probe_pmu(cpu_pmu);
if (ret)
@@ -1044,104 +1104,112 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
events : &armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
format : &armv8_pmuv3_format_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
+ caps : &armv8_pmuv3_caps_attr_group;
return 0;
}
+static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
+ int (*map_event)(struct perf_event *event))
+{
+ return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
+}
+
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_pmuv3",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3",
+ armv8_pmuv3_map_event);
}
static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34",
+ armv8_pmuv3_map_event);
}
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35",
- armv8_a53_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
+ armv8_a53_map_event);
}
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53",
- armv8_a53_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
+ armv8_a53_map_event);
}
static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55",
+ armv8_pmuv3_map_event);
}
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57",
- armv8_a57_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
+ armv8_a57_map_event);
}
static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65",
+ armv8_pmuv3_map_event);
}
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72",
- armv8_a57_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
+ armv8_a57_map_event);
}
static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73",
- armv8_a73_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
+ armv8_a73_map_event);
}
static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a75",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75",
+ armv8_pmuv3_map_event);
}
static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76",
+ armv8_pmuv3_map_event);
}
static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77",
+ armv8_pmuv3_map_event);
}
static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1",
+ armv8_pmuv3_map_event);
}
static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1",
+ armv8_pmuv3_map_event);
}
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder",
- armv8_thunder_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
+ armv8_thunder_map_event);
}
static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan",
- armv8_vulcan_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
+ armv8_vulcan_map_event);
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 666b225aeb3a..94e8718e7229 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -16,7 +16,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
/*
* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
- * we're stuck with it for ABI compatability reasons.
+ * we're stuck with it for ABI compatibility reasons.
*
* For a 32-bit consumer inspecting a 32-bit task, then it will look at
* the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 263d5fba4c8a..104101f633b1 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
aarch64_insn_is_msr_imm(insn) ||
aarch64_insn_is_msr_reg(insn) ||
aarch64_insn_is_exception(insn) ||
- aarch64_insn_is_eret(insn))
+ aarch64_insn_is_eret(insn) ||
+ aarch64_insn_is_eret_auth(insn))
return false;
/*
@@ -42,8 +43,10 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
!= AARCH64_INSN_SPCLREG_DAIF;
/*
- * The HINT instruction is is problematic when single-stepping,
- * except for the NOP case.
+ * The HINT instruction is steppable only if it is in the whitelist;
+ * all other HINT instructions are blocked from single stepping as
+ * they may cause an exception or other unintended behaviour.
*/
if (aarch64_insn_is_hint(insn))
return aarch64_insn_is_steppable_hint(insn);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 5290f17a4d80..deba738142ed 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -464,87 +464,15 @@ int __init arch_populate_kprobe_blacklist(void)
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =
- (unsigned long)&kretprobe_trampoline;
- kprobe_opcode_t *correct_ret_addr = NULL;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * return probes installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always pushed into the head of the list
- * - when multiple return probes are registered for the same
- * function, the (chronologically) first instance's ret_addr
- * will be the real return address, and all the rest will
- * point to kretprobe_trampoline.
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (ri->rp && ri->rp->handler) {
- __this_cpu_write(current_kprobe, &ri->rp->kp);
- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
- ri->ret_addr = correct_ret_addr;
- ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, NULL);
- }
-
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
- return (void *)orig_ret_address;
+ return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline,
+ (void *)kernel_stack_pointer(regs));
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
+ ri->fp = (void *)kernel_stack_pointer(regs);
/* replace return addr (x30) with trampoline */
regs->regs[30] = (long)&kretprobe_trampoline;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 84ec630b8ab5..4784011cecac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
@@ -52,6 +53,7 @@
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
+#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
@@ -123,10 +125,8 @@ void arch_cpu_idle(void)
* This should do all the clock switching and wait for interrupt
* tricks
*/
- trace_cpu_idle_rcuidle(1, smp_processor_id());
cpu_do_idle();
local_irq_enable();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -241,7 +241,7 @@ static void print_pstate(struct pt_regs *regs)
const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
PSR_BTYPE_SHIFT];
- printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO BTYPE=%s)\n",
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
pstate,
pstate & PSR_N_BIT ? 'N' : 'n',
pstate & PSR_Z_BIT ? 'Z' : 'z',
@@ -253,6 +253,7 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_F_BIT ? 'F' : 'f',
pstate & PSR_PAN_BIT ? '+' : '-',
pstate & PSR_UAO_BIT ? '+' : '-',
+ pstate & PSR_TCO_BIT ? '+' : '-',
btype_str);
}
}
@@ -338,6 +339,7 @@ void flush_thread(void)
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
flush_tagged_addr_state();
+ flush_mte_state();
}
void release_thread(struct task_struct *dead_task)
@@ -370,6 +372,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
+ /* clear any pending asynchronous tag fault raised by the parent */
+ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
+
return 0;
}
@@ -423,8 +428,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- set_ssbs_bit(childregs);
+ spectre_v4_enable_task_mitigation(p);
if (system_uses_irq_prio_masking())
childregs->pmr_save = GIC_PRIO_IRQON;
@@ -474,8 +478,6 @@ void uao_thread_switch(struct task_struct *next)
*/
static void ssbs_thread_switch(struct task_struct *next)
{
- struct pt_regs *regs = task_pt_regs(next);
-
/*
* Nothing to do for kernel threads, but 'regs' may be junk
* (e.g. idle task) so check the flags and bail early.
@@ -487,18 +489,10 @@ static void ssbs_thread_switch(struct task_struct *next)
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
- if (cpu_have_feature(cpu_feature(SSBS)))
+ if (cpus_have_const_cap(ARM64_SSBS))
return;
- /* If the mitigation is enabled, then we leave SSBS clear. */
- if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
- test_tsk_thread_flag(next, TIF_SSBD))
- return;
-
- if (compat_user_mode(regs))
- set_compat_ssbs_bit(regs);
- else if (user_mode(regs))
- set_ssbs_bit(regs);
+ spectre_v4_enable_task_mitigation(next);
}
/*
@@ -516,6 +510,39 @@ static void entry_task_switch(struct task_struct *next)
}
/*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ * Assuming the virtual counter is enabled at the beginning of times:
+ *
+ * - disable access when switching from a 64bit task to a 32bit task
+ * - enable access when switching from a 32bit task to a 64bit task
+ */
+static void erratum_1418040_thread_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ bool prev32, next32;
+ u64 val;
+
+ if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+ return;
+
+ prev32 = is_compat_thread(task_thread_info(prev));
+ next32 = is_compat_thread(task_thread_info(next));
+
+ if (prev32 == next32)
+ return;
+
+ val = read_sysreg(cntkctl_el1);
+
+ if (!next32)
+ val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+ else
+ val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ write_sysreg(val, cntkctl_el1);
+}
+
+/*
* Thread switching.
*/
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
@@ -530,6 +557,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ssbs_thread_switch(next);
+ erratum_1418040_thread_switch(prev, next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -539,6 +567,13 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
*/
dsb(ish);
+ /*
+ * MTE thread switching must happen after the DSB above to ensure that
+ * any asynchronous tag check faults have been logged in the TFSR*_EL1
+ * registers.
+ */
+ mte_thread_switch(next);
+
/* the actual thread switch */
last = cpu_switch_to(prev, next);
@@ -588,6 +623,11 @@ void arch_setup_new_exec(void)
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
ptrauth_thread_init_user(current);
+
+ if (task_spec_ssb_noexec(current)) {
+ arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+ PR_SPEC_ENABLE);
+ }
}
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
@@ -596,11 +636,18 @@ void arch_setup_new_exec(void)
*/
static unsigned int tagged_addr_disabled;
-long set_tagged_addr_ctrl(unsigned long arg)
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
- if (is_compat_task())
+ unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
return -EINVAL;
- if (arg & ~PR_TAGGED_ADDR_ENABLE)
+
+ if (system_supports_mte())
+ valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
+
+ if (arg & ~valid_mask)
return -EINVAL;
/*
@@ -610,20 +657,28 @@ long set_tagged_addr_ctrl(unsigned long arg)
if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
return -EINVAL;
- update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+ if (set_mte_ctrl(task, arg) != 0)
+ return -EINVAL;
+
+ update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
return 0;
}
-long get_tagged_addr_ctrl(void)
+long get_tagged_addr_ctrl(struct task_struct *task)
{
- if (is_compat_task())
+ long ret = 0;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
return -EINVAL;
- if (test_thread_flag(TIF_TAGGED_ADDR))
- return PR_TAGGED_ADDR_ENABLE;
+ if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
+ ret = PR_TAGGED_ADDR_ENABLE;
- return 0;
+ ret |= get_mte_ctrl(task);
+
+ return ret;
}
/*
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
new file mode 100644
index 000000000000..68b710f1b43f
--- /dev/null
+++ b/arch/arm64/kernel/proton-pack.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
+ * detailed at:
+ *
+ * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
+ *
+ * This code was originally written hastily under an awful lot of stress and so
+ * aspects of it are somewhat hacky. Unfortunately, changing anything in here
+ * instantly makes me feel ill. Thanks, Jann. Thann.
+ *
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ * Copyright (C) 2020 Google LLC
+ *
+ * "If there's something strange in your neighbourhood, who you gonna call?"
+ *
+ * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/spectre.h>
+#include <asm/traps.h>
+
+/*
+ * We try to ensure that the mitigation state can never change as the result of
+ * onlining a late CPU.
+ */
+static void update_mitigation_state(enum mitigation_state *oldp,
+ enum mitigation_state new)
+{
+ enum mitigation_state state;
+
+ do {
+ state = READ_ONCE(*oldp);
+ if (new <= state)
+ break;
+
+ /* Userspace almost certainly can't deal with this. */
+ if (WARN_ON(system_capabilities_finalized()))
+ break;
+ } while (cmpxchg_relaxed(oldp, state, new) != state);
+}
+
+/*
+ * Spectre v1.
+ *
+ * The kernel can't protect userspace for this one: it's each person for
+ * themselves. Advertise what we're doing and be done with it.
+ */
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+/*
+ * Spectre v2.
+ *
+ * This one sucks. A CPU is either:
+ *
+ * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in software by firmware.
+ * - Mitigated in software by a CPU-specific dance in the kernel.
+ * - Vulnerable.
+ *
+ * It's not unlikely for different CPUs in a big.LITTLE system to fall into
+ * different camps.
+ */
+static enum mitigation_state spectre_v2_state;
+
+static bool __read_mostly __nospectre_v2;
+static int __init parse_spectre_v2_param(char *str)
+{
+ __nospectre_v2 = true;
+ return 0;
+}
+early_param("nospectre_v2", parse_spectre_v2_param);
+
+static bool spectre_v2_mitigations_off(void)
+{
+ bool ret = __nospectre_v2 || cpu_mitigations_off();
+
+ if (ret)
+ pr_info_once("spectre-v2 mitigation disabled by command line option\n");
+
+ return ret;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ switch (spectre_v2_state) {
+ case SPECTRE_UNAFFECTED:
+ return sprintf(buf, "Not affected\n");
+ case SPECTRE_MITIGATED:
+ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+}
+
+static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
+{
+ u64 pfr0;
+ static const struct midr_range spectre_v2_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ { /* sentinel */ }
+ };
+
+ /* If the CPU has CSV2 set, we're safe */
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return SPECTRE_UNAFFECTED;
+
+ /* Alternatively, we have a list of unaffected CPUs */
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ return SPECTRE_UNAFFECTED;
+
+ return SPECTRE_VULNERABLE;
+}
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED (1)
+
+static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ fallthrough;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
+ return false;
+
+ if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
+ return false;
+
+ return true;
+}
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+enum mitigation_state arm64_get_spectre_v2_state(void)
+{
+ return spectre_v2_state;
+}
+
+#ifdef CONFIG_KVM
+#include <asm/cacheflush.h>
+#include <asm/kvm_asm.h>
+
+atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+ static DEFINE_RAW_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+ const char *hyp_vecs_start = __smccc_workaround_1_smc;
+ const char *hyp_vecs_end = __smccc_workaround_1_smc +
+ __SMCCC_WORKAROUND_1_SMC_SZ;
+
+ /*
+ * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+ * we're a guest. Skip the hyp-vectors work.
+ */
+ if (!is_hyp_mode_available()) {
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ return;
+ }
+
+ raw_spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ raw_spin_unlock(&bp_lock);
+}
+#else
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void qcom_link_stack_sanitisation(void)
+{
+ u64 tmp;
+
+ asm volatile("mov %0, x30 \n"
+ ".rept 16 \n"
+ "bl . + 4 \n"
+ ".endr \n"
+ "mov x30, %0 \n"
+ : "=&r" (tmp));
+}
+
+static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
+{
+ bp_hardening_cb_t cb;
+ enum mitigation_state state;
+
+ state = spectre_v2_get_cpu_fw_mitigation_state();
+ if (state != SPECTRE_MITIGATED)
+ return state;
+
+ if (spectre_v2_mitigations_off())
+ return SPECTRE_VULNERABLE;
+
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ cb = call_hvc_arch_workaround_1;
+ break;
+
+ case SMCCC_CONDUIT_SMC:
+ cb = call_smc_arch_workaround_1;
+ break;
+
+ default:
+ return SPECTRE_VULNERABLE;
+ }
+
+ install_bp_hardening_cb(cb);
+ return SPECTRE_MITIGATED;
+}
+
+static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
+{
+ u32 midr;
+
+ if (spectre_v2_mitigations_off())
+ return SPECTRE_VULNERABLE;
+
+ midr = read_cpuid_id();
+ if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
+ ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
+ return SPECTRE_VULNERABLE;
+
+ install_bp_hardening_cb(qcom_link_stack_sanitisation);
+ return SPECTRE_MITIGATED;
+}
+
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+ enum mitigation_state state;
+
+ WARN_ON(preemptible());
+
+ state = spectre_v2_get_cpu_hw_mitigation_state();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v2_enable_fw_mitigation();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v2_enable_sw_mitigation();
+
+ update_mitigation_state(&spectre_v2_state, state);
+}
+
+/*
+ * Spectre v4.
+ *
+ * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
+ * either:
+ *
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in hardware via PSTATE.SSBS.
+ * - Mitigated in software by firmware (sometimes referred to as SSBD).
+ *
+ * Wait, that doesn't sound so bad, does it? Keep reading...
+ *
+ * A major source of headaches is that the software mitigation is enabled both
+ * on a per-task basis, but can also be forced on for the kernel, necessitating
+ * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
+ * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
+ * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
+ * so you can have systems that have both firmware and SSBS mitigations. This
+ * means we actually have to reject late onlining of CPUs with mitigations if
+ * all of the currently onlined CPUs are safelisted, as the mitigation tends to
+ * be opt-in for userspace. Yes, really, the cure is worse than the disease.
+ *
+ * The only good part is that if the firmware mitigation is present, then it is
+ * present for all CPUs, meaning we don't have to worry about late onlining of a
+ * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
+ *
+ * Give me a VAX-11/780 any day of the week...
+ */
+static enum mitigation_state spectre_v4_state;
+
+/* This is the per-cpu state tracking whether we need to talk to firmware */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+enum spectre_v4_policy {
+ SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
+ SPECTRE_V4_POLICY_MITIGATION_ENABLED,
+ SPECTRE_V4_POLICY_MITIGATION_DISABLED,
+};
+
+static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
+
+static const struct spectre_v4_param {
+ const char *str;
+ enum spectre_v4_policy policy;
+} spectre_v4_params[] = {
+ { "force-on", SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
+ { "force-off", SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
+ { "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
+};
+static int __init parse_spectre_v4_param(char *str)
+{
+ int i;
+
+ if (!str || !str[0])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
+ const struct spectre_v4_param *param = &spectre_v4_params[i];
+
+ if (strncmp(str, param->str, strlen(param->str)))
+ continue;
+
+ __spectre_v4_policy = param->policy;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+early_param("ssbd", parse_spectre_v4_param);
+
+/*
+ * Because this was all written in a rush by people working in different silos,
+ * we've ended up with multiple command line options to control the same thing.
+ * Wrap these up in some helpers, which prefer disabling the mitigation if faced
+ * with contradictory parameters. The mitigation is always either "off",
+ * "dynamic" or "on".
+ */
+static bool spectre_v4_mitigations_off(void)
+{
+ bool ret = cpu_mitigations_off() ||
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
+
+ if (ret)
+ pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
+
+ return ret;
+}
+
+/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
+static bool spectre_v4_mitigations_dynamic(void)
+{
+ return !spectre_v4_mitigations_off() &&
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
+}
+
+static bool spectre_v4_mitigations_on(void)
+{
+ return !spectre_v4_mitigations_off() &&
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ switch (spectre_v4_state) {
+ case SPECTRE_UNAFFECTED:
+ return sprintf(buf, "Not affected\n");
+ case SPECTRE_MITIGATED:
+ return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+}
+
+enum mitigation_state arm64_get_spectre_v4_state(void)
+{
+ return spectre_v4_state;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
+{
+ static const struct midr_range spectre_v4_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ { /* sentinel */ },
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+ return SPECTRE_UNAFFECTED;
+
+ /* CPU features are detected first */
+ if (this_cpu_has_cap(ARM64_SSBS))
+ return SPECTRE_MITIGATED;
+
+ return SPECTRE_VULNERABLE;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ fallthrough;
+ case SMCCC_RET_NOT_REQUIRED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ fallthrough;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
+{
+ enum mitigation_state state;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ state = spectre_v4_get_cpu_hw_mitigation_state();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v4_get_cpu_fw_mitigation_state();
+
+ return state != SPECTRE_UNAFFECTED;
+}
+
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
+ if (user_mode(regs))
+ return 1;
+
+ if (instr & BIT(PSTATE_Imm_shift))
+ regs->pstate |= PSR_SSBS_BIT;
+ else
+ regs->pstate &= ~PSR_SSBS_BIT;
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return 0;
+}
+
+static struct undef_hook ssbs_emulation_hook = {
+ .instr_mask = ~(1U << PSTATE_Imm_shift),
+ .instr_val = 0xd500401f | PSTATE_SSBS,
+ .fn = ssbs_emulation_handler,
+};
+
+static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+{
+ static bool undef_hook_registered = false;
+ static DEFINE_RAW_SPINLOCK(hook_lock);
+ enum mitigation_state state;
+
+ /*
+ * If the system is mitigated but this CPU doesn't have SSBS, then
+ * we must be on the safelist and there's nothing more to do.
+ */
+ state = spectre_v4_get_cpu_hw_mitigation_state();
+ if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
+ return state;
+
+ raw_spin_lock(&hook_lock);
+ if (!undef_hook_registered) {
+ register_undef_hook(&ssbs_emulation_hook);
+ undef_hook_registered = true;
+ }
+ raw_spin_unlock(&hook_lock);
+
+ if (spectre_v4_mitigations_off()) {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+ asm volatile(SET_PSTATE_SSBS(1));
+ return SPECTRE_VULNERABLE;
+ }
+
+ /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
+ asm volatile(SET_PSTATE_SSBS(0));
+ return SPECTRE_MITIGATED;
+}
+
+/*
+ * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
+ * we fallthrough and check whether firmware needs to be called on this CPU.
+ */
+void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ BUG_ON(nr_inst != 1); /* Branch -> NOP */
+
+ if (spectre_v4_mitigations_off())
+ return;
+
+ if (cpus_have_final_cap(ARM64_SSBS))
+ return;
+
+ if (spectre_v4_mitigations_dynamic())
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/*
+ * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
+ * to call into firmware to adjust the mitigation state.
+ */
+void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ u32 insn;
+
+ BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
+
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ insn = aarch64_insn_get_hvc_value();
+ break;
+ case SMCCC_CONDUIT_SMC:
+ insn = aarch64_insn_get_smc_value();
+ break;
+ default:
+ return;
+ }
+
+ *updptr = cpu_to_le32(insn);
+}
+
+static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
+{
+ enum mitigation_state state;
+
+ state = spectre_v4_get_cpu_fw_mitigation_state();
+ if (state != SPECTRE_MITIGATED)
+ return state;
+
+ if (spectre_v4_mitigations_off()) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
+ return SPECTRE_VULNERABLE;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
+
+ if (spectre_v4_mitigations_dynamic())
+ __this_cpu_write(arm64_ssbd_callback_required, 1);
+
+ return SPECTRE_MITIGATED;
+}
+
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+ enum mitigation_state state;
+
+ WARN_ON(preemptible());
+
+ state = spectre_v4_enable_hw_mitigation();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v4_enable_fw_mitigation();
+
+ update_mitigation_state(&spectre_v4_state, state);
+}
+
+static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
+{
+ u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ if (state)
+ regs->pstate |= bit;
+ else
+ regs->pstate &= ~bit;
+}
+
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
+{
+ struct pt_regs *regs = task_pt_regs(tsk);
+ bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
+
+ if (spectre_v4_mitigations_off())
+ ssbs = true;
+ else if (spectre_v4_mitigations_dynamic() && !kthread)
+ ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
+
+ __update_pstate_ssbs(regs, ssbs);
+}
+
+/*
+ * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
+ * This is interesting because the "speculation disabled" behaviour can be
+ * configured so that it is preserved across exec(), which means that the
+ * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
+ * from userspace.
+ */
+static void ssbd_prctl_enable_mitigation(struct task_struct *task)
+{
+ task_clear_spec_ssb_noexec(task);
+ task_set_spec_ssb_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static void ssbd_prctl_disable_mitigation(struct task_struct *task)
+{
+ task_clear_spec_ssb_noexec(task);
+ task_clear_spec_ssb_disable(task);
+ clear_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ /* Enable speculation: disable mitigation */
+ /*
+ * Force disabled speculation prevents it from being
+ * re-enabled.
+ */
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+
+ /*
+ * If the mitigation is forced on, then speculation is forced
+ * off and we again prevent it from being re-enabled.
+ */
+ if (spectre_v4_mitigations_on())
+ return -EPERM;
+
+ ssbd_prctl_disable_mitigation(task);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ /* Force disable speculation: force enable mitigation */
+ /*
+ * If the mitigation is forced off, then speculation is forced
+ * on and we prevent it from being disabled.
+ */
+ if (spectre_v4_mitigations_off())
+ return -EPERM;
+
+ task_set_spec_ssb_force_disable(task);
+ fallthrough;
+ case PR_SPEC_DISABLE:
+ /* Disable speculation: enable mitigation */
+ /* Same as PR_SPEC_FORCE_DISABLE */
+ if (spectre_v4_mitigations_off())
+ return -EPERM;
+
+ ssbd_prctl_enable_mitigation(task);
+ break;
+ case PR_SPEC_DISABLE_NOEXEC:
+ /* Disable speculation until execve(): enable mitigation */
+ /*
+ * If the mitigation state is forced one way or the other, then
+ * we must fail now before we try to toggle it on execve().
+ */
+ if (task_spec_ssb_force_disable(task) ||
+ spectre_v4_mitigations_off() ||
+ spectre_v4_mitigations_on()) {
+ return -EPERM;
+ }
+
+ ssbd_prctl_enable_mitigation(task);
+ task_set_spec_ssb_noexec(task);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ spectre_v4_enable_task_mitigation(task);
+ return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+}
+
+static int ssbd_prctl_get(struct task_struct *task)
+{
+ switch (spectre_v4_state) {
+ case SPECTRE_UNAFFECTED:
+ return PR_SPEC_NOT_AFFECTED;
+ case SPECTRE_MITIGATED:
+ if (spectre_v4_mitigations_on())
+ return PR_SPEC_NOT_AFFECTED;
+
+ if (spectre_v4_mitigations_dynamic())
+ break;
+
+ /* Mitigations are disabled, so we're vulnerable. */
+ fallthrough;
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return PR_SPEC_ENABLE;
+ }
+
+ /* Check the mitigation state for this task */
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+
+ if (task_spec_ssb_noexec(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
+
+ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+}
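The prctl() pair above is reached through the generic speculation-control interface. A minimal userspace sketch, with the constants defined locally in case <sys/prctl.h> predates them (values as in the uapi header):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SPEC_STORE_BYPASS
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Opt this task in to the SSB mitigation ("disable speculation"). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	printf("ssb state: 0x%lx\n",
	       (unsigned long)prctl(PR_GET_SPECULATION_CTRL,
				    PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}

The value printed decodes against the PR_SPEC_* flags returned by ssbd_prctl_get() above, e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE once the set call has succeeded.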
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index d8ebfd813e28..f49b349e16a3 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -34,6 +34,7 @@
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
+#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
@@ -1032,6 +1033,35 @@ static int pac_generic_keys_set(struct task_struct *target,
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+static int tagged_addr_ctrl_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ long ctrl = get_tagged_addr_ctrl(target);
+
+ if (IS_ERR_VALUE(ctrl))
+ return ctrl;
+
+ return membuf_write(&to, &ctrl, sizeof(ctrl));
+}
+
+static int tagged_addr_ctrl_set(struct task_struct *target, const struct
+ user_regset *regset, unsigned int pos,
+ unsigned int count, const void *kbuf, const
+ void __user *ubuf)
+{
+ int ret;
+ long ctrl;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
+ if (ret)
+ return ret;
+
+ return set_tagged_addr_ctrl(target, ctrl);
+}
+#endif
+
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -1051,6 +1081,9 @@ enum aarch64_regset {
REGSET_PACG_KEYS,
#endif
#endif
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+ REGSET_TAGGED_ADDR_CTRL,
+#endif
};
static const struct user_regset aarch64_regsets[] = {
@@ -1148,6 +1181,16 @@ static const struct user_regset aarch64_regsets[] = {
},
#endif
#endif
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+ [REGSET_TAGGED_ADDR_CTRL] = {
+ .core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .regset_get = tagged_addr_ctrl_get,
+ .set = tagged_addr_ctrl_set,
+ },
+#endif
};
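Besides the prctl() view, a debugger can now fetch the same control word with the generic regset call. A sketch, assuming NT_ARM_TAGGED_ADDR_CTRL (0x409 in the uapi headers) and a tracee that is already stopped:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_ARM_TAGGED_ADDR_CTRL
#define NT_ARM_TAGGED_ADDR_CTRL 0x409	/* uapi value; assumption if headers are old */
#endif

static long read_tagged_addr_ctrl(pid_t pid)
{
	long ctrl = 0;
	struct iovec iov = { .iov_base = &ctrl, .iov_len = sizeof(ctrl) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_TAGGED_ADDR_CTRL, &iov))
		return -1;

	return ctrl;	/* same layout as PR_GET_TAGGED_ADDR_CTRL */
}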
static const struct user_regset_view user_aarch64_view = {
@@ -1691,6 +1734,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
+ switch (request) {
+ case PTRACE_PEEKMTETAGS:
+ case PTRACE_POKEMTETAGS:
+ return mte_ptrace_copy_tags(child, request, addr, data);
+ }
+
return ptrace_request(child, request, addr, data);
}
@@ -1793,7 +1842,7 @@ void syscall_trace_exit(struct pt_regs *regs)
* We also reserve IL for the kernel; SS is handled dynamically.
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
- (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 542d6edc6806..84eec95ec06c 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -36,18 +36,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
mov x14, xzr /* x14 = entry ptr */
mov x13, xzr /* x13 = copy dest */
- /* Clear the sctlr_el2 flags. */
- mrs x0, CurrentEL
- cmp x0, #CurrentEL_EL2
- b.ne 1f
- mrs x0, sctlr_el2
- mov_q x1, SCTLR_ELx_FLAGS
- bic x0, x0, x1
- pre_disable_mmu_workaround
- msr sctlr_el2, x0
- isb
-1:
-
/* Check if the new image needs relocation. */
tbnz x16, IND_DONE_BIT, .Ldone
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index a5e8b3b9d798..a6d18755652f 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -18,16 +18,16 @@ struct return_address_data {
void *addr;
};
-static int save_return_addr(struct stackframe *frame, void *d)
+static bool save_return_addr(void *d, unsigned long pc)
{
struct return_address_data *data = d;
if (!data->level) {
- data->addr = (void *)frame->pc;
- return 1;
+ data->addr = (void *)pc;
+ return false;
} else {
--data->level;
- return 0;
+ return true;
}
}
NOKPROBE_SYMBOL(save_return_addr);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 77c4c9bad1b8..133257ffd859 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
if (!standard_resources)
panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
- for_each_memblock(memory, region) {
+ for_each_mem_region(region) {
res = &standard_resources[i++];
if (memblock_is_nomap(region)) {
res->name = "reserved";
@@ -257,7 +257,7 @@ static int __init reserve_memblock_reserved_regions(void)
if (!memblock_is_region_reserved(mem->start, mem_size))
continue;
- for_each_reserved_mem_region(j, &r_start, &r_end) {
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
resource_size_t start, end;
start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
@@ -280,7 +280,6 @@ u64 cpu_logical_map(int cpu)
{
return __cpu_logical_map[cpu];
}
-EXPORT_SYMBOL_GPL(cpu_logical_map);
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 3b4f31f35e45..bdcaaf091e1e 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -244,7 +244,8 @@ static int preserve_sve_context(struct sve_context __user *ctx)
if (vq) {
/*
* This assumes that the SVE state has already been saved to
- * the task struct by calling preserve_fpsimd_context().
+ * the task struct by calling the function
+ * fpsimd_signal_preserve_current_state().
*/
err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
current->thread.sve_state,
@@ -748,6 +749,9 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
regs->pstate |= PSR_BTYPE_C;
}
+ /* TCO (Tag Check Override) always cleared for signal handlers */
+ regs->pstate &= ~PSR_TCO_BIT;
+
if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer;
else
@@ -932,6 +936,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
+ if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ send_sig_fault(SIGSEGV, SEGV_MTEAERR,
+ (void __user *)NULL, current);
+ }
+
if (thread_flags & _TIF_SIGPENDING)
do_signal(regs);
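For the new _TIF_MTE_ASYNC_FAULT path, userspace sees an ordinary SIGSEGV whose si_code is SEGV_MTEAERR and whose si_addr is NULL, since the faulting address is unknown for asynchronous tag checks. A hedged sketch of a handler, defining SEGV_MTEAERR locally if the libc headers lack it (8 in the uapi headers):

#include <signal.h>
#include <string.h>
#include <unistd.h>

#ifndef SEGV_MTEAERR
#define SEGV_MTEAERR 8	/* asynchronous MTE tag check fault */
#endif

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	static const char msg[] = "asynchronous MTE tag check fault\n";

	if (info->si_code == SEGV_MTEAERR)
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	/* ... perform tag-checked accesses with PR_MTE_TCF_ASYNC enabled ... */
	return 0;
}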
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index 1f93809528a4..d62447964ed9 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -9,7 +9,6 @@
#include <asm/assembler.h>
.macro SMCCC instr
- .cfi_startproc
\instr #0
ldr x4, [sp]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
@@ -21,7 +20,6 @@
b.ne 1f
str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
1: ret
- .cfi_endproc
.endm
/*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 03957a1ae6c0..82e75fc2c903 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>
@@ -72,10 +73,18 @@ enum ipi_msg_type {
IPI_CPU_CRASH_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
- IPI_WAKEUP
+ IPI_WAKEUP,
+ NR_IPI
};
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
@@ -151,7 +160,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
break;
}
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
- /* Fall through */
+ fallthrough;
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
@@ -237,6 +246,8 @@ asmlinkage notrace void secondary_start_kernel(void)
*/
notify_cpu_starting(cpu);
+ ipi_setup(cpu);
+
store_cpu_topology(cpu);
numa_add_cpu(cpu);
@@ -302,6 +313,7 @@ int __cpu_disable(void)
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
/*
* OK - migrate IRQs away from this CPU
@@ -772,13 +784,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
- __smp_cross_call = fn;
-}
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
@@ -790,35 +795,25 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_WAKEUP, "CPU wake-up interrupts"),
};
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
- trace_ipi_raise(target, ipi_types[ipinr]);
- __smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
-void show_ipi_list(struct seq_file *p, int prec)
+unsigned long irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
+ unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
-}
-
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
- u64 sum = 0;
- int i;
- for (i = 0; i < NR_IPI; i++)
- sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
- return sum;
+ seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+ return 0;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -841,8 +836,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
- if (__smp_cross_call)
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
@@ -890,15 +884,12 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
/*
* Main handler for inter-processor interrupts
*/
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
- struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
+ if ((unsigned)ipinr < NR_IPI)
trace_ipi_entry_rcuidle(ipi_types[ipinr]);
- __inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
switch (ipinr) {
case IPI_RESCHEDULE:
@@ -906,21 +897,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
local_cpu_stop();
- irq_exit();
break;
case IPI_CPU_CRASH_STOP:
if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
- irq_enter();
- ipi_cpu_crash_stop(cpu, regs);
+ ipi_cpu_crash_stop(cpu, get_irq_regs());
unreachable();
}
@@ -928,17 +914,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
tick_receive_broadcast();
- irq_exit();
break;
#endif
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
- irq_enter();
irq_work_run();
- irq_exit();
break;
#endif
@@ -957,7 +939,66 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
- set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ disable_percpu_irq(ipi_irq_base + i);
+}
+#endif
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < NR_IPI);
+ nr_ipi = min(n, NR_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &cpu_number);
+ WARN_ON(err);
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
}
void smp_send_reschedule(int cpu)
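
With this conversion the arm64 IPIs are backed by ordinary per-CPU interrupts: they are requested with request_percpu_irq(), raised through __ipi_send_mask(), hidden from the main listing via IRQ_HIDDEN, and their per-CPU counts are appended to /proc/interrupts by arch_show_interrupts() instead of the removed show_ipi_list(). A trivial userspace check of the visible effect (nothing arm64-specific is assumed):

/* Print the IPI accounting lines that arch_show_interrupts() appends
 * to /proc/interrupts. Purely a userspace observation helper. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/interrupts", "r");

	if (!f) {
		perror("/proc/interrupts");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "IPI"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
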
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index c8a3fee00c11..5892e79fa429 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -83,9 +83,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
/*
* We write the release address as LE regardless of the native
- * endianess of the kernel. Therefore, any boot-loaders that
+ * endianness of the kernel. Therefore, any boot-loaders that
* read this address need to convert this address to the
- * boot-loader's endianess before jumping. This is mandated by
+ * boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
deleted file mode 100644
index b26955f56750..000000000000
--- a/arch/arm64/kernel/ssbd.c
+++ /dev/null
@@ -1,129 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
- */
-
-#include <linux/compat.h>
-#include <linux/errno.h>
-#include <linux/prctl.h>
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/thread_info.h>
-
-#include <asm/cpufeature.h>
-
-static void ssbd_ssbs_enable(struct task_struct *task)
-{
- u64 val = is_compat_thread(task_thread_info(task)) ?
- PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
-
- task_pt_regs(task)->pstate |= val;
-}
-
-static void ssbd_ssbs_disable(struct task_struct *task)
-{
- u64 val = is_compat_thread(task_thread_info(task)) ?
- PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
-
- task_pt_regs(task)->pstate &= ~val;
-}
-
-/*
- * prctl interface for SSBD
- */
-static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
-{
- int state = arm64_get_ssbd_state();
-
- /* Unsupported */
- if (state == ARM64_SSBD_UNKNOWN)
- return -ENODEV;
-
- /* Treat the unaffected/mitigated state separately */
- if (state == ARM64_SSBD_MITIGATED) {
- switch (ctrl) {
- case PR_SPEC_ENABLE:
- return -EPERM;
- case PR_SPEC_DISABLE:
- case PR_SPEC_FORCE_DISABLE:
- return 0;
- }
- }
-
- /*
- * Things are a bit backward here: the arm64 internal API
- * *enables the mitigation* when the userspace API *disables
- * speculation*. So much fun.
- */
- switch (ctrl) {
- case PR_SPEC_ENABLE:
- /* If speculation is force disabled, enable is not allowed */
- if (state == ARM64_SSBD_FORCE_ENABLE ||
- task_spec_ssb_force_disable(task))
- return -EPERM;
- task_clear_spec_ssb_disable(task);
- clear_tsk_thread_flag(task, TIF_SSBD);
- ssbd_ssbs_enable(task);
- break;
- case PR_SPEC_DISABLE:
- if (state == ARM64_SSBD_FORCE_DISABLE)
- return -EPERM;
- task_set_spec_ssb_disable(task);
- set_tsk_thread_flag(task, TIF_SSBD);
- ssbd_ssbs_disable(task);
- break;
- case PR_SPEC_FORCE_DISABLE:
- if (state == ARM64_SSBD_FORCE_DISABLE)
- return -EPERM;
- task_set_spec_ssb_disable(task);
- task_set_spec_ssb_force_disable(task);
- set_tsk_thread_flag(task, TIF_SSBD);
- ssbd_ssbs_disable(task);
- break;
- default:
- return -ERANGE;
- }
-
- return 0;
-}
-
-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
- unsigned long ctrl)
-{
- switch (which) {
- case PR_SPEC_STORE_BYPASS:
- return ssbd_prctl_set(task, ctrl);
- default:
- return -ENODEV;
- }
-}
-
-static int ssbd_prctl_get(struct task_struct *task)
-{
- switch (arm64_get_ssbd_state()) {
- case ARM64_SSBD_UNKNOWN:
- return -ENODEV;
- case ARM64_SSBD_FORCE_ENABLE:
- return PR_SPEC_DISABLE;
- case ARM64_SSBD_KERNEL:
- if (task_spec_ssb_force_disable(task))
- return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
- if (task_spec_ssb_disable(task))
- return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
- return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- case ARM64_SSBD_FORCE_DISABLE:
- return PR_SPEC_ENABLE;
- default:
- return PR_SPEC_NOT_AFFECTED;
- }
-}
-
-int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
-{
- switch (which) {
- case PR_SPEC_STORE_BYPASS:
- return ssbd_prctl_get(task);
- default:
- return -ENODEV;
- }
-}
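
ssbd.c can be deleted because the PR_SPEC_STORE_BYPASS prctl() handling moves into the consolidated Spectre-v4 code in proton-pack.c; the userspace interface itself is unchanged. A minimal sketch of driving that interface, assuming kernel headers that provide the PR_SPEC_* constants:

/* Query/force the Speculative Store Bypass control that the removed
 * ssbd.c used to implement (now handled by proton-pack.c). The prctl()
 * interface is generic and unchanged by this series. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_SPEC_* definitions (assumes recent headers) */

int main(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");
		return 1;
	}

	printf("SSB state: %#lx (PRCTL=%d DISABLE=%d FORCE_DISABLE=%d)\n",
	       (unsigned long)state, !!(state & PR_SPEC_PRCTL),
	       !!(state & PR_SPEC_DISABLE), !!(state & PR_SPEC_FORCE_DISABLE));

	/* Opt this task (and its children) out of speculative store bypass. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_FORCE_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}
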
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 2dd8e3b8b94b..fa56af1a59c3 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -118,12 +118,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data)
+ bool (*fn)(void *, unsigned long), void *data)
{
while (1) {
int ret;
- if (fn(frame, data))
+ if (!fn(data, frame->pc))
break;
ret = unwind_frame(tsk, frame);
if (ret < 0)
@@ -132,84 +132,89 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
}
NOKPROBE_SYMBOL(walk_stackframe);
-#ifdef CONFIG_STACKTRACE
-struct stack_trace_data {
- struct stack_trace *trace;
- unsigned int no_sched_functions;
- unsigned int skip;
-};
-
-static int save_trace(struct stackframe *frame, void *d)
+static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
- struct stack_trace_data *data = d;
- struct stack_trace *trace = data->trace;
- unsigned long addr = frame->pc;
-
- if (data->no_sched_functions && in_sched_functions(addr))
- return 0;
- if (data->skip) {
- data->skip--;
- return 0;
- }
-
- trace->entries[trace->nr_entries++] = addr;
-
- return trace->nr_entries >= trace->max_entries;
+ printk("%s %pS\n", loglvl, (void *)where);
}
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
- struct stack_trace_data data;
struct stackframe frame;
+ int skip = 0;
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = 0;
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
- start_backtrace(&frame, regs->regs[29], regs->pc);
- walk_stackframe(current, &frame, save_trace, &data);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_regs);
+ if (regs) {
+ if (user_mode(regs))
+ return;
+ skip = 1;
+ }
-static noinline void __save_stack_trace(struct task_struct *tsk,
- struct stack_trace *trace, unsigned int nosched)
-{
- struct stack_trace_data data;
- struct stackframe frame;
+ if (!tsk)
+ tsk = current;
if (!try_get_task_stack(tsk))
return;
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = nosched;
-
- if (tsk != current) {
- start_backtrace(&frame, thread_saved_fp(tsk),
- thread_saved_pc(tsk));
- } else {
- /* We don't want this function nor the caller */
- data.skip += 2;
+ if (tsk == current) {
start_backtrace(&frame,
(unsigned long)__builtin_frame_address(0),
- (unsigned long)__save_stack_trace);
+ (unsigned long)dump_backtrace);
+ } else {
+ /*
+ * task blocked in __switch_to
+ */
+ start_backtrace(&frame,
+ thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
}
- walk_stackframe(tsk, &frame, save_trace, &data);
+ printk("%sCall trace:\n", loglvl);
+ do {
+ /* skip until specified stack frame */
+ if (!skip) {
+ dump_backtrace_entry(frame.pc, loglvl);
+ } else if (frame.fp == regs->regs[29]) {
+ skip = 0;
+ /*
+ * Mostly, this is the case where this function is
+ * called in panic/abort. As exception handler's
+ * stack frame does not contain the corresponding pc
+ * at which an exception has taken place, use regs->pc
+ * instead.
+ */
+ dump_backtrace_entry(regs->pc, loglvl);
+ }
+ } while (!unwind_frame(tsk, &frame));
put_task_stack(tsk);
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- __save_stack_trace(tsk, trace, 1);
+ dump_backtrace(NULL, tsk, loglvl);
+ barrier();
}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_STACKTRACE
+
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
- __save_stack_trace(current, trace, 0);
+ struct stackframe frame;
+
+ if (regs)
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+ else if (task == current)
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)arch_stack_walk);
+ else
+ start_backtrace(&frame, thread_saved_fp(task),
+ thread_saved_pc(task));
+
+ walk_stackframe(task, &frame, consume_entry, cookie);
}
-EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
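
The rewrite above replaces the open-coded save_stack_trace*() helpers with the generic ARCH_STACKWALK hook: arch_stack_walk() feeds each unwound PC to a bool-returning callback and stops as soon as it returns false. A hedged in-kernel sketch of such a consumer; real callers normally go through the generic stack_trace_save*() wrappers rather than calling arch_stack_walk() directly, and the pc_buffer/record_pc names are purely illustrative.

/* Illustrative consumer of arch_stack_walk(); the callback matches
 * stack_trace_consume_fn (return false to stop the walk). */
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

struct pc_buffer {			/* hypothetical cookie */
	unsigned long entries[16];
	unsigned int nr;
};

static bool record_pc(void *cookie, unsigned long pc)
{
	struct pc_buffer *buf = cookie;

	if (buf->nr >= ARRAY_SIZE(buf->entries))
		return false;		/* buffer full: stop unwinding */
	buf->entries[buf->nr++] = pc;
	return true;			/* keep walking */
}

static void dump_current_stack_example(void)
{
	struct pc_buffer buf = { .nr = 0 };
	unsigned int i;

	arch_stack_walk(record_pc, &buf, current, NULL);
	for (i = 0; i < buf.nr; i++)
		pr_info("  %pS\n", (void *)buf.entries[i]);
}
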
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index c1dee9066ff9..96cd347c7a46 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -10,6 +10,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
+#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
@@ -72,8 +73,10 @@ void notrace __cpu_suspend_exit(void)
* have turned the mitigation on. If the user has forcefully
* disabled it, make sure their wishes are obeyed.
*/
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- arm64_set_ssbd_mitigation(false);
+ spectre_v4_enable_mitigation(NULL);
+
+ /* Restore additional MTE-specific configuration */
+ mte_suspend_exit();
}
/*
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5f0c04863d2c..e4c0dadf0d92 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -123,6 +123,16 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
local_daif_restore(DAIF_PROCCTX);
user_exit();
+ if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
+ /*
+ * Process the asynchronous tag check fault before the actual
+ * syscall. do_notify_resume() will send a signal to userspace
+ * before the syscall is restarted.
+ */
+ regs->regs[0] = -ERESTARTNOINTR;
+ return;
+ }
+
if (has_syscall_work(flags)) {
/*
* The de-facto standard way to skip a system call using ptrace
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 0801a0f3c156..543c67cae02f 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -36,21 +36,23 @@ void store_cpu_topology(unsigned int cpuid)
if (mpidr & MPIDR_UP_BITMASK)
return;
- /* Create cpu topology mapping based on MPIDR. */
- if (mpidr & MPIDR_MT_BITMASK) {
- /* Multiprocessor system : Multi-threads per core */
- cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
- } else {
- /* Multiprocessor system : Single-thread per core */
- cpuid_topo->thread_id = -1;
- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
- }
+ /*
+ * This would be the place to create cpu topology based on MPIDR.
+ *
+ * However, it cannot be trusted to depict the actual topology; some
+ * pieces of the architecture enforce an artificial cap on Aff0 values
+ * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
+ * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
+ * having absolutely no relationship to the actual underlying system
+ * topology, and cannot be reasonably used as core / package ID.
+ *
+ * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
+ * we still wouldn't be able to obtain a sane core ID. This means we
+ * need to entirely ignore MPIDR for any topology deduction.
+ */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
@@ -246,6 +248,13 @@ static int __init init_amu_fie(void)
static_branch_enable(&amu_fie_key);
}
+ /*
+ * If the system is not fully invariant after AMU init, disable
+ * partial use of counters for frequency invariance.
+ */
+ if (!topology_scale_freq_invariant())
+ static_branch_disable(&amu_fie_key);
+
free_valid_mask:
free_cpumask_var(valid_cpus);
@@ -253,7 +262,7 @@ free_valid_mask:
}
late_initcall_sync(init_amu_fie);
-bool arch_freq_counters_available(struct cpumask *cpus)
+bool arch_freq_counters_available(const struct cpumask *cpus)
{
return amu_freq_invariant() &&
cpumask_subset(cpus, amu_fie_cpus);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 13ebd5ca2070..8af4e0e85736 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -34,6 +34,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
@@ -53,11 +54,6 @@ static const char *handler[]= {
int show_unhandled_signals = 0;
-static void dump_backtrace_entry(unsigned long where, const char *loglvl)
-{
- printk("%s %pS\n", loglvl, (void *)where);
-}
-
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
@@ -83,66 +79,6 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
printk("%sCode: %s\n", lvl, str);
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
- const char *loglvl)
-{
- struct stackframe frame;
- int skip = 0;
-
- pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
- if (regs) {
- if (user_mode(regs))
- return;
- skip = 1;
- }
-
- if (!tsk)
- tsk = current;
-
- if (!try_get_task_stack(tsk))
- return;
-
- if (tsk == current) {
- start_backtrace(&frame,
- (unsigned long)__builtin_frame_address(0),
- (unsigned long)dump_backtrace);
- } else {
- /*
- * task blocked in __switch_to
- */
- start_backtrace(&frame,
- thread_saved_fp(tsk),
- thread_saved_pc(tsk));
- }
-
- printk("%sCall trace:\n", loglvl);
- do {
- /* skip until specified stack frame */
- if (!skip) {
- dump_backtrace_entry(frame.pc, loglvl);
- } else if (frame.fp == regs->regs[29]) {
- skip = 0;
- /*
- * Mostly, this is the case where this function is
- * called in panic/abort. As exception handler's
- * stack frame does not contain the corresponding pc
- * at which an exception has taken place, use regs->pc
- * instead.
- */
- dump_backtrace_entry(regs->pc, loglvl);
- }
- } while (!unwind_frame(tsk, &frame));
-
- put_task_stack(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
-{
- dump_backtrace(NULL, tsk, loglvl);
- barrier();
-}
-
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
@@ -200,9 +136,9 @@ void die(const char *str, struct pt_regs *regs, int err)
oops_exit();
if (in_interrupt())
- panic("Fatal exception in interrupt");
+ panic("%s: Fatal exception in interrupt", str);
if (panic_on_oops)
- panic("Fatal exception");
+ panic("%s: Fatal exception", str);
raw_spin_unlock_irqrestore(&die_lock, flags);
@@ -412,7 +348,7 @@ exit:
return fn ? fn(regs, instr) : 1;
}
-void force_signal_inject(int signal, int code, unsigned long address)
+void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
{
const char *desc;
struct pt_regs *regs = current_pt_regs();
@@ -438,7 +374,7 @@ void force_signal_inject(int signal, int code, unsigned long address)
signal = SIGKILL;
}
- arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
+ arm64_notify_die(desc, regs, signal, code, (void __user *)address, err);
}
/*
@@ -455,7 +391,7 @@ void arm64_notify_segfault(unsigned long addr)
code = SEGV_ACCERR;
mmap_read_unlock(current->mm);
- force_signal_inject(SIGSEGV, code, addr);
+ force_signal_inject(SIGSEGV, code, addr, 0);
}
void do_undefinstr(struct pt_regs *regs)
@@ -468,17 +404,28 @@ void do_undefinstr(struct pt_regs *regs)
return;
BUG_ON(!user_mode(regs));
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);
void do_bti(struct pt_regs *regs)
{
BUG_ON(!user_mode(regs));
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);
+void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
+{
+ /*
+ * Unexpected FPAC exception or pointer authentication failure in
+ * the kernel: kill the task before it does any more harm.
+ */
+ BUG_ON(!user_mode(regs));
+ force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+}
+NOKPROBE_SYMBOL(do_ptrauth_fault);
+
#define __user_cache_maint(insn, address, res) \
if (address >= user_addr_max()) { \
res = -EFAULT; \
@@ -528,7 +475,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
__user_cache_maint("ic ivau", address, ret);
break;
default:
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
return;
}
@@ -581,7 +528,7 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs)
sysreg = esr_sys64_to_sysreg(esr);
if (do_emulate_mrs(regs, sysreg, rt) != 0)
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
static void wfi_handler(unsigned int esr, struct pt_regs *regs)
@@ -775,6 +722,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
[ESR_ELx_EC_SVE] = "SVE",
[ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
+ [ESR_ELx_EC_FPAC] = "FPAC",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -935,26 +883,6 @@ asmlinkage void enter_from_user_mode(void)
}
NOKPROBE_SYMBOL(enter_from_user_mode);
-void __pte_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
-}
-
-void __pmd_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
-}
-
-void __pud_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
-}
-
-void __pgd_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
-}
-
/* GENERIC_BUG traps */
int is_valid_bugaddr(unsigned long addr)
@@ -994,6 +922,21 @@ static struct break_hook bug_break_hook = {
.imm = BUG_BRK_IMM,
};
+static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+{
+ pr_err("%s generated an invalid instruction at %pS!\n",
+ in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
+ (void *)instruction_pointer(regs));
+
+ /* We cannot handle this */
+ return DBG_HOOK_ERROR;
+}
+
+static struct break_hook fault_break_hook = {
+ .fn = reserved_fault_handler,
+ .imm = FAULT_BRK_IMM,
+};
+
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_ESR_RECOVER 0x20
@@ -1059,6 +1002,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
void __init trap_init(void)
{
register_kernel_break_hook(&bug_break_hook);
+ register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
register_kernel_break_hook(&kasan_break_hook);
#endif
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d4202a32abc9..debb8995d57f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -30,15 +30,11 @@
#include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
-#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
-#endif /* CONFIG_COMPAT_VDSO */
enum vdso_abi {
VDSO_ABI_AA64,
-#ifdef CONFIG_COMPAT_VDSO
VDSO_ABI_AA32,
-#endif /* CONFIG_COMPAT_VDSO */
};
enum vvar_pages {
@@ -284,21 +280,17 @@ up_fail:
/*
* Create and map the vectors page for AArch32 tasks.
*/
-#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
-#endif /* CONFIG_COMPAT_VDSO */
enum aarch32_map {
AA32_MAP_VECTORS, /* kuser helpers */
-#ifdef CONFIG_COMPAT_VDSO
+ AA32_MAP_SIGPAGE,
AA32_MAP_VVAR,
AA32_MAP_VDSO,
-#endif
- AA32_MAP_SIGPAGE
};
static struct page *aarch32_vectors_page __ro_after_init;
@@ -309,7 +301,10 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
.name = "[vectors]", /* ABI */
.pages = &aarch32_vectors_page,
},
-#ifdef CONFIG_COMPAT_VDSO
+ [AA32_MAP_SIGPAGE] = {
+ .name = "[sigpage]", /* ABI */
+ .pages = &aarch32_sig_page,
+ },
[AA32_MAP_VVAR] = {
.name = "[vvar]",
.fault = vvar_fault,
@@ -319,11 +314,6 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
.name = "[vdso]",
.mremap = aarch32_vdso_mremap,
},
-#endif /* CONFIG_COMPAT_VDSO */
- [AA32_MAP_SIGPAGE] = {
- .name = "[sigpage]", /* ABI */
- .pages = &aarch32_sig_page,
- },
};
static int aarch32_alloc_kuser_vdso_page(void)
@@ -362,25 +352,25 @@ static int aarch32_alloc_sigpage(void)
return 0;
}
-#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
+
+ if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
+ return 0;
+
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
return __vdso_init(VDSO_ABI_AA32);
}
-#endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void)
{
int ret;
-#ifdef CONFIG_COMPAT_VDSO
ret = __aarch32_alloc_vdso_pages();
if (ret)
return ret;
-#endif
ret = aarch32_alloc_sigpage();
if (ret)
@@ -449,14 +439,12 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (ret)
goto out;
-#ifdef CONFIG_COMPAT_VDSO
- ret = __setup_additional_pages(VDSO_ABI_AA32,
- mm,
- bprm,
- uses_interp);
- if (ret)
- goto out;
-#endif /* CONFIG_COMPAT_VDSO */
+ if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
+ ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
+ uses_interp);
+ if (ret)
+ goto out;
+ }
ret = aarch32_sigreturn_setup(mm);
out:
@@ -497,8 +485,7 @@ static int __init vdso_init(void)
}
arch_initcall(vdso_init);
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
int ret;
@@ -506,11 +493,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = __setup_additional_pages(VDSO_ABI_AA64,
- mm,
- bprm,
- uses_interp);
-
+ ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
mmap_write_unlock(mm);
return ret;
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 45d5cfe46429..04021a93171c 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -43,13 +43,6 @@ ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
-# Clang versions less than 8 do not support -mcmodel=tiny
-ifeq ($(CONFIG_CC_IS_CLANG), y)
- ifeq ($(shell test $(CONFIG_CLANG_VERSION) -lt 80000; echo $$?),0)
- CFLAGS_REMOVE_vgettimeofday.o += -mcmodel=tiny
- endif
-endif
-
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 5139a5f19256..d6adb4677c25 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -208,7 +208,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
+quiet_cmd_vdso_install = INSTALL32 $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
vdso.so: $(obj)/vdso.so.dbg
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ec8e894684a7..5ca957e656ab 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
*/
#define RO_EXCEPTION_TABLE_ALIGN 8
+#define RUNTIME_DISCARD_EXIT
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
@@ -20,6 +21,13 @@ ENTRY(_text)
jiffies = jiffies_64;
+
+#define HYPERVISOR_EXTABLE \
+ . = ALIGN(SZ_8); \
+ __start___kvm_ex_table = .; \
+ *(__kvm_ex_table) \
+ __stop___kvm_ex_table = .;
+
#define HYPERVISOR_TEXT \
/* \
* Align to 4 KB so that \
@@ -35,6 +43,7 @@ jiffies = jiffies_64;
__hyp_idmap_text_end = .; \
__hyp_text_start = .; \
*(.hyp.text) \
+ HYPERVISOR_EXTABLE \
__hyp_text_end = .;
#define IDMAP_TEXT \
@@ -88,16 +97,13 @@ SECTIONS
* matching the same input section name. There is no documented
* order of matching.
*/
+ DISCARDS
/DISCARD/ : {
- EXIT_CALL
- *(.discard)
- *(.discard.*)
*(.interp .dynamic)
*(.dynsym .dynstr .hash .gnu.hash)
- *(.eh_frame)
}
- . = KIMAGE_VADDR + TEXT_OFFSET;
+ . = KIMAGE_VADDR;
.head.text : {
_text = .;
@@ -123,6 +129,14 @@ SECTIONS
*(.got) /* Global offset table */
}
+ /*
+ * Make sure that the .got.plt is either completely empty or it
+ * contains only the lazy dispatch entries.
+ */
+ .got.plt : { *(.got.plt) }
+ ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
+ "Unexpected GOT/PLT entries detected!")
+
. = ALIGN(SEGMENT_ALIGN);
_etext = .; /* End of text section */
@@ -241,8 +255,22 @@ SECTIONS
_end = .;
STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
HEAD_SYMBOLS
+
+ /*
+ * Sections that should stay zero sized, which is safer to
+ * explicitly check instead of blindly discarding.
+ */
+ .plt : {
+ *(.plt) *(.plt.*) *(.iplt) *(.igot)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .data.rel.ro : { *(.data.rel.ro) }
+ ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
}
#include "image-vars.h"
@@ -266,4 +294,4 @@ ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
-ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 318c8f2df245..043756db8f6e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -57,9 +57,6 @@ config KVM_ARM_PMU
Adds support for a virtual Performance Monitoring Unit (PMU) in
virtual machines.
-config KVM_INDIRECT_VECTORS
- def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
-
endif # KVM
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 691d21e4c717..acf9a993dfb6 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = kvm_arm_pvtime_supported();
+ break;
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
@@ -1256,6 +1259,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
}
+static int kvm_map_vectors(void)
+{
+ /*
+ * SV2 = ARM64_SPECTRE_V2
+ * HEL2 = ARM64_HARDEN_EL2_VECTORS
+ *
+ * !SV2 + !HEL2 -> use direct vectors
+ * SV2 + !HEL2 -> use hardened vectors in place
+ * !SV2 + HEL2 -> allocate one vector slot and use exec mapping
+ * SV2 + HEL2 -> use hardened vectors and use exec mapping
+ */
+ if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+ __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+ __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+ }
+
+ if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+ phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+ unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+ /*
+ * Always allocate a spare vector slot, as we don't
+ * know yet which CPUs have a BP hardening slot that
+ * we can reuse.
+ */
+ __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+ return create_hyp_exec_mappings(vect_pa, size,
+ &__kvm_bp_vect_base);
+ }
+
+ return 0;
+}
+
static void cpu_init_hyp_mode(void)
{
phys_addr_t pgd_ptr;
@@ -1292,7 +1329,7 @@ static void cpu_init_hyp_mode(void)
* at EL2.
*/
if (this_cpu_has_cap(ARM64_SSBS) &&
- arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
kvm_call_hyp_nvhe(__kvm_enable_ssbs);
}
}
@@ -1549,10 +1586,6 @@ static int init_hyp_mode(void)
}
}
- err = hyp_map_aux_data();
- if (err)
- kvm_err("Cannot map host auxiliary data: %d\n", err);
-
return 0;
out_err:
@@ -1640,6 +1673,10 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}
+ if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+ kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
+ "Only trusted guests should be used on this system.\n");
+
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
if (ret < 0) {
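
The KVM_CAP_STEAL_TIME case added to kvm_vm_ioctl_check_extension() above lets a VMM probe stolen-time support, backed by kvm_arm_pvtime_supported(), which pvtime.c exports later in this diff. A minimal probe using only standard KVM ioctls; it assumes headers recent enough to define KVM_CAP_STEAL_TIME:

/* Probe KVM_CAP_STEAL_TIME the way a VMM would after the change above.
 * Only standard KVM ioctls are used; nothing arm64-specific is assumed. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <unistd.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm, r;

	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}
	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}
	r = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME);
	printf("KVM_CAP_STEAL_TIME: %s\n", r > 0 ? "supported" : "not supported");
	close(vm);
	close(kvm);
	return 0;
}
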
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fe6c7d79309d..5d690d60ccad 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_WATCHPT_LOW:
run->debug.arch.far = vcpu->arch.fault.far_el2;
- /* fall through */
+ fallthrough;
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_BKPT32:
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index f54f0e89a71c..d898f0da5802 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_KVM) += vhe/ nvhe/
-obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ee32a7743389..76e7eaf4675e 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -196,20 +196,23 @@ alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
// it, and at the latest just after the ISB.
- .global abort_guest_exit_start
abort_guest_exit_start:
isb
- .global abort_guest_exit_end
abort_guest_exit_end:
msr daifset, #4 // Mask aborts
+ ret
+
+ _kvm_extable abort_guest_exit_start, 9997f
+ _kvm_extable abort_guest_exit_end, 9997f
+9997:
+ msr daifset, #4 // Mask aborts
+ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
- // If the exception took place, restore the EL1 exception
- // context so that we can report some information.
- // Merge the exception code with the SError pending bit.
- tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+ // restore the EL1 exception context so that we can report some
+ // information. Merge the exception code with the SError pending bit.
msr elr_el2, x2
msr esr_el2, x3
msr spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..7ea277b82967 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,30 @@
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>
+.macro save_caller_saved_regs_vect
+ /* x0 and x1 were saved in the vector entry */
+ stp x2, x3, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+ ldp x16, x17, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+.endm
+
.text
.macro do_el2_call
@@ -92,35 +116,6 @@ el1_hvc_guest:
ARM_SMCCC_ARCH_WORKAROUND_2)
cbnz w1, el1_trap
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
- b wa2_end
-alternative_cb_end
- get_vcpu_ptr x2, x0
- ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
- // Sanitize the argument and update the guest flags
- ldr x1, [sp, #8] // Guest's x1
- clz w1, w1 // Murphy's device:
- lsr w1, w1, #5 // w1 = !!w1 without using
- eor w1, w1, #1 // the flags...
- bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
- str x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
- /* Check that we actually need to perform the call */
- hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
- cbz x0, wa2_end
-
- mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
- smc #0
-
- /* Don't leak data from the SMC call */
- mov x3, xzr
-wa2_end:
- mov x2, xzr
- mov x1, xzr
-#endif
-
wa_epilogue:
mov x0, xzr
add sp, sp, #16
@@ -143,13 +138,19 @@ el1_error:
b __guest_exit
el2_sync:
- /* Check for illegal exception return, otherwise panic */
+ /* Check for illegal exception return */
mrs x0, spsr_el2
+ tbnz x0, #20, 1f
- /* if this was something else, then panic! */
- tst x0, #PSR_IL_BIT
- b.eq __hyp_panic
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+ bl kvm_unexpected_el2_exception
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
+
+ eret
+1:
/* Let's attempt a recovery from the illegal exception return */
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IL
@@ -157,27 +158,14 @@ el2_sync:
el2_error:
- ldp x0, x1, [sp], #16
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+
+ bl kvm_unexpected_el2_exception
+
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
- /*
- * Only two possibilities:
- * 1) Either we come from the exit path, having just unmasked
- * PSTATE.A: change the return code to an EL2 fault, and
- * carry on, as we're already in a sane state to handle it.
- * 2) Or we come from anywhere else, and that's a bug: we panic.
- *
- * For (1), x0 contains the original return code and x1 doesn't
- * contain anything meaningful at that stage. We can reuse them
- * as temp registers.
- * For (2), who cares?
- */
- mrs x0, elr_el2
- adr x1, abort_guest_exit_start
- cmp x0, x1
- adr x1, abort_guest_exit_end
- ccmp x0, x1, #4, ne
- b.ne __hyp_panic
- mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
sb
@@ -271,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
valid_vect el1_error // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
.align 7
1: esb
@@ -321,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 0297dc63988c..5e28ea6aa097 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -21,70 +21,70 @@
#define save_debug(ptr,reg,nr) \
switch (nr) { \
case 15: ptr[15] = read_debug(reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: ptr[14] = read_debug(reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: ptr[13] = read_debug(reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: ptr[12] = read_debug(reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: ptr[11] = read_debug(reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: ptr[10] = read_debug(reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: ptr[9] = read_debug(reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: ptr[8] = read_debug(reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: ptr[7] = read_debug(reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: ptr[6] = read_debug(reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: ptr[5] = read_debug(reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: ptr[4] = read_debug(reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: ptr[3] = read_debug(reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: ptr[2] = read_debug(reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: ptr[1] = read_debug(reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: ptr[0] = read_debug(reg, 0); \
}
#define restore_debug(ptr,reg,nr) \
switch (nr) { \
case 15: write_debug(ptr[15], reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: write_debug(ptr[14], reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: write_debug(ptr[13], reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: write_debug(ptr[12], reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: write_debug(ptr[11], reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: write_debug(ptr[10], reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: write_debug(ptr[9], reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: write_debug(ptr[8], reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: write_debug(ptr[7], reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: write_debug(ptr[6], reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: write_debug(ptr[5], reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: write_debug(ptr[4], reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: write_debug(ptr[3], reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: write_debug(ptr[2], reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: write_debug(ptr[1], reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: write_debug(ptr[0], reg, 0); \
}
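
The blanket conversion of "/* Fall through */" comments in this header (and in smp.c, handle_exit.c and vgic-v3-sr.c above) switches to the kernel's fallthrough pseudo-keyword, which expands to __attribute__((__fallthrough__)) so that -Wimplicit-fallthrough can flag any unannotated fall-through. A small standalone illustration; the local macro only stands in for the definition in include/linux/compiler_attributes.h:

/* Stand-in for the kernel's fallthrough pseudo-keyword, which lets
 * -Wimplicit-fallthrough distinguish intentional fall-through from bugs. */
#include <stdio.h>

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
#define fallthrough	__attribute__((__fallthrough__))
#else
#define fallthrough	do {} while (0)	/* fallback for older compilers */
#endif

static void save_regs(unsigned int *ptr, unsigned int nr)
{
	switch (nr) {
	case 3: ptr[3] = 3;
		fallthrough;
	case 2: ptr[2] = 2;
		fallthrough;
	case 1: ptr[1] = 1;
		fallthrough;
	default: ptr[0] = 0;
	}
}

int main(void)
{
	unsigned int regs[4] = { 0 };

	save_regs(regs, 3);
	printf("%u %u %u %u\n", regs[0], regs[1], regs[2], regs[3]);
	return 0;
}
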
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 426ef65601dd..d0f07e8cc3ff 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
extern const char __hyp_panic_string[];
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
@@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
* saved the guest context yet, and we may return early...
*/
par = read_sysreg(par_el1);
- asm volatile("at s1e1r, %0" : : "r" (far));
- isb();
-
- tmp = read_sysreg(par_el1);
+ if (!__kvm_at("s1e1r", far))
+ tmp = read_sysreg(par_el1);
+ else
+ tmp = SYS_PAR_EL1_F; /* back to the guest */
write_sysreg(par, par_el1);
if (unlikely(tmp & SYS_PAR_EL1_F))
@@ -445,7 +449,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_abt_issea(vcpu) &&
- !kvm_vcpu_dabt_iss1tw(vcpu);
+ !kvm_vcpu_abt_iss1tw(vcpu);
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
@@ -475,37 +479,31 @@ exit:
return false;
}
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
+static inline void __kvm_unexpected_el2_exception(void)
{
- if (!cpus_have_final_cap(ARM64_SSBD))
- return false;
-
- return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
+ unsigned long addr, fixup;
+ struct kvm_cpu_context *host_ctxt;
+ struct exception_table_entry *entry, *end;
+ unsigned long elr_el2 = read_sysreg(elr_el2);
+
+ entry = hyp_symbol_addr(__start___kvm_ex_table);
+ end = hyp_symbol_addr(__stop___kvm_ex_table);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+ while (entry < end) {
+ addr = (unsigned long)&entry->insn + entry->insn;
+ fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+ if (addr != elr_el2) {
+ entry++;
+ continue;
+ }
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
- /*
- * The host runs with the workaround always present. If the
- * guest wants it disabled, so be it...
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
-}
+ write_sysreg(fixup, elr_el2);
+ return;
+ }
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
- /*
- * If the guest has disabled the workaround, bring it back on.
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
+ hyp_panic(host_ctxt);
}
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..8d3dd4f47924 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -202,8 +202,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
@@ -211,8 +209,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
- __set_host_arch_workaround_state(vcpu);
-
__sysreg_save_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_disable_traps(vcpu);
@@ -270,3 +266,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
read_sysreg(hpfar_el2), par, vcpu);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 69eae608d670..b15d65a42042 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -31,7 +31,14 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
isb();
}
+ /*
+ * __load_guest_stage2() includes an ISB only when the AT
+ * workaround is applied. Take care of the opposite condition,
+ * ensuring that we always have an ISB, but not two ISBs back
+ * to back.
+ */
__load_guest_stage2(mmu);
+ asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5a0073511efb..452f4cacd674 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
}
@@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
@@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
}
@@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..ecf67e678203 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -131,8 +131,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
@@ -140,8 +138,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
- __set_host_arch_workaround_state(vcpu);
-
sysreg_save_guest_state_vhe(guest_ctxt);
__deactivate_traps(vcpu);
@@ -217,3 +213,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
__hyp_call_panic(spsr, elr, par, host_ctxt);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 550dfa3e53cd..9824025ccc5c 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
feature = smccc_get_arg1(vcpu);
switch (feature) {
case ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (kvm_arm_harden_branch_predictor()) {
- case KVM_BP_HARDEN_UNKNOWN:
+ switch (arm64_get_spectre_v2_state()) {
+ case SPECTRE_VULNERABLE:
break;
- case KVM_BP_HARDEN_WA_NEEDED:
+ case SPECTRE_MITIGATED:
val = SMCCC_RET_SUCCESS;
break;
- case KVM_BP_HARDEN_NOT_REQUIRED:
+ case SPECTRE_UNAFFECTED:
val = SMCCC_RET_NOT_REQUIRED;
break;
}
break;
case ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (kvm_arm_have_ssbd()) {
- case KVM_SSBD_FORCE_DISABLE:
- case KVM_SSBD_UNKNOWN:
+ switch (arm64_get_spectre_v4_state()) {
+ case SPECTRE_VULNERABLE:
break;
- case KVM_SSBD_KERNEL:
- val = SMCCC_RET_SUCCESS;
- break;
- case KVM_SSBD_FORCE_ENABLE:
- case KVM_SSBD_MITIGATED:
+ case SPECTRE_MITIGATED:
+ /*
+ * SSBS everywhere: Indicate no firmware
+ * support, as the SSBS support will be
+ * indicated to the guest and the default is
+ * safe.
+ *
+ * Otherwise, expose a permanent mitigation
+ * to the guest, and hide SSBS so that the
+ * guest stays protected.
+ */
+ if (cpus_have_final_cap(ARM64_SSBS))
+ break;
+ fallthrough;
+ case SPECTRE_UNAFFECTED:
val = SMCCC_RET_NOT_REQUIRED;
break;
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0121ef2c7c8d..3d26b47a1343 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
* destroying the VM), otherwise another faulting VCPU may come in and mess
* with things behind our backs.
*/
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+ bool may_block)
{
struct kvm *kvm = mmu->kvm;
pgd_t *pgd;
@@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 si
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
- if (next != end)
+ if (may_block && next != end)
cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+ __unmap_stage2_range(mmu, start, size, true);
+}
+
static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@@ -1843,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -1871,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true;
vma_pagesize = PAGE_SIZE;
+ vma_shift = PAGE_SHIFT;
}
/*
@@ -1964,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
(fault_status == FSC_PERM &&
stage2_is_exec(mmu, fault_ipa, vma_pagesize));
- if (vma_pagesize == PUD_SIZE) {
+ /*
+ * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
+ * all we have is a 2-level page table. Trying to map a PUD in
+ * this case would be fatally wrong.
+ */
+ if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
new_pud = kvm_pud_mkhuge(new_pud);
@@ -2119,7 +2131,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
goto out;
}
- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
ret = 1;
goto out_unlock;
@@ -2208,18 +2220,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
- unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+ unsigned flags = *(unsigned *)data;
+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+ __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end, unsigned flags)
{
if (!kvm->arch.mmu.pgd)
return 0;
trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
return 0;
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index f0d0312c0a55..81916e360b1e 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -269,6 +269,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
kvm_pmu_release_perf_event(&pmu->pmc[i]);
+ irq_work_sync(&vcpu->arch.pmu.overflow_work);
}
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
@@ -434,6 +435,22 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
}
/**
+ * When the perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
+ * to the event.
+ * This is why we need a callback to do it once outside of the NMI context.
+ */
+static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_pmu *pmu;
+
+ pmu = container_of(work, struct kvm_pmu, overflow_work);
+ vcpu = kvm_pmc_to_vcpu(pmu->pmc);
+
+ kvm_vcpu_kick(vcpu);
+}
+
+/**
* When the perf event overflows, set the overflow status and inform the vcpu.
*/
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
@@ -465,7 +482,11 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
if (kvm_pmu_overflow_status(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
- kvm_vcpu_kick(vcpu);
+
+ if (!in_nmi())
+ kvm_vcpu_kick(vcpu);
+ else
+ irq_work_queue(&vcpu->arch.pmu.overflow_work);
}
cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
@@ -764,6 +785,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
return ret;
}
+ init_irq_work(&vcpu->arch.pmu.overflow_work,
+ kvm_pmu_perf_overflow_notify_vcpu);
+
vcpu->arch.pmu.created = true;
return 0;
}
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 83415e96b589..db4056ecccfd 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
{
switch (regid) {
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (kvm_arm_harden_branch_predictor()) {
- case KVM_BP_HARDEN_UNKNOWN:
+ switch (arm64_get_spectre_v2_state()) {
+ case SPECTRE_VULNERABLE:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
- case KVM_BP_HARDEN_WA_NEEDED:
+ case SPECTRE_MITIGATED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
- case KVM_BP_HARDEN_NOT_REQUIRED:
+ case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
}
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (kvm_arm_have_ssbd()) {
- case KVM_SSBD_FORCE_DISABLE:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
- case KVM_SSBD_KERNEL:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
- case KVM_SSBD_FORCE_ENABLE:
- case KVM_SSBD_MITIGATED:
+ switch (arm64_get_spectre_v4_state()) {
+ case SPECTRE_MITIGATED:
+ /*
+ * As for the hypercall discovery, we pretend we
+ * don't have any FW mitigation if SSBS is there at
+ * all times.
+ */
+ if (cpus_have_final_cap(ARM64_SSBS))
+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+ fallthrough;
+ case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
- case KVM_SSBD_UNKNOWN:
- default:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+ case SPECTRE_VULNERABLE:
+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
}
}
@@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
val = kvm_psci_version(vcpu, vcpu->kvm);
break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
- break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
- if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
- kvm_arm_get_vcpu_workaround_2_flag(vcpu))
- val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
break;
default:
return -ENOENT;
@@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
return -EINVAL;
- wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
- if (get_kernel_wa_level(reg->id) < wa_level)
- return -EINVAL;
-
/* The enabled bit must not be set unless the level is AVAIL. */
- if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
- wa_level != val)
+ if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+ (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
return -EINVAL;
- /* Are we finished or do we need to check the enable bit ? */
- if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
- return 0;
-
/*
- * If this kernel supports the workaround to be switched on
- * or off, make sure it matches the requested setting.
+ * Map all the possible incoming states to the only two we
+ * really want to deal with.
*/
- switch (wa_level) {
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
- kvm_arm_set_vcpu_workaround_2_flag(vcpu,
- val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+ switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+ wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
break;
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
- kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+ wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
break;
+ default:
+ return -EINVAL;
}
+ /*
+ * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+ * other way around.
+ */
+ if (get_kernel_wa_level(reg->id) < wa_level)
+ return -EINVAL;
+
return 0;
default:
return -ENOENT;
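
The new WORKAROUND_2 handling above reduces the restore decision to an ordering comparison between the level userspace asks to set and the level the host reports via get_kernel_wa_level(). The following stand-alone sketch is illustrative only and is not part of the patch; the numeric enum values are an assumption and only mirror the relative ordering that the "get_kernel_wa_level(reg->id) < wa_level" check relies on.

#include <stdbool.h>
#include <stdio.h>

/* Assumed ordering only: NOT_AVAIL below NOT_REQUIRED, as the patch's
 * "get_kernel_wa_level(reg->id) < wa_level" comparison implies. */
enum wa2_level { WA2_NOT_AVAIL = 0, WA2_NOT_REQUIRED = 3 };

static bool restore_ok(enum wa2_level host, enum wa2_level requested)
{
	/* A guest saved as NOT_AVAIL may run on a NOT_REQUIRED host,
	 * but a NOT_REQUIRED guest cannot be restored on a NOT_AVAIL host. */
	return host >= requested;
}

int main(void)
{
	printf("NOT_AVAIL guest on NOT_REQUIRED host: %s\n",
	       restore_ok(WA2_NOT_REQUIRED, WA2_NOT_AVAIL) ? "ok" : "rejected");
	printf("NOT_REQUIRED guest on NOT_AVAIL host: %s\n",
	       restore_ok(WA2_NOT_AVAIL, WA2_NOT_REQUIRED) ? "ok" : "rejected");
	return 0;
}
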
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index f7b52ce1557e..920ac43077ad 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -13,25 +13,22 @@
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- u64 steal;
- __le64 steal_le;
- u64 offset;
- int idx;
u64 base = vcpu->arch.steal.base;
+ u64 last_steal = vcpu->arch.steal.last_steal;
+ u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+ u64 steal = 0;
+ int idx;
if (base == GPA_INVALID)
return;
- /* Let's do the local bookkeeping */
- steal = vcpu->arch.steal.steal;
- steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
- vcpu->arch.steal.last_steal = current->sched_info.run_delay;
- vcpu->arch.steal.steal = steal;
-
- steal_le = cpu_to_le64(steal);
idx = srcu_read_lock(&kvm->srcu);
- offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
- kvm_put_guest(kvm, base + offset, steal_le, u64);
+ if (!kvm_get_guest(kvm, base + offset, steal)) {
+ steal = le64_to_cpu(steal);
+ vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+ steal += vcpu->arch.steal.last_steal - last_steal;
+ kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+ }
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_TIME_FEATURES:
case ARM_SMCCC_HV_PV_TIME_ST:
- val = SMCCC_RET_SUCCESS;
+ if (vcpu->arch.steal.base != GPA_INVALID)
+ val = SMCCC_RET_SUCCESS;
break;
}
@@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
* Start counting stolen time from the time the guest requests
* the feature enabled.
*/
- vcpu->arch.steal.steal = 0;
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
idx = srcu_read_lock(&kvm->srcu);
@@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
-static bool kvm_arm_pvtime_supported(void)
+bool kvm_arm_pvtime_supported(void)
{
return !!sched_info_on();
}
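
The rewritten kvm_update_stolen_time() above no longer keeps a shadow total in the vcpu: it reads the running total back from the guest's pvtime page and adds only the run_delay delta accumulated since the last update. A minimal user-space model of that accumulation is sketched below; the names (guest_stolen_time, update_stolen_time) are made up for illustration and none of the KVM plumbing is involved.

#include <stdint.h>
#include <stdio.h>

static uint64_t guest_stolen_time;	/* stands in for the pvtime slot in guest memory */

static void update_stolen_time(uint64_t *last_steal, uint64_t run_delay)
{
	uint64_t steal = guest_stolen_time;	/* read back what the guest currently sees */

	steal += run_delay - *last_steal;	/* add time stolen since the last update */
	*last_steal = run_delay;

	guest_stolen_time = steal;		/* publish the new total */
}

int main(void)
{
	uint64_t last_steal = 0;

	update_stolen_time(&last_steal, 150);	/* scheduler reports 150 ns of run delay */
	update_stolen_time(&last_steal, 400);	/* later: 400 ns of total run delay */
	printf("guest sees %llu ns stolen\n", (unsigned long long)guest_stolen_time);
	return 0;
}
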
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index ee33875c5c2a..f6e8b4a75cbb 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.reset_state.reset = false;
}
- /* Default workaround setup is enabled (if supported) */
- if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
- vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
/* Reset timer */
ret = kvm_timer_vcpu_reset(vcpu);
out:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 077293b5115f..9ca270603980 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1131,6 +1131,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
if (!vcpu_has_sve(vcpu))
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
+ if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
+ arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+ val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
+ } else if (id == SYS_ID_AA64PFR1_EL1) {
+ val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
(0xfUL << ID_AA64ISAR1_API_SHIFT) |
@@ -1382,6 +1387,13 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
@@ -1547,6 +1559,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+
+ { SYS_DESC(SYS_RGSR_EL1), access_mte_regs },
+ { SYS_DESC(SYS_GCR_EL1), access_mte_regs },
+
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
@@ -1571,6 +1587,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_TFSR_EL1), access_mte_regs },
+ { SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs },
+
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4691053c5ee4..ff0444352bba 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+ TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_exit,
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+ TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
__print_symbolic(__entry->ret, kvm_arm_exception_type),
__entry->esr_ec,
__print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault,
__entry->ipa = ipa;
),
- TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+ TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
__entry->ipa, __entry->hsr,
__entry->hxfar, __entry->vcpu_pc)
);
@@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->cpsr = cpsr;
),
- TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+ TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range,
__entry->end = end;
),
- TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_age_hva,
@@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva,
__entry->end = end;
),
- TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_set_way_flush,
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..8d78acc4fba7 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64,
__entry->is_wfe = is_wfe;
),
- TP_printk("guest executed wf%c at: 0x%08lx",
+ TP_printk("guest executed wf%c at: 0x%016lx",
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->imm = imm;
),
- TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+ TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
@@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg,
__entry->write_value = write_value;
),
- TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+ TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
);
TRACE_EVENT(kvm_handle_sys_reg,
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 5c786b915cd3..52d6f24f65dc 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -1001,8 +1001,8 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
raw_spin_lock_irqsave(&irq->irq_lock, flags);
/*
- * An access targetting Group0 SGIs can only generate
- * those, while an access targetting Group1 SGIs can
+ * An access targeting Group0 SGIs can only generate
+ * those, while an access targeting Group1 SGIs can
* generate interrupts of either group.
*/
if (!irq->group || allow_group1) {
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 2fc253466dbf..d31e1169d9b8 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -16,3 +16,5 @@ lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
+obj-$(CONFIG_ARM64_MTE) += mte.o
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
new file mode 100644
index 000000000000..03ca6d8b8670
--- /dev/null
+++ b/arch/arm64/lib/mte.S
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/mte.h>
+#include <asm/page.h>
+#include <asm/sysreg.h>
+
+ .arch armv8.5-a+memtag
+
+/*
+ * multitag_transfer_size - set \reg to the block size that is accessed by the
+ * LDGM/STGM instructions.
+ */
+ .macro multitag_transfer_size, reg, tmp
+ mrs_s \reg, SYS_GMID_EL1
+ ubfx \reg, \reg, #SYS_GMID_EL1_BS_SHIFT, #SYS_GMID_EL1_BS_SIZE
+ mov \tmp, #4
+ lsl \reg, \tmp, \reg
+ .endm
+
+/*
+ * Clear the tags in a page
+ * x0 - address of the page to be cleared
+ */
+SYM_FUNC_START(mte_clear_page_tags)
+ multitag_transfer_size x1, x2
+1: stgm xzr, [x0]
+ add x0, x0, x1
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+SYM_FUNC_END(mte_clear_page_tags)
+
+/*
+ * Copy the tags from the source page to the destination one
+ * x0 - address of the destination page
+ * x1 - address of the source page
+ */
+SYM_FUNC_START(mte_copy_page_tags)
+ mov x2, x0
+ mov x3, x1
+ multitag_transfer_size x5, x6
+1: ldgm x4, [x3]
+ stgm x4, [x2]
+ add x2, x2, x5
+ add x3, x3, x5
+ tst x2, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+SYM_FUNC_END(mte_copy_page_tags)
+
+/*
+ * Read tags from a user buffer (one tag per byte) and set the corresponding
+ * tags at the given kernel address. Used by PTRACE_POKEMTETAGS.
+ * x0 - kernel address (to)
+ * x1 - user buffer (from)
+ * x2 - number of tags/bytes (n)
+ * Returns:
+ * x0 - number of tags read/set
+ */
+SYM_FUNC_START(mte_copy_tags_from_user)
+ mov x3, x1
+ cbz x2, 2f
+1:
+ uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0
+ lsl x4, x4, #MTE_TAG_SHIFT
+ stg x4, [x0], #MTE_GRANULE_SIZE
+ add x1, x1, #1
+ subs x2, x2, #1
+ b.ne 1b
+
+ // exception handling and function return
+2: sub x0, x1, x3 // update the number of tags set
+ ret
+SYM_FUNC_END(mte_copy_tags_from_user)
+
+/*
+ * Get the tags from a kernel address range and write the tag values to the
+ * given user buffer (one tag per byte). Used by PTRACE_PEEKMTETAGS.
+ * x0 - user buffer (to)
+ * x1 - kernel address (from)
+ * x2 - number of tags/bytes (n)
+ * Returns:
+ * x0 - number of tags read/set
+ */
+SYM_FUNC_START(mte_copy_tags_to_user)
+ mov x3, x0
+ cbz x2, 2f
+1:
+ ldg x4, [x1]
+ ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
+ uao_user_alternative 2f, strb, sttrb, w4, x0, 0
+ add x0, x0, #1
+ add x1, x1, #MTE_GRANULE_SIZE
+ subs x2, x2, #1
+ b.ne 1b
+
+ // exception handling and function return
+2: sub x0, x0, x3 // update the number of tags copied
+ ret
+SYM_FUNC_END(mte_copy_tags_to_user)
+
+/*
+ * Save the tags in a page
+ * x0 - page address
+ * x1 - tag storage
+ */
+SYM_FUNC_START(mte_save_page_tags)
+ multitag_transfer_size x7, x5
+1:
+ mov x2, #0
+2:
+ ldgm x5, [x0]
+ orr x2, x2, x5
+ add x0, x0, x7
+ tst x0, #0xFF // 16 tag values fit in a register,
+ b.ne 2b // which is 16*16=256 bytes
+
+ str x2, [x1], #8
+
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+
+ ret
+SYM_FUNC_END(mte_save_page_tags)
+
+/*
+ * Restore the tags in a page
+ * x0 - page address
+ * x1 - tag storage
+ */
+SYM_FUNC_START(mte_restore_page_tags)
+ multitag_transfer_size x7, x5
+1:
+ ldr x2, [x1], #8
+2:
+ stgm x2, [x0]
+ add x0, x0, x7
+ tst x0, #0xFF
+ b.ne 2b
+
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+
+ ret
+SYM_FUNC_END(mte_restore_page_tags)
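
The mte_copy_tags_to_user()/mte_copy_tags_from_user() routines above implement the PTRACE_PEEKMTETAGS/POKEMTETAGS buffer format: one byte per 16-byte granule, carrying a 4-bit tag in the low bits. The host-side model below is illustrative only; poke_tags()/peek_tags() are hypothetical helpers that mimic the data layout, not the kernel entry points.

#include <stdint.h>
#include <stdio.h>

#define MTE_GRANULE_SIZE	16
#define NGRANULES		4

static uint8_t page_tags[NGRANULES];	/* stand-in for the tags held in memory */

/* PTRACE_POKEMTETAGS direction: one tag value per byte of the user buffer */
static void poke_tags(const uint8_t *buf, size_t n)
{
	for (size_t i = 0; i < n && i < NGRANULES; i++)
		page_tags[i] = buf[i] & 0xf;	/* only the low 4 bits are a tag */
}

/* PTRACE_PEEKMTETAGS direction */
static void peek_tags(uint8_t *buf, size_t n)
{
	for (size_t i = 0; i < n && i < NGRANULES; i++)
		buf[i] = page_tags[i];
}

int main(void)
{
	uint8_t in[NGRANULES] = { 0x3, 0x7, 0x7, 0x0 }, out[NGRANULES];

	poke_tags(in, NGRANULES);
	peek_tags(out, NGRANULES);
	for (int i = 0; i < NGRANULES; i++)
		printf("bytes %2d-%2d: tag %u\n", i * MTE_GRANULE_SIZE,
		       (i + 1) * MTE_GRANULE_SIZE - 1, (unsigned int)out[i]);
	return 0;
}
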
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index d91030f0ffee..5ead3c3de3b6 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,10 +4,11 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PTDUMP_CORE) += dump.o
+obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+obj-$(CONFIG_ARM64_MTE) += mteswap.o
KASAN_SANITIZE_physaddr.o += n
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index a206655a39a5..001737a8f309 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -27,6 +27,10 @@ static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
+static unsigned long max_pinned_asids;
+static unsigned long nr_pinned_asids;
+static unsigned long *pinned_asid_map;
+
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
@@ -45,7 +49,7 @@ static u32 get_cpu_asid_bits(void)
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
- /* Fallthrough */
+ fallthrough;
case 0:
asid = 8;
break;
@@ -72,7 +76,7 @@ void verify_cpu_asid_bits(void)
}
}
-static void set_kpti_asid_bits(void)
+static void set_kpti_asid_bits(unsigned long *map)
{
unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
/*
@@ -81,13 +85,15 @@ static void set_kpti_asid_bits(void)
* is set, then the ASID will map only userspace. Thus
* mark even as reserved for kernel.
*/
- memset(asid_map, 0xaa, len);
+ memset(map, 0xaa, len);
}
static void set_reserved_asid_bits(void)
{
- if (arm64_kernel_unmapped_at_el0())
- set_kpti_asid_bits();
+ if (pinned_asid_map)
+ bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
+ else if (arm64_kernel_unmapped_at_el0())
+ set_kpti_asid_bits(asid_map);
else
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}
@@ -166,6 +172,14 @@ static u64 new_context(struct mm_struct *mm)
return newasid;
/*
+ * If it is pinned, we can keep using it. Note that reserved
+ * takes priority, because even if it is also pinned, we need to
+ * update the generation into the reserved_asids.
+ */
+ if (refcount_read(&mm->context.pinned))
+ return newasid;
+
+ /*
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
@@ -256,6 +270,71 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+unsigned long arm64_mm_context_get(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid;
+
+ if (!pinned_asid_map)
+ return 0;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ asid = atomic64_read(&mm->context.id);
+
+ if (refcount_inc_not_zero(&mm->context.pinned))
+ goto out_unlock;
+
+ if (nr_pinned_asids >= max_pinned_asids) {
+ asid = 0;
+ goto out_unlock;
+ }
+
+ if (!asid_gen_match(asid)) {
+ /*
+ * We went through one or more rollover since that ASID was
+ * used. Ensure that it is still valid, or generate a new one.
+ */
+ asid = new_context(mm);
+ atomic64_set(&mm->context.id, asid);
+ }
+
+ nr_pinned_asids++;
+ __set_bit(asid2idx(asid), pinned_asid_map);
+ refcount_set(&mm->context.pinned, 1);
+
+out_unlock:
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+ asid &= ~ASID_MASK;
+
+ /* Set the equivalent of USER_ASID_BIT */
+ if (asid && arm64_kernel_unmapped_at_el0())
+ asid |= 1;
+
+ return asid;
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_get);
+
+void arm64_mm_context_put(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid = atomic64_read(&mm->context.id);
+
+ if (!pinned_asid_map)
+ return;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ if (refcount_dec_and_test(&mm->context.pinned)) {
+ __clear_bit(asid2idx(asid), pinned_asid_map);
+ nr_pinned_asids--;
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_put);
+
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
@@ -296,8 +375,11 @@ static int asids_update_limit(void)
{
unsigned long num_available_asids = NUM_USER_ASIDS;
- if (arm64_kernel_unmapped_at_el0())
+ if (arm64_kernel_unmapped_at_el0()) {
num_available_asids /= 2;
+ if (pinned_asid_map)
+ set_kpti_asid_bits(pinned_asid_map);
+ }
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
@@ -305,6 +387,13 @@ static int asids_update_limit(void)
WARN_ON(num_available_asids - 1 <= num_possible_cpus());
pr_info("ASID allocator initialised with %lu entries\n",
num_available_asids);
+
+ /*
+ * There must always be an ASID available after rollover. Ensure that,
+ * even if all CPUs have a reserved ASID and the maximum number of
+ * ASIDs is pinned, there is still at least one empty slot in the ASID map.
+ */
+ max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
return 0;
}
arch_initcall(asids_update_limit);
@@ -319,13 +408,17 @@ static int asids_init(void)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
+ pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
+ sizeof(*pinned_asid_map), GFP_KERNEL);
+ nr_pinned_asids = 0;
+
/*
* We cannot call set_reserved_asid_bits() here because CPU
* caps are not finalized yet, so it is safer to assume KPTI
* and reserve kernel ASID's from beginning.
*/
if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
- set_kpti_asid_bits();
+ set_kpti_asid_bits(asid_map);
return 0;
}
early_initcall(asids_init);
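
The max_pinned_asids budget introduced above covers the worst case after a rollover: every CPU may hold a reserved ASID, ASID #0 belongs to init_mm, and one slot must stay free so allocation cannot fail. The throwaway calculation below plugs in example numbers (16-bit ASIDs, KPTI enabled, 8 CPUs); it is plain arithmetic, not kernel code, and the values are assumptions for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long asid_bits = 16;			/* as read from ID_AA64MMFR0_EL1 */
	unsigned long num_user_asids = 1UL << asid_bits;
	unsigned long num_possible_cpus = 8;		/* example machine */
	int kpti = 1;					/* kernel unmapped at EL0 */

	unsigned long available = num_user_asids;
	if (kpti)
		available /= 2;				/* odd ASIDs go to the kernel mappings */

	/* each CPU may keep a reserved ASID across rollover; the extra 2
	 * covers ASID #0 (init_mm) plus one spare slot in the map */
	unsigned long max_pinned = available - num_possible_cpus - 2;

	printf("pinnable ASIDs: %lu of %lu\n", max_pinned, available);
	return 0;
}
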
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 2ee7b73433a5..70a71f38b6a9 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -6,21 +6,32 @@
* Copyright (C) 2012 ARM Ltd.
*/
+#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/mte.h>
-void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void copy_highpage(struct page *to, struct page *from)
{
- struct page *page = virt_to_page(kto);
+ struct page *kto = page_address(to);
+ struct page *kfrom = page_address(from);
+
copy_page(kto, kfrom);
- flush_dcache_page(page);
+
+ if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
+ set_bit(PG_mte_tagged, &to->flags);
+ mte_copy_page_tags(kto, kfrom);
+ }
}
-EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
+EXPORT_SYMBOL(copy_highpage);
-void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
{
- clear_page(kaddr);
+ copy_highpage(to, from);
+ flush_dcache_page(to);
}
-EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
+EXPORT_SYMBOL_GPL(copy_user_highpage);
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index eee1732ab6cd..aa0060178343 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -14,9 +14,7 @@ int fixup_exception(struct pt_regs *regs)
if (!fixup)
return 0;
- if (IS_ENABLED(CONFIG_BPF_JIT) &&
- regs->pc >= BPF_JIT_REGION_START &&
- regs->pc < BPF_JIT_REGION_END)
+ if (in_bpf_jit(regs))
return arm64_bpf_fixup_exception(fixup, regs);
regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f07333e86c2f..94c99c1c19e3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -218,7 +218,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);
- flush_tlb_fix_spurious_fault(vma, address);
+ /* Invalidate a stale read-only entry */
+ if (dirty)
+ flush_tlb_page(vma, address);
return 1;
}
@@ -641,6 +643,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 0;
}
+static int do_tag_check_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ do_bad_area(addr, esr, regs);
+ return 0;
+}
+
static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" },
{ do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" },
@@ -659,7 +668,7 @@ static const struct fault_info fault_info[] = {
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 17" },
+ { do_tag_check_fault, SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 18" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 19" },
{ do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" },
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 481d22c32a2e..f0bf86d81622 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -471,12 +471,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
*/
static void __init free_unused_memmap(void)
{
- unsigned long start, prev_end = 0;
- struct memblock_region *reg;
-
- for_each_memblock(memory, reg) {
- start = __phys_to_pfn(reg->base);
+ unsigned long start, end, prev_end = 0;
+ int i;
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
/*
* Take care not to free memmap entries that don't exist due
@@ -496,8 +494,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES.
*/
- prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
- MAX_ORDER_NR_PAGES);
+ prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
}
#ifdef CONFIG_SPARSEMEM
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 7291b26ce788..b24e43d20667 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -212,8 +212,8 @@ void __init kasan_init(void)
{
u64 kimg_shadow_start, kimg_shadow_end;
u64 mod_shadow_start, mod_shadow_end;
- struct memblock_region *reg;
- int i;
+ phys_addr_t pa_start, pa_end;
+ u64 i;
kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
@@ -246,9 +246,9 @@ void __init kasan_init(void)
kasan_populate_early_shadow((void *)mod_shadow_end,
(void *)kimg_shadow_start);
- for_each_memblock(memory, reg) {
- void *start = (void *)__phys_to_virt(reg->base);
- void *end = (void *)__phys_to_virt(reg->base + reg->size);
+ for_each_mem_range(i, &pa_start, &pa_end) {
+ void *start = (void *)__phys_to_virt(pa_start);
+ void *end = (void *)__phys_to_virt(pa_end);
if (start >= end)
break;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 75df62fea1b6..beff3ad8c7f8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,7 +43,7 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 __section(".mmuoff.data.write") vabits_actual;
+u64 __section(.mmuoff.data.write) vabits_actual;
EXPORT_SYMBOL(vabits_actual);
u64 kimage_voffset __ro_after_init;
@@ -122,7 +122,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
- static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+ pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
/* creating or taking down mappings is always safe */
if (old == 0 || new == 0)
@@ -136,6 +136,17 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
if (old & ~new & PTE_NG)
return false;
+ /*
+ * Changing the memory type between Normal and Normal-Tagged is safe
+ * since Tagged is considered a permission attribute from the
+ * mismatched attribute aliases perspective.
+ */
+ if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+ (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
+ ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+ (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
+ mask |= PTE_ATTRINDX_MASK;
+
return ((old ^ new) & ~mask) == 0;
}
@@ -462,8 +473,9 @@ static void __init map_mem(pgd_t *pgdp)
{
phys_addr_t kernel_start = __pa_symbol(_text);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
- struct memblock_region *reg;
+ phys_addr_t start, end;
int flags = 0;
+ u64 i;
if (rodata_full || debug_pagealloc_enabled())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
@@ -482,16 +494,15 @@ static void __init map_mem(pgd_t *pgdp)
#endif
/* map all the memory banks */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
-
+ for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
- if (memblock_is_nomap(reg))
- continue;
-
- __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
+ /*
+ * The linear map must allow allocation tags reading/writing
+ * if MTE is present. Otherwise, it has the same attributes as
+ * PAGE_KERNEL.
+ */
+ __map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
}
/*
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
new file mode 100644
index 000000000000..c52c1847079c
--- /dev/null
+++ b/arch/arm64/mm/mteswap.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pagemap.h>
+#include <linux/xarray.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <asm/mte.h>
+
+static DEFINE_XARRAY(mte_pages);
+
+void *mte_allocate_tag_storage(void)
+{
+ /* tags granule is 16 bytes, 2 tags stored per byte */
+ return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL);
+}
+
+void mte_free_tag_storage(char *storage)
+{
+ kfree(storage);
+}
+
+int mte_save_tags(struct page *page)
+{
+ void *tag_storage, *ret;
+
+ if (!test_bit(PG_mte_tagged, &page->flags))
+ return 0;
+
+ tag_storage = mte_allocate_tag_storage();
+ if (!tag_storage)
+ return -ENOMEM;
+
+ mte_save_page_tags(page_address(page), tag_storage);
+
+ /* page_private contains the swap entry.val set in do_swap_page */
+ ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
+ if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+ mte_free_tag_storage(tag_storage);
+ return xa_err(ret);
+ } else if (ret) {
+ /* Entry is being replaced, free the old entry */
+ mte_free_tag_storage(ret);
+ }
+
+ return 0;
+}
+
+bool mte_restore_tags(swp_entry_t entry, struct page *page)
+{
+ void *tags = xa_load(&mte_pages, entry.val);
+
+ if (!tags)
+ return false;
+
+ mte_restore_page_tags(page_address(page), tags);
+
+ return true;
+}
+
+void mte_invalidate_tags(int type, pgoff_t offset)
+{
+ swp_entry_t entry = swp_entry(type, offset);
+ void *tags = xa_erase(&mte_pages, entry.val);
+
+ mte_free_tag_storage(tags);
+}
+
+void mte_invalidate_tags_area(int type)
+{
+ swp_entry_t entry = swp_entry(type, 0);
+ swp_entry_t last_entry = swp_entry(type + 1, 0);
+ void *tags;
+
+ XA_STATE(xa_state, &mte_pages, entry.val);
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, last_entry.val - 1) {
+ __xa_erase(&mte_pages, xa_state.xa_index);
+ mte_free_tag_storage(tags);
+ }
+ xa_unlock(&mte_pages);
+}
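
mte_allocate_tag_storage() above sizes its buffer as PAGE_SIZE / 16 / 2: one 4-bit tag per 16-byte granule, two tags packed per byte. For a 4 KiB page that works out as shown in the short arithmetic sketch below (illustrative only, not kernel code).

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* PAGE_SIZE on a 4K-page kernel */
	unsigned long granule = 16;		/* MTE_GRANULE_SIZE: one tag per 16 bytes */
	unsigned long tags_per_byte = 2;	/* tags are 4 bits wide */

	unsigned long tags = page_size / granule;	/* 256 tags per page */
	unsigned long bytes = tags / tags_per_byte;	/* 128 bytes of storage per page */

	printf("%lu tags -> %lu bytes of swap-side tag storage per page\n", tags, bytes);
	return 0;
}
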
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 73f8b49d485c..a8303bc6b62a 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map);
*/
const struct cpumask *cpumask_of_node(int node)
{
- if (WARN_ON(node >= nr_node_ids))
+
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ if (WARN_ON(node < 0 || node >= nr_node_ids))
return cpu_none_mask;
if (WARN_ON(node_to_cpumask_map[node] == NULL))
@@ -350,7 +354,7 @@ static int __init numa_register_nodes(void)
struct memblock_region *mblk;
/* Check that valid nid is set to memblks */
- for_each_memblock(memory, mblk) {
+ for_each_mem_region(mblk) {
int mblk_nid = memblock_get_region_node(mblk);
if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
@@ -423,19 +427,16 @@ out_free_distance:
*/
static int __init dummy_numa_init(void)
{
+ phys_addr_t start = memblock_start_of_DRAM();
+ phys_addr_t end = memblock_end_of_DRAM();
int ret;
- struct memblock_region *mblk;
if (numa_off)
pr_info("NUMA disabled\n"); /* Forced off on command line. */
- pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
- memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);
-
- for_each_memblock(memory, mblk) {
- ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
- if (!ret)
- continue;
+ pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", start, end - 1);
+ ret = numa_add_memblk(0, start, end);
+ if (ret) {
pr_err("NUMA init failed\n");
return ret;
}
@@ -448,7 +449,7 @@ static int __init dummy_numa_init(void)
* arm64_numa_init() - Initialize NUMA
*
* Try each configured NUMA initialization method until one succeeds. The
- * last fallback is dummy single node config encomapssing whole memory.
+ * last fallback is dummy single node config encompassing whole memory.
*/
void __init arm64_numa_init(void)
{
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 23f648c2a199..1b94f5b82654 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 796e47a571e6..23c326a06b2d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -18,6 +18,7 @@
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
+#include <asm/sysreg.h>
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
@@ -44,14 +45,18 @@
#define TCR_KASAN_FLAGS 0
#endif
-/* Default MAIR_EL1 */
+/*
+ * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
+ * changed during __cpu_setup to Normal Tagged if the system supports MTE.
+ */
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
- MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
#ifdef CONFIG_CPU_PM
/**
@@ -421,6 +426,29 @@ SYM_FUNC_START(__cpu_setup)
* Memory region attributes
*/
mov_q x5, MAIR_EL1_SET
+#ifdef CONFIG_ARM64_MTE
+ /*
+ * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
+ * (ID_AA64PFR1_EL1[11:8] > 1).
+ */
+ mrs x10, ID_AA64PFR1_EL1
+ ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
+ cmp x10, #ID_AA64PFR1_MTE
+ b.lt 1f
+
+ /* Normal Tagged memory type at the corresponding MAIR index */
+ mov x10, #MAIR_ATTR_NORMAL_TAGGED
+ bfi x5, x10, #(8 * MT_NORMAL_TAGGED), #8
+
+ /* initialize GCR_EL1: all non-zero tags excluded by default */
+ mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
+ msr_s SYS_GCR_EL1, x10
+
+ /* clear any pending tag check faults in TFSR*_EL1 */
+ msr_s SYS_TFSR_EL1, xzr
+ msr_s SYS_TFSRE0_EL1, xzr
+1:
+#endif
msr mair_el1, x5
/*
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/ptdump.c
index 0b8da1cc1c07..807dc634bbd2 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -41,6 +41,8 @@ static struct addr_marker address_markers[] = {
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
+ { BPF_JIT_REGION_START, "BPF start" },
+ { BPF_JIT_REGION_END, "BPF end" },
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() area" },
@@ -169,6 +171,10 @@ static const struct prot_bits pte_bits[] = {
.mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL),
.set = "MEM/NORMAL",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_NORMAL_TAGGED),
+ .set = "MEM/NORMAL-TAGGED",
}
};
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f8912e45be7a..ef9f1d5e989d 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
}
}
-static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+static inline int bpf2a64_offset(int bpf_insn, int off,
const struct jit_ctx *ctx)
{
- int to = ctx->offset[bpf_to];
- /* -1 to account for the Branch instruction */
- int from = ctx->offset[bpf_from] - 1;
-
- return to - from;
+ /* BPF JMP offset is relative to the next instruction */
+ bpf_insn++;
+ /*
+ * arm64 branch instructions, on the other hand, encode the offset
+ * from the branch itself, so we must subtract 1 from the
+ * instruction offset.
+ */
+ return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}
static void jit_fill_hole(void *area, unsigned int size)
@@ -642,7 +645,7 @@ emit_bswap_uxt:
/* JUMP off */
case BPF_JMP | BPF_JA:
- jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm26(jmp_offset);
emit(A64_B(jmp_offset), ctx);
break;
@@ -669,7 +672,7 @@ emit_bswap_uxt:
case BPF_JMP32 | BPF_JSLE | BPF_X:
emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
- jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm19(jmp_offset);
switch (BPF_OP(code)) {
case BPF_JEQ:
@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
const struct bpf_prog *prog = ctx->prog;
int i;
+ /*
+ * - offset[0] - offset of the end of prologue,
+ * start of the 1st instruction.
+ * - offset[1] - offset of the end of 1st instruction,
+ * start of the 2nd instruction
+ * [....]
+ * - offset[3] - offset of the end of 3rd instruction,
+ * start of 4th instruction
+ */
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
ctx->offset[i] = ctx->idx;
continue;
}
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
if (ret)
return ret;
}
+ /*
+ * offset is allocated with prog->len + 1 so fill in
+ * the last element with the offset after the last
+ * instruction (end of program)
+ */
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
return 0;
}
@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
- ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out_off;
@@ -1089,7 +1108,7 @@ skip_init_ctx:
prog->jited_len = prog_size;
if (!prog->is_func || extra_pass) {
- bpf_prog_fill_jited_linfo(prog, ctx.offset);
+ bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
kfree(ctx.offset);
kfree(jit_data);
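
The new bpf2a64_offset() above converts a BPF jump, which is relative to the next BPF instruction, into an arm64 branch offset, which is relative to the branch instruction itself, using the per-instruction offset[] table described in build_body(). The toy model below uses a made-up offset table just to show the index arithmetic; it is not the JIT code and the numbers are arbitrary.

#include <stdio.h>

/* offset[k] = index of the first arm64 instruction emitted for BPF insn k
 * (i.e. the end of everything emitted for insn k-1); offset[len] is the
 * end of the program. Prologue ignored in this toy table. */
static int offset[] = { 0, 2, 3, 6, 7 };	/* hypothetical 4-insn program */

static int bpf2a64_offset(int bpf_insn, int off)
{
	bpf_insn++;				/* BPF jumps are relative to the next insn */
	/* the branch is the last arm64 instruction emitted for the jumping
	 * BPF insn, i.e. offset[bpf_insn] - 1, hence the "- 1" below */
	return offset[bpf_insn + off] - (offset[bpf_insn] - 1);
}

int main(void)
{
	/* BPF insn 1 jumping with off = 1, i.e. to BPF insn 3:
	 * branch sits at arm64 index 2, target at index 6, so the
	 * encoded arm64 offset is 4 instructions. */
	printf("arm64 branch offset: %d\n", bpf2a64_offset(1, 1));
	return 0;
}
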
diff --git a/arch/c6x/include/asm/checksum.h b/arch/c6x/include/asm/checksum.h
index 36770b8308d9..934918def632 100644
--- a/arch/c6x/include/asm/checksum.h
+++ b/arch/c6x/include/asm/checksum.h
@@ -26,6 +26,9 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
+#define _HAVE_ARCH_CSUM_AND_COPY
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
+
#include <asm-generic/checksum.h>
#endif /* _ASM_C6X_CHECKSUM_H */
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index 8ef35131f999..9254c3b794a5 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -287,7 +287,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
void __init setup_arch(char **cmdline_p)
{
- struct memblock_region *reg;
+ phys_addr_t start, end;
+ u64 i;
printk(KERN_INFO "Initializing kernel\n");
@@ -351,9 +352,9 @@ void __init setup_arch(char **cmdline_p)
disable_caching(ram_start, ram_end - 1);
/* Set caching of external RAM used by Linux */
- for_each_memblock(memory, reg)
- enable_caching(CACHE_REGION_START(reg->base),
- CACHE_REGION_START(reg->base + reg->size - 1));
+ for_each_mem_range(i, &start, &end)
+ enable_caching(CACHE_REGION_START(start),
+ CACHE_REGION_START(end - 1));
#ifdef CONFIG_BLK_DEV_INITRD
/*
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index e456652facce..d05c78eace1b 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -220,7 +220,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->a4 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->a4 = regs->orig_a4;
@@ -252,7 +252,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs,
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a4 = regs->orig_a4;
regs->pc -= 4;
diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S
index 9c07127485d1..57148866d8d3 100644
--- a/arch/c6x/lib/csum_64plus.S
+++ b/arch/c6x/lib/csum_64plus.S
@@ -24,7 +24,6 @@
ENTRY(csum_partial_copy_nocheck)
MVC .S2 ILC,B30
- MV .D1X B6,A31 ; given csum
ZERO .D1 A9 ; csum (a side)
|| ZERO .D2 B9 ; csum (b side)
|| SHRU .S2X A6,2,B5 ; len / 4
@@ -144,8 +143,7 @@ L91: SHRU .S2X A9,16,B4
SHRU .S1 A9,16,A0
[A0] BNOP .S1 L91,5
-L10: ADD .D1 A31,A9,A9
- MV .D1 A9,A4
+L10: MV .D1 A9,A4
BNOP .S2 B3,4
MVC .S2 B30,ILC
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 3d5afb5f5685..7f424c85772c 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -309,16 +309,3 @@ endmenu
source "arch/csky/Kconfig.platforms"
source "kernel/Kconfig.hz"
-
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index f0f733b7ac5a..589f090f48b9 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -404,87 +404,14 @@ int __init arch_populate_kprobe_blacklist(void)
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =
- (unsigned long)&kretprobe_trampoline;
- kprobe_opcode_t *correct_ret_addr = NULL;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * return probes installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always pushed into the head of the list
- * - when multiple return probes are registered for the same
- * function, the (chronologically) first instance's ret_addr
- * will be the real return address, and all the rest will
- * point to kretprobe_trampoline.
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (ri->rp && ri->rp->handler) {
- __this_cpu_write(current_kprobe, &ri->rp->kp);
- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
- ri->ret_addr = correct_ret_addr;
- ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, NULL);
- }
-
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
- return (void *)orig_ret_address;
+ return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->lr;
+ ri->fp = NULL;
regs->lr = (unsigned long) &kretprobe_trampoline;
}
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 9452d6570b7e..970895df75ec 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -194,7 +194,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->pc -= TRAP0_SIZE;
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index f05b413df328..f03033e17c29 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -109,6 +109,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 83ce3caf7313..aea0a40b77a9 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -172,5 +172,5 @@ asmlinkage int sys_clone(unsigned long __user *args)
kargs.exit_signal = (lower_32_bits(clone_flags) & CSIGNAL);
kargs.stack = newsp;
- return _do_fork(&kargs);
+ return kernel_clone(&kargs);
}
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 28ac88358a89..0281f92eea3d 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -74,17 +74,15 @@ static void __init bootmem_init(void)
memory_end = memory_start = 0;
/* Find main memory where is the kernel */
- for_each_memblock(memory, region) {
- memory_start = region->base;
- memory_end = region->base + region->size;
- }
+ memory_start = memblock_start_of_DRAM();
+ memory_end = memblock_end_of_DRAM();
if (!memory_end)
panic("No memory!");
/* setup bootmem globals (we use no_bootmem, but mm still depends on this) */
min_low_pfn = PFN_UP(memory_start);
- max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = PFN_DOWN(memory_end);
max_pfn = max_low_pfn;
memblock_reserve(__pa(_stext), _end - _stext);
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 38d335488a54..69e68949787f 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -227,7 +227,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka)
regs->er0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->er0 = regs->orig_er0;
diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h
index a5c42f4614c1..4bc6ad96c4c5 100644
--- a/arch/hexagon/include/asm/checksum.h
+++ b/arch/hexagon/include/asm/checksum.h
@@ -10,17 +10,6 @@
unsigned int do_csum(const void *voidptr, int len);
/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
-/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
diff --git a/arch/hexagon/kernel/module.c b/arch/hexagon/kernel/module.c
index cf99fb79a124..cb3bf19b0640 100644
--- a/arch/hexagon/kernel/module.c
+++ b/arch/hexagon/kernel/module.c
@@ -120,7 +120,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
case R_HEXAGON_HI16:
value = (value>>16) & 0xffff;
- /* fallthrough */
+ fallthrough;
case R_HEXAGON_LO16:
*location &= ~0x00c03fff;
*location |= value & 0x3fff;
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index d48864c48e5a..94cc7ff52dce 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -155,7 +155,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->r00 = -EINTR;
break;
}
- /* Fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
pt_set_elr(regs, pt_elr(regs) - 4);
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 0ca2471ddb9f..35b18e55eae8 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -67,5 +67,6 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
}
diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
index c4a6b72d97de..ba50822a0800 100644
--- a/arch/hexagon/lib/checksum.c
+++ b/arch/hexagon/lib/checksum.c
@@ -176,14 +176,3 @@ unsigned int do_csum(const void *voidptr, int len)
return 0xFFFF & sum0;
}
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 5b4ec80bf586..996c410f2152 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -56,6 +56,7 @@ config IA64
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
select NUMA if !FLATMEM
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -362,15 +363,6 @@ config ARCH_PROC_KCORE_TEXT
config IA64_MCA_RECOVERY
tristate "MCA recovery from errors other than TLB."
-config PERFMON
- bool "Performance monitor support"
- depends on BROKEN
- help
- Selects whether support for the IA-64 performance monitor hardware
- is included in the kernel. This makes some kernel data-structures a
- little bigger and slows down execution a bit, but it is generally
- a good idea to turn this on. If you're unsure, say Y.
-
config IA64_PALINFO
tristate "/proc/pal support"
help
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index f3ba813a5b80..cfed5ed89301 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -11,7 +11,6 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
# CONFIG_VIRTUAL_MEM_MAP is not set
-CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_EFI_VARS=y
CONFIG_BINFMT_MISC=m
@@ -27,10 +26,9 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_GENERIC=m
-CONFIG_BLK_DEV_PIIX=m
+CONFIG_ATA=m
+CONFIG_ATA_GENERIC=m
+CONFIG_ATA_PIIX=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index cb267a07c57f..ca0d596c800d 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -18,7 +18,6 @@ CONFIG_IA64_CYCLONE=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_IA64_MCA_RECOVERY=y
-CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
@@ -45,11 +44,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_SGI_XP=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_PIIX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_CMD64X=y
+CONFIG_ATA_PIIX=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 7e25f2f031b6..281eb9c544f9 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -17,7 +17,6 @@ CONFIG_NR_CPUS=512
CONFIG_HOTPLUG_CPU=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_IA64_MCA_RECOVERY=y
-CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_EFI_VARS=y
CONFIG_BINFMT_MISC=m
@@ -36,12 +35,11 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
+CONFIG_ATA=y
CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_PIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_CMD64X=y
+CONFIG_ATA_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 3f486d5bdc2d..b4f9819a1a45 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -21,7 +21,6 @@ CONFIG_HOTPLUG_CPU=y
CONFIG_PERMIT_BSP_REMOVE=y
CONFIG_FORCE_CPEI_RETARGET=y
CONFIG_IA64_MCA_RECOVERY=y
-CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_KEXEC=y
CONFIG_EFI_VARS=y
@@ -41,11 +40,10 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_PIIX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_CMD64X=y
+CONFIG_ATA_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 261e98e1f5fe..629cb9cdf723 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -10,7 +10,6 @@ CONFIG_NR_CPUS=16
CONFIG_HOTPLUG_CPU=y
CONFIG_FLATMEM_MANUAL=y
CONFIG_IA64_MCA_RECOVERY=y
-CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_CRASH_DUMP=y
CONFIG_EFI_VARS=y
@@ -26,10 +25,9 @@ CONFIG_IP_MULTICAST=y
CONFIG_NETFILTER=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CMD64X=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_CMD64X=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index b66ba907019c..87927eb824cc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
-#define acpi_unlazy_tlb(x)
-
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
index 2a1c64629cdc..f3026213aa32 100644
--- a/arch/ia64/include/asm/checksum.h
+++ b/arch/ia64/include/asm/checksum.h
@@ -37,9 +37,6 @@ extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
/*
* This routine is used for miscellaneous IP-like checksums, mainly in
* icmp.c
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 10850897a91c..779b6972aa84 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -366,6 +366,15 @@ pgd_index (unsigned long address)
}
#define pgd_index pgd_index
+/*
+ * In the kernel's mapped region we know everything is in region number 5, so
+ * as an optimisation its PGD already points to the area for that region.
+ * However, this also means that we cannot use pgd_index() and we must
+ * never add the region here.
+ */
+#define pgd_offset_k(addr) \
+ (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
/* Look up a pgd entry in the gate area. On IA-64, the gate-area
resides in the kernel-mapped segment, hence we use pgd_offset_k()
here. */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 95a2ec37400f..2d8bcdc27d7f 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -280,15 +280,6 @@ struct thread_struct {
__u64 map_base; /* base address for get_unmapped_area() */
__u64 rbs_bot; /* the base address for the RBS */
int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
-
-#ifdef CONFIG_PERFMON
- void *pfm_context; /* pointer to detailed PMU context */
- unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
-# define INIT_THREAD_PM .pfm_context = NULL, \
- .pfm_needs_checking = 0UL,
-#else
-# define INIT_THREAD_PM
-#endif
unsigned long dbr[IA64_NUM_DBG_REGS];
unsigned long ibr[IA64_NUM_DBG_REGS];
struct ia64_fpreg fph[96]; /* saved/loaded on demand */
@@ -301,7 +292,6 @@ struct thread_struct {
.map_base = DEFAULT_MAP_BASE, \
.rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \
.last_fph_cpu = -1, \
- INIT_THREAD_PM \
.dbr = {0, }, \
.ibr = {0, }, \
.fph = {{{{0}}}, } \
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
index 9011e90a6b97..a5a4e09468fa 100644
--- a/arch/ia64/include/asm/switch_to.h
+++ b/arch/ia64/include/asm/switch_to.h
@@ -31,16 +31,8 @@ extern struct task_struct *ia64_switch_to (void *next_task);
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
-#ifdef CONFIG_PERFMON
- DECLARE_PER_CPU(unsigned long, pfm_syst_info);
-# define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
-#else
-# define PERFMON_IS_SYSWIDE() (0)
-#endif
-
#define IA64_HAS_EXTRA_STATE(t) \
- ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
- || PERFMON_IS_SYSWIDE())
+ ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))
#define __switch_to(prev,next,last) do { \
if (IA64_HAS_EXTRA_STATE(prev)) \
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 1a8df6669eee..81901c5e5426 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -10,7 +10,7 @@ endif
extra-y := head.o vmlinux.lds
obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
- irq_lsapic.o ivt.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
+ irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \
salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \
acpi-ext.o
@@ -21,7 +21,6 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_NUMA) += numa.o
-obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bec762a9b418..fec70d662d0c 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -163,7 +163,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
case DIE_INIT_MONARCH_LEAVE:
if (!kdump_freeze_monarch)
break;
- /* fall through */
+ fallthrough;
case DIE_INIT_SLAVE_LEAVE:
case DIE_INIT_MONARCH_ENTER:
case DIE_MCA_RENDZVOUS_LEAVE:
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b49fe6f618ed..f8150ee74f29 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -3,7 +3,7 @@
* Architecture-specific kernel symbols
*/
-#ifdef CONFIG_VIRTUAL_MEM_MAP
+#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM)
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/memblock.h>
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 6fff934150eb..46e33c5cb53d 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -40,10 +40,6 @@
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>
-#ifdef CONFIG_PERFMON
-# include <asm/perfmon.h>
-#endif
-
#define IRQ_DEBUG 0
#define IRQ_VECTOR_UNASSIGNED (0)
@@ -627,9 +623,6 @@ init_IRQ (void)
"irq_move");
}
#endif
-#ifdef CONFIG_PERFMON
- pfm_init_percpu();
-#endif
}
void
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 7a7df944d798..fc1ff8a4d7de 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -396,83 +396,9 @@ static void kretprobe_trampoline(void)
{
}
-/*
- * At this point the target function has been tricked into
- * returning into our trampoline. Lookup the associated instance
- * and then:
- * - call the handler function
- * - cleanup by marking the instance as unused
- * - long jump back to the original return address
- */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =
- ((struct fnptr *)kretprobe_trampoline)->ip;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path
- * have a return probe installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- regs->cr_iip = orig_ret_address;
-
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
@@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->b0;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 1a42ba885188..00a496cb346f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -654,7 +654,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
}
} else if (!is_internal(mod, val))
val = get_plt(mod, location, val, &ok);
- /* FALL THROUGH */
+ fallthrough;
default:
val -= bundle(location);
break;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
deleted file mode 100644
index 971f166873aa..000000000000
--- a/arch/ia64/kernel/perfmon.c
+++ /dev/null
@@ -1,6703 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * This file implements the perfmon-2 subsystem which is used
- * to program the IA-64 Performance Monitoring Unit (PMU).
- *
- * The initial version of perfmon.c was written by
- * Ganesh Venkitachalam, IBM Corp.
- *
- * Then it was modified for perfmon-1.x by Stephane Eranian and
- * David Mosberger, Hewlett Packard Co.
- *
- * Version Perfmon-2.x is a rewrite of perfmon-1.x
- * by Stephane Eranian, Hewlett Packard Co.
- *
- * Copyright (C) 1999-2005 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * More information about perfmon available at:
- * http://www.hpl.hp.com/research/linux/perfmon
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <linux/list.h>
-#include <linux/file.h>
-#include <linux/poll.h>
-#include <linux/vfs.h>
-#include <linux/smp.h>
-#include <linux/pagemap.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/bitops.h>
-#include <linux/capability.h>
-#include <linux/rcupdate.h>
-#include <linux/completion.h>
-#include <linux/tracehook.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-
-#include <asm/errno.h>
-#include <asm/intrinsics.h>
-#include <asm/page.h>
-#include <asm/perfmon.h>
-#include <asm/processor.h>
-#include <asm/signal.h>
-#include <linux/uaccess.h>
-#include <asm/delay.h>
-
-#include "irq.h"
-
-#ifdef CONFIG_PERFMON
-/*
- * perfmon context state
- */
-#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
-#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
-#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
-#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
-
-#define PFM_INVALID_ACTIVATION (~0UL)
-
-#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
-#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
-
-/*
- * depth of message queue
- */
-#define PFM_MAX_MSGS 32
-#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
-
-/*
- * type of a PMU register (bitmask).
- * bitmask structure:
- * bit0 : register implemented
- * bit1 : end marker
- * bit2-3 : reserved
- * bit4 : pmc has pmc.pm
- * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
- * bit6-7 : register type
- * bit8-31: reserved
- */
-#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
-#define PFM_REG_IMPL 0x1 /* register implemented */
-#define PFM_REG_END 0x2 /* end marker */
-#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
-#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
-#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
-#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
-#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
-
-#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
-#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
-
-#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
-
-/* i assumed unsigned */
-#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
-#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
-
-/* XXX: these assume that register i is implemented */
-#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
-#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
-#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
-#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
-
-#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
-#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
-#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
-#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
-
-#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
-#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
-
-#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
-#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
-#define PFM_CTX_TASK(h) (h)->ctx_task
-
-#define PMU_PMC_OI 5 /* position of pmc.oi bit */
-
-/* XXX: does not support more than 64 PMDs */
-#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
-#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
-
-#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
-
-#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
-#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
-#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
-#define PFM_CODE_RR 0 /* requesting code range restriction */
-#define PFM_DATA_RR 1 /* requesting data range restriction */
-
-#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
-#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
-#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
-
-#define RDEP(x) (1UL<<(x))
-
-/*
- * context protection macros
- * in SMP:
- * - we need to protect against CPU concurrency (spin_lock)
- * - we need to protect against PMU overflow interrupts (local_irq_disable)
- * in UP:
- * - we need to protect against PMU overflow interrupts (local_irq_disable)
- *
- * spin_lock_irqsave()/spin_unlock_irqrestore():
- * in SMP: local_irq_disable + spin_lock
- * in UP : local_irq_disable
- *
- * spin_lock()/spin_unlock():
- * in UP : removed automatically
- * in SMP: protect against context accesses from other CPU. interrupts
- * are not masked. This is useful for the PMU interrupt handler
- * because we know we will not get PMU concurrency in that code.
- */
-#define PROTECT_CTX(c, f) \
- do { \
- DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
- spin_lock_irqsave(&(c)->ctx_lock, f); \
- DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
- } while(0)
-
-#define UNPROTECT_CTX(c, f) \
- do { \
- DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
- spin_unlock_irqrestore(&(c)->ctx_lock, f); \
- } while(0)
-
-#define PROTECT_CTX_NOPRINT(c, f) \
- do { \
- spin_lock_irqsave(&(c)->ctx_lock, f); \
- } while(0)
-
-
-#define UNPROTECT_CTX_NOPRINT(c, f) \
- do { \
- spin_unlock_irqrestore(&(c)->ctx_lock, f); \
- } while(0)
-
-
-#define PROTECT_CTX_NOIRQ(c) \
- do { \
- spin_lock(&(c)->ctx_lock); \
- } while(0)
-
-#define UNPROTECT_CTX_NOIRQ(c) \
- do { \
- spin_unlock(&(c)->ctx_lock); \
- } while(0)
-
-
-#ifdef CONFIG_SMP
-
-#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
-#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
-#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
-
-#else /* !CONFIG_SMP */
-#define SET_ACTIVATION(t) do {} while(0)
-#define GET_ACTIVATION(t) do {} while(0)
-#define INC_ACTIVATION(t) do {} while(0)
-#endif /* CONFIG_SMP */
-
-#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
-#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
-#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
-
-#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
-#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
-
-#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
-
-/*
- * cmp0 must be the value of pmc0
- */
-#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
-
-#define PFMFS_MAGIC 0xa0b4d889
-
-/*
- * debugging
- */
-#define PFM_DEBUGGING 1
-#ifdef PFM_DEBUGGING
-#define DPRINT(a) \
- do { \
- if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
- } while (0)
-
-#define DPRINT_ovfl(a) \
- do { \
- if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
- } while (0)
-#endif
-
-/*
- * 64-bit software counter structure
- *
- * the next_reset_type is applied to the next call to pfm_reset_regs()
- */
-typedef struct {
- unsigned long val; /* virtual 64bit counter value */
- unsigned long lval; /* last reset value */
- unsigned long long_reset; /* reset value on sampling overflow */
- unsigned long short_reset; /* reset value on overflow */
- unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
- unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
- unsigned long seed; /* seed for random-number generator */
- unsigned long mask; /* mask for random-number generator */
- unsigned int flags; /* notify/do not notify */
- unsigned long eventid; /* overflow event identifier */
-} pfm_counter_t;
-
-/*
- * context flags
- */
-typedef struct {
- unsigned int block:1; /* when 1, task will be blocked on user notifications */
- unsigned int system:1; /* do system wide monitoring */
- unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
- unsigned int is_sampling:1; /* true if using a custom format */
- unsigned int excl_idle:1; /* exclude idle task in system wide session */
- unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
- unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
- unsigned int no_msg:1; /* no message sent on overflow */
- unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
- unsigned int reserved:22;
-} pfm_context_flags_t;
-
-#define PFM_TRAP_REASON_NONE 0x0 /* default value */
-#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
-#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
-
-
-/*
- * perfmon context: encapsulates all the state of a monitoring session
- */
-
-typedef struct pfm_context {
- spinlock_t ctx_lock; /* context protection */
-
- pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
- unsigned int ctx_state; /* state: active/inactive (no bitfield) */
-
- struct task_struct *ctx_task; /* task to which context is attached */
-
- unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
-
- struct completion ctx_restart_done; /* use for blocking notification mode */
-
- unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
- unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
- unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
-
- unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
- unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
- unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
-
- unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
-
- unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
- unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
- unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
- unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
-
- pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
-
- unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
- unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
-
- unsigned long ctx_saved_psr_up; /* only contains psr.up value */
-
- unsigned long ctx_last_activation; /* context last activation number for last_cpu */
- unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
- unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
-
- int ctx_fd; /* file descriptor used by this context */
- pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
-
- pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
- void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
- unsigned long ctx_smpl_size; /* size of sampling buffer */
- void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
-
- wait_queue_head_t ctx_msgq_wait;
- pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
- int ctx_msgq_head;
- int ctx_msgq_tail;
- struct fasync_struct *ctx_async_queue;
-
- wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
-} pfm_context_t;
-
-/*
- * magic number used to verify that structure is really
- * a perfmon context
- */
-#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
-
-#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
-
-#ifdef CONFIG_SMP
-#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
-#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
-#else
-#define SET_LAST_CPU(ctx, v) do {} while(0)
-#define GET_LAST_CPU(ctx) do {} while(0)
-#endif
-
-
-#define ctx_fl_block ctx_flags.block
-#define ctx_fl_system ctx_flags.system
-#define ctx_fl_using_dbreg ctx_flags.using_dbreg
-#define ctx_fl_is_sampling ctx_flags.is_sampling
-#define ctx_fl_excl_idle ctx_flags.excl_idle
-#define ctx_fl_going_zombie ctx_flags.going_zombie
-#define ctx_fl_trap_reason ctx_flags.trap_reason
-#define ctx_fl_no_msg ctx_flags.no_msg
-#define ctx_fl_can_restart ctx_flags.can_restart
-
-#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
-#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
-
-/*
- * global information about all sessions
- * mostly used to synchronize between system wide and per-process
- */
-typedef struct {
- spinlock_t pfs_lock; /* lock the structure */
-
- unsigned int pfs_task_sessions; /* number of per task sessions */
- unsigned int pfs_sys_sessions; /* number of per system wide sessions */
- unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
- unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
- struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
-} pfm_session_t;
-
-/*
- * information about a PMC or PMD.
- * dep_pmd[]: a bitmask of dependent PMD registers
- * dep_pmc[]: a bitmask of dependent PMC registers
- */
-typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-typedef struct {
- unsigned int type;
- int pm_pos;
- unsigned long default_value; /* power-on default value */
- unsigned long reserved_mask; /* bitmask of reserved bits */
- pfm_reg_check_t read_check;
- pfm_reg_check_t write_check;
- unsigned long dep_pmd[4];
- unsigned long dep_pmc[4];
-} pfm_reg_desc_t;
-
-/* assume cnum is a valid monitor */
-#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
-
-/*
- * This structure is initialized at boot time and contains
- * a description of the PMU main characteristics.
- *
- * If the probe function is defined, detection is based
- * on its return value:
- * - 0 means recognized PMU
- * - anything else means not supported
- * When the probe function is not defined, then the pmu_family field
- * is used and it must match the host CPU family such that:
- * - cpu->family & config->pmu_family != 0
- */
-typedef struct {
- unsigned long ovfl_val; /* overflow value for counters */
-
- pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
- pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
-
- unsigned int num_pmcs; /* number of PMCS: computed at init time */
- unsigned int num_pmds; /* number of PMDS: computed at init time */
- unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
- unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
-
- char *pmu_name; /* PMU family name */
- unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
- unsigned int flags; /* pmu specific flags */
- unsigned int num_ibrs; /* number of IBRS: computed at init time */
- unsigned int num_dbrs; /* number of DBRS: computed at init time */
- unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
- int (*probe)(void); /* customized probe routine */
- unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
-} pmu_config_t;
-/*
- * PMU specific flags
- */
-#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
-
-/*
- * debug register related type definitions
- */
-typedef struct {
- unsigned long ibr_mask:56;
- unsigned long ibr_plm:4;
- unsigned long ibr_ig:3;
- unsigned long ibr_x:1;
-} ibr_mask_reg_t;
-
-typedef struct {
- unsigned long dbr_mask:56;
- unsigned long dbr_plm:4;
- unsigned long dbr_ig:2;
- unsigned long dbr_w:1;
- unsigned long dbr_r:1;
-} dbr_mask_reg_t;
-
-typedef union {
- unsigned long val;
- ibr_mask_reg_t ibr;
- dbr_mask_reg_t dbr;
-} dbreg_t;
-
-
-/*
- * perfmon command descriptions
- */
-typedef struct {
- int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
- char *cmd_name;
- int cmd_flags;
- unsigned int cmd_narg;
- size_t cmd_argsize;
- int (*cmd_getsize)(void *arg, size_t *sz);
-} pfm_cmd_desc_t;
-
-#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
-#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
-#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
-#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
-
-
-#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
-#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
-#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
-#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
-#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
-
-#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
-
-typedef struct {
- unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
- unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
- unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
- unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
- unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
- unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
- unsigned long pfm_smpl_handler_calls;
- unsigned long pfm_smpl_handler_cycles;
- char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
-} pfm_stats_t;
-
-/*
- * perfmon internal variables
- */
-static pfm_stats_t pfm_stats[NR_CPUS];
-static pfm_session_t pfm_sessions; /* global sessions information */
-
-static DEFINE_SPINLOCK(pfm_alt_install_check);
-static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
-
-static struct proc_dir_entry *perfmon_dir;
-static pfm_uuid_t pfm_null_uuid = {0,};
-
-static spinlock_t pfm_buffer_fmt_lock;
-static LIST_HEAD(pfm_buffer_fmt_list);
-
-static pmu_config_t *pmu_conf;
-
-/* sysctl() controls */
-pfm_sysctl_t pfm_sysctl;
-EXPORT_SYMBOL(pfm_sysctl);
-
-static struct ctl_table pfm_ctl_table[] = {
- {
- .procname = "debug",
- .data = &pfm_sysctl.debug,
- .maxlen = sizeof(int),
- .mode = 0666,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "debug_ovfl",
- .data = &pfm_sysctl.debug_ovfl,
- .maxlen = sizeof(int),
- .mode = 0666,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "fastctxsw",
- .data = &pfm_sysctl.fastctxsw,
- .maxlen = sizeof(int),
- .mode = 0600,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "expert_mode",
- .data = &pfm_sysctl.expert_mode,
- .maxlen = sizeof(int),
- .mode = 0600,
- .proc_handler = proc_dointvec,
- },
- {}
-};
-static struct ctl_table pfm_sysctl_dir[] = {
- {
- .procname = "perfmon",
- .mode = 0555,
- .child = pfm_ctl_table,
- },
- {}
-};
-static struct ctl_table pfm_sysctl_root[] = {
- {
- .procname = "kernel",
- .mode = 0555,
- .child = pfm_sysctl_dir,
- },
- {}
-};
-static struct ctl_table_header *pfm_sysctl_header;
-
-static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-
-#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
-#define pfm_get_cpu_data(a,b) per_cpu(a, b)
-
-static inline void
-pfm_put_task(struct task_struct *task)
-{
- if (task != current) put_task_struct(task);
-}
-
-static inline unsigned long
-pfm_protect_ctx_ctxsw(pfm_context_t *x)
-{
- spin_lock(&(x)->ctx_lock);
- return 0UL;
-}
-
-static inline void
-pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
-{
- spin_unlock(&(x)->ctx_lock);
-}
-
-/* forward declaration */
-static const struct dentry_operations pfmfs_dentry_operations;
-
-static int pfmfs_init_fs_context(struct fs_context *fc)
-{
- struct pseudo_fs_context *ctx = init_pseudo(fc, PFMFS_MAGIC);
- if (!ctx)
- return -ENOMEM;
- ctx->dops = &pfmfs_dentry_operations;
- return 0;
-}
-
-static struct file_system_type pfm_fs_type = {
- .name = "pfmfs",
- .init_fs_context = pfmfs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-MODULE_ALIAS_FS("pfmfs");
-
-DEFINE_PER_CPU(unsigned long, pfm_syst_info);
-DEFINE_PER_CPU(struct task_struct *, pmu_owner);
-DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
-DEFINE_PER_CPU(unsigned long, pmu_activation_number);
-EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
-
-
-/* forward declaration */
-static const struct file_operations pfm_file_ops;
-
-/*
- * forward declarations
- */
-#ifndef CONFIG_SMP
-static void pfm_lazy_save_regs (struct task_struct *ta);
-#endif
-
-void dump_pmu_state(const char *);
-static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-
-#include "perfmon_itanium.h"
-#include "perfmon_mckinley.h"
-#include "perfmon_montecito.h"
-#include "perfmon_generic.h"
-
-static pmu_config_t *pmu_confs[]={
- &pmu_conf_mont,
- &pmu_conf_mck,
- &pmu_conf_ita,
- &pmu_conf_gen, /* must be last */
- NULL
-};
-
-
-static int pfm_end_notify_user(pfm_context_t *ctx);
-
-static inline void
-pfm_clear_psr_pp(void)
-{
- ia64_rsm(IA64_PSR_PP);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_set_psr_pp(void)
-{
- ia64_ssm(IA64_PSR_PP);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_clear_psr_up(void)
-{
- ia64_rsm(IA64_PSR_UP);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_set_psr_up(void)
-{
- ia64_ssm(IA64_PSR_UP);
- ia64_srlz_i();
-}
-
-static inline unsigned long
-pfm_get_psr(void)
-{
- unsigned long tmp;
- tmp = ia64_getreg(_IA64_REG_PSR);
- ia64_srlz_i();
- return tmp;
-}
-
-static inline void
-pfm_set_psr_l(unsigned long val)
-{
- ia64_setreg(_IA64_REG_PSR_L, val);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_freeze_pmu(void)
-{
- ia64_set_pmc(0,1UL);
- ia64_srlz_d();
-}
-
-static inline void
-pfm_unfreeze_pmu(void)
-{
- ia64_set_pmc(0,0UL);
- ia64_srlz_d();
-}
-
-static inline void
-pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
-{
- int i;
-
- for (i=0; i < nibrs; i++) {
- ia64_set_ibr(i, ibrs[i]);
- ia64_dv_serialize_instruction();
- }
- ia64_srlz_i();
-}
-
-static inline void
-pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
-{
- int i;
-
- for (i=0; i < ndbrs; i++) {
- ia64_set_dbr(i, dbrs[i]);
- ia64_dv_serialize_data();
- }
- ia64_srlz_d();
-}
-
-/*
- * PMD[i] must be a counter. no check is made
- */
-static inline unsigned long
-pfm_read_soft_counter(pfm_context_t *ctx, int i)
-{
- return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
-}
-
-/*
- * PMD[i] must be a counter. no check is made
- */
-static inline void
-pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
-{
- unsigned long ovfl_val = pmu_conf->ovfl_val;
-
- ctx->ctx_pmds[i].val = val & ~ovfl_val;
- /*
- * writing to the unimplemented part is ignored, so we do not need to
- * mask off the top part
- */
- ia64_set_pmd(i, val & ovfl_val);
-}
-
-static pfm_msg_t *
-pfm_get_new_msg(pfm_context_t *ctx)
-{
- int idx, next;
-
- next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
-
- DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
- if (next == ctx->ctx_msgq_head) return NULL;
-
- idx = ctx->ctx_msgq_tail;
- ctx->ctx_msgq_tail = next;
-
- DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
-
- return ctx->ctx_msgq+idx;
-}
-
-static pfm_msg_t *
-pfm_get_next_msg(pfm_context_t *ctx)
-{
- pfm_msg_t *msg;
-
- DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
-
- if (PFM_CTXQ_EMPTY(ctx)) return NULL;
-
- /*
- * get oldest message
- */
- msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
-
- /*
- * and move forward
- */
- ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
-
- DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
-
- return msg;
-}
-
-static void
-pfm_reset_msgq(pfm_context_t *ctx)
-{
- ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
- DPRINT(("ctx=%p msgq reset\n", ctx));
-}
-
-static pfm_context_t *
-pfm_context_alloc(int ctx_flags)
-{
- pfm_context_t *ctx;
-
- /*
- * allocate context descriptor
- * must be able to free with interrupts disabled
- */
- ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
- if (ctx) {
- DPRINT(("alloc ctx @%p\n", ctx));
-
- /*
- * init context protection lock
- */
- spin_lock_init(&ctx->ctx_lock);
-
- /*
- * context is unloaded
- */
- ctx->ctx_state = PFM_CTX_UNLOADED;
-
- /*
- * initialization of context's flags
- */
- ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
- ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
- ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
- /*
- * will move to set properties
- * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
- */
-
- /*
- * init restart semaphore to locked
- */
- init_completion(&ctx->ctx_restart_done);
-
- /*
- * activation is used in SMP only
- */
- ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
- SET_LAST_CPU(ctx, -1);
-
- /*
- * initialize notification message queue
- */
- ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
- init_waitqueue_head(&ctx->ctx_msgq_wait);
- init_waitqueue_head(&ctx->ctx_zombieq);
-
- }
- return ctx;
-}
-
-static void
-pfm_context_free(pfm_context_t *ctx)
-{
- if (ctx) {
- DPRINT(("free ctx @%p\n", ctx));
- kfree(ctx);
- }
-}
-
-static void
-pfm_mask_monitoring(struct task_struct *task)
-{
- pfm_context_t *ctx = PFM_GET_CTX(task);
- unsigned long mask, val, ovfl_mask;
- int i;
-
- DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
-
- ovfl_mask = pmu_conf->ovfl_val;
- /*
- * monitoring can only be masked as a result of a valid
- * counter overflow. In UP, it means that the PMU still
- * has an owner. Note that the owner can be different
- * from the current task. However the PMU state belongs
- * to the owner.
- * In SMP, a valid overflow only happens when task is
- * current. Therefore if we come here, we know that
- * the PMU state belongs to the current task, therefore
- * we can access the live registers.
- *
- * So in both cases, the live register contains the owner's
- * state. We can ONLY touch the PMU registers and NOT the PSR.
- *
- * As a consequence to this call, the ctx->th_pmds[] array
- * contains stale information which must be ignored
- * when context is reloaded AND monitoring is active (see
- * pfm_restart).
- */
- mask = ctx->ctx_used_pmds[0];
- for (i = 0; mask; i++, mask>>=1) {
- /* skip non used pmds */
- if ((mask & 0x1) == 0) continue;
- val = ia64_get_pmd(i);
-
- if (PMD_IS_COUNTING(i)) {
- /*
- * we rebuild the full 64 bit value of the counter
- */
- ctx->ctx_pmds[i].val += (val & ovfl_mask);
- } else {
- ctx->ctx_pmds[i].val = val;
- }
- DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
- i,
- ctx->ctx_pmds[i].val,
- val & ovfl_mask));
- }
- /*
- * mask monitoring by setting the privilege level to 0
- * we cannot use psr.pp/psr.up for this, it is controlled by
- * the user
- *
- * if task is current, modify actual registers, otherwise modify
- * thread save state, i.e., what will be restored in pfm_load_regs()
- */
- mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
- for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0UL) continue;
- ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
- ctx->th_pmcs[i] &= ~0xfUL;
- DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
- }
- /*
- * make all of this visible
- */
- ia64_srlz_d();
-}
-
-/*
- * must always be done with task == current
- *
- * context must be in MASKED state when calling
- */
-static void
-pfm_restore_monitoring(struct task_struct *task)
-{
- pfm_context_t *ctx = PFM_GET_CTX(task);
- unsigned long mask, ovfl_mask;
- unsigned long psr, val;
- int i, is_system;
-
- is_system = ctx->ctx_fl_system;
- ovfl_mask = pmu_conf->ovfl_val;
-
- if (task != current) {
- printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
- return;
- }
- if (ctx->ctx_state != PFM_CTX_MASKED) {
- printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
- task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
- return;
- }
- psr = pfm_get_psr();
- /*
- * monitoring is masked via the PMC.
- * As we restore their value, we do not want each counter to
- * restart right away. We stop monitoring using the PSR,
- * restore the PMC (and PMD) and then re-establish the psr
- * as it was. Note that there can be no pending overflow at
- * this point, because monitoring was MASKED.
- *
- * system-wide sessions are pinned and self-monitoring
- */
- if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
- /* disable dcr pp */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
- pfm_clear_psr_pp();
- } else {
- pfm_clear_psr_up();
- }
- /*
- * first, we restore the PMD
- */
- mask = ctx->ctx_used_pmds[0];
- for (i = 0; mask; i++, mask>>=1) {
- /* skip non used pmds */
- if ((mask & 0x1) == 0) continue;
-
- if (PMD_IS_COUNTING(i)) {
- /*
- * we split the 64bit value according to
- * counter width
- */
- val = ctx->ctx_pmds[i].val & ovfl_mask;
- ctx->ctx_pmds[i].val &= ~ovfl_mask;
- } else {
- val = ctx->ctx_pmds[i].val;
- }
- ia64_set_pmd(i, val);
-
- DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
- i,
- ctx->ctx_pmds[i].val,
- val));
- }
- /*
- * restore the PMCs
- */
- mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
- for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0UL) continue;
- ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
- ia64_set_pmc(i, ctx->th_pmcs[i]);
- DPRINT(("[%d] pmc[%d]=0x%lx\n",
- task_pid_nr(task), i, ctx->th_pmcs[i]));
- }
- ia64_srlz_d();
-
- /*
- * must restore DBR/IBR because could be modified while masked
- * XXX: need to optimize
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
-
- /*
- * now restore PSR
- */
- if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
- /* enable dcr pp */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
- ia64_srlz_i();
- }
- pfm_set_psr_l(psr);
-}
-
-static inline void
-pfm_save_pmds(unsigned long *pmds, unsigned long mask)
-{
- int i;
-
- ia64_srlz_d();
-
- for (i=0; mask; i++, mask>>=1) {
- if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
- }
-}
-
-/*
- * reload from thread state (used for ctxsw only)
- */
-static inline void
-pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
-{
- int i;
- unsigned long val, ovfl_val = pmu_conf->ovfl_val;
-
- for (i=0; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0) continue;
- val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
- ia64_set_pmd(i, val);
- }
- ia64_srlz_d();
-}
-
-/*
- * propagate PMD from context to thread-state
- */
-static inline void
-pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
-{
- unsigned long ovfl_val = pmu_conf->ovfl_val;
- unsigned long mask = ctx->ctx_all_pmds[0];
- unsigned long val;
- int i;
-
- DPRINT(("mask=0x%lx\n", mask));
-
- for (i=0; mask; i++, mask>>=1) {
-
- val = ctx->ctx_pmds[i].val;
-
- /*
- * We break up the 64 bit value into 2 pieces
- * the lower bits go to the machine state in the
- * thread (will be reloaded on ctxsw in).
- * The upper part stays in the soft-counter.
- */
- if (PMD_IS_COUNTING(i)) {
- ctx->ctx_pmds[i].val = val & ~ovfl_val;
- val &= ovfl_val;
- }
- ctx->th_pmds[i] = val;
-
- DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
- i,
- ctx->th_pmds[i],
- ctx->ctx_pmds[i].val));
- }
-}
-
-/*
- * propagate PMC from context to thread-state
- */
-static inline void
-pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
-{
- unsigned long mask = ctx->ctx_all_pmcs[0];
- int i;
-
- DPRINT(("mask=0x%lx\n", mask));
-
- for (i=0; mask; i++, mask>>=1) {
- /* masking 0 with ovfl_val yields 0 */
- ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
- DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
- }
-}
-
-
-
-static inline void
-pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
-{
- int i;
-
- for (i=0; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0) continue;
- ia64_set_pmc(i, pmcs[i]);
- }
- ia64_srlz_d();
-}
-
-static inline int
-pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
-{
- return memcmp(a, b, sizeof(pfm_uuid_t));
-}
-
-static inline int
-pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
-{
- int ret = 0;
- if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
-{
- int ret = 0;
- if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
- return ret;
-}
-
-
-static inline int
-pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
- int cpu, void *arg)
-{
- int ret = 0;
- if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
- int cpu, void *arg)
-{
- int ret = 0;
- if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
-{
- int ret = 0;
- if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
-{
- int ret = 0;
- if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
- return ret;
-}
-
-static pfm_buffer_fmt_t *
-__pfm_find_buffer_fmt(pfm_uuid_t uuid)
-{
- struct list_head * pos;
- pfm_buffer_fmt_t * entry;
-
- list_for_each(pos, &pfm_buffer_fmt_list) {
- entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
- if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
- return entry;
- }
- return NULL;
-}
-
-/*
- * find a buffer format based on its uuid
- */
-static pfm_buffer_fmt_t *
-pfm_find_buffer_fmt(pfm_uuid_t uuid)
-{
- pfm_buffer_fmt_t * fmt;
- spin_lock(&pfm_buffer_fmt_lock);
- fmt = __pfm_find_buffer_fmt(uuid);
- spin_unlock(&pfm_buffer_fmt_lock);
- return fmt;
-}
-
-int
-pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
-{
- int ret = 0;
-
- /* some sanity checks */
- if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
-
- /* we need at least a handler */
- if (fmt->fmt_handler == NULL) return -EINVAL;
-
- /*
- * XXX: need check validity of fmt_arg_size
- */
-
- spin_lock(&pfm_buffer_fmt_lock);
-
- if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
- printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
- ret = -EBUSY;
- goto out;
- }
- list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
- printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
-
-out:
- spin_unlock(&pfm_buffer_fmt_lock);
- return ret;
-}
-EXPORT_SYMBOL(pfm_register_buffer_fmt);
-
-int
-pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
-{
- pfm_buffer_fmt_t *fmt;
- int ret = 0;
-
- spin_lock(&pfm_buffer_fmt_lock);
-
- fmt = __pfm_find_buffer_fmt(uuid);
- if (!fmt) {
- printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
- ret = -EINVAL;
- goto out;
- }
- list_del_init(&fmt->fmt_list);
- printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
-
-out:
- spin_unlock(&pfm_buffer_fmt_lock);
- return ret;
-
-}
-EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
-
-static int
-pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
-{
- unsigned long flags;
- /*
- * validity checks on cpu_mask have been done upstream
- */
- LOCK_PFS(flags);
-
- DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
- if (is_syswide) {
- /*
- * cannot mix system wide and per-task sessions
- */
- if (pfm_sessions.pfs_task_sessions > 0UL) {
- DPRINT(("system wide not possible, %u conflicting task_sessions\n",
- pfm_sessions.pfs_task_sessions));
- goto abort;
- }
-
- if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
-
- DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
-
- pfm_sessions.pfs_sys_session[cpu] = task;
-
- pfm_sessions.pfs_sys_sessions++ ;
-
- } else {
- if (pfm_sessions.pfs_sys_sessions) goto abort;
- pfm_sessions.pfs_task_sessions++;
- }
-
- DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
- /*
- * Force idle() into poll mode
- */
- cpu_idle_poll_ctrl(true);
-
- UNLOCK_PFS(flags);
-
- return 0;
-
-error_conflict:
- DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
- task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
- cpu));
-abort:
- UNLOCK_PFS(flags);
-
- return -EBUSY;
-
-}
-
-static int
-pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
-{
- unsigned long flags;
- /*
- * validity checks on cpu_mask have been done upstream
- */
- LOCK_PFS(flags);
-
- DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
-
- if (is_syswide) {
- pfm_sessions.pfs_sys_session[cpu] = NULL;
- /*
- * would not work with perfmon+more than one bit in cpu_mask
- */
- if (ctx && ctx->ctx_fl_using_dbreg) {
- if (pfm_sessions.pfs_sys_use_dbregs == 0) {
- printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
- } else {
- pfm_sessions.pfs_sys_use_dbregs--;
- }
- }
- pfm_sessions.pfs_sys_sessions--;
- } else {
- pfm_sessions.pfs_task_sessions--;
- }
- DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
- /* Undo forced polling. Last session reenables pal_halt */
- cpu_idle_poll_ctrl(false);
-
- UNLOCK_PFS(flags);
-
- return 0;
-}
-
-/*
- * removes virtual mapping of the sampling buffer.
- * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
- * a PROTECT_CTX() section.
- */
-static int
-pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
-{
- struct task_struct *task = current;
- int r;
-
- /* sanity checks */
- if (task->mm == NULL || size == 0UL || vaddr == NULL) {
- printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
- return -EINVAL;
- }
-
- DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
-
- /*
- * does the actual unmapping
- */
- r = vm_munmap((unsigned long)vaddr, size);
-
- if (r !=0) {
- printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
- }
-
- DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
-
- return 0;
-}
-
-/*
- * free actual physical storage used by sampling buffer
- */
-#if 0
-static int
-pfm_free_smpl_buffer(pfm_context_t *ctx)
-{
- pfm_buffer_fmt_t *fmt;
-
- if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
-
- /*
- * we won't use the buffer format anymore
- */
- fmt = ctx->ctx_buf_fmt;
-
- DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
- ctx->ctx_smpl_hdr,
- ctx->ctx_smpl_size,
- ctx->ctx_smpl_vaddr));
-
- pfm_buf_fmt_exit(fmt, current, NULL, NULL);
-
- /*
- * free the buffer
- */
- vfree(ctx->ctx_smpl_hdr);
-
- ctx->ctx_smpl_hdr = NULL;
- ctx->ctx_smpl_size = 0UL;
-
- return 0;
-
-invalid_free:
- printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
- return -EINVAL;
-}
-#endif
-
-static inline void
-pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
-{
- if (fmt == NULL) return;
-
- pfm_buf_fmt_exit(fmt, current, NULL, NULL);
-
-}
-
-/*
- * pfmfs should _never_ be mounted by userland - too much of security hassle,
- * no real gain from having the whole whorehouse mounted. So we don't need
- * any operations on the root directory. However, we need a non-trivial
- * d_name - pfm: will go nicely and kill the special-casing in procfs.
- */
-static struct vfsmount *pfmfs_mnt __read_mostly;
-
-static int __init
-init_pfm_fs(void)
-{
- int err = register_filesystem(&pfm_fs_type);
- if (!err) {
- pfmfs_mnt = kern_mount(&pfm_fs_type);
- err = PTR_ERR(pfmfs_mnt);
- if (IS_ERR(pfmfs_mnt))
- unregister_filesystem(&pfm_fs_type);
- else
- err = 0;
- }
- return err;
-}
-
-static ssize_t
-pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
-{
- pfm_context_t *ctx;
- pfm_msg_t *msg;
- ssize_t ret;
- unsigned long flags;
- DECLARE_WAITQUEUE(wait, current);
- if (PFM_IS_FILE(filp) == 0) {
- printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
- return -EINVAL;
- }
-
- ctx = filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
- return -EINVAL;
- }
-
- /*
- * check even when there is no message
- */
- if (size < sizeof(pfm_msg_t)) {
- DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
- return -EINVAL;
- }
-
- PROTECT_CTX(ctx, flags);
-
- /*
- * put ourselves on the wait queue
- */
- add_wait_queue(&ctx->ctx_msgq_wait, &wait);
-
-
- for(;;) {
- /*
- * check wait queue
- */
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
-
- ret = 0;
- if(PFM_CTXQ_EMPTY(ctx) == 0) break;
-
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * check non-blocking read
- */
- ret = -EAGAIN;
- if(filp->f_flags & O_NONBLOCK) break;
-
- /*
- * check pending signals
- */
- if(signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- /*
- * no message, so wait
- */
- schedule();
-
- PROTECT_CTX(ctx, flags);
- }
- DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
-
- if (ret < 0) goto abort;
-
- ret = -EINVAL;
- msg = pfm_get_next_msg(ctx);
- if (msg == NULL) {
- printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
- goto abort_locked;
- }
-
- DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
-
- ret = -EFAULT;
- if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
-
-abort_locked:
- UNPROTECT_CTX(ctx, flags);
-abort:
- return ret;
-}
-
-static ssize_t
-pfm_write(struct file *file, const char __user *ubuf,
- size_t size, loff_t *ppos)
-{
- DPRINT(("pfm_write called\n"));
- return -EINVAL;
-}
-
-static __poll_t
-pfm_poll(struct file *filp, poll_table * wait)
-{
- pfm_context_t *ctx;
- unsigned long flags;
- __poll_t mask = 0;
-
- if (PFM_IS_FILE(filp) == 0) {
- printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
- return 0;
- }
-
- ctx = filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
- return 0;
- }
-
-
- DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
-
- poll_wait(filp, &ctx->ctx_msgq_wait, wait);
-
- PROTECT_CTX(ctx, flags);
-
- if (PFM_CTXQ_EMPTY(ctx) == 0)
- mask = EPOLLIN | EPOLLRDNORM;
-
- UNPROTECT_CTX(ctx, flags);
-
- DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
-
- return mask;
-}
-
-static long
-pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- DPRINT(("pfm_ioctl called\n"));
- return -EINVAL;
-}
-
-/*
- * interrupt cannot be masked when coming here
- */
-static inline int
-pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
-{
- int ret;
-
- ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
-
- DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
- task_pid_nr(current),
- fd,
- on,
- ctx->ctx_async_queue, ret));
-
- return ret;
-}
-
-static int
-pfm_fasync(int fd, struct file *filp, int on)
-{
- pfm_context_t *ctx;
- int ret;
-
- if (PFM_IS_FILE(filp) == 0) {
- printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
- return -EBADF;
- }
-
- ctx = filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
- return -EBADF;
- }
- /*
- * we cannot mask interrupts during this call because this may
- * go to sleep if memory is not readily available.
- *
- * We are protected from the context disappearing by the get_fd()/put_fd()
- * done in caller. Serialization of this function is ensured by caller.
- */
- ret = pfm_do_fasync(fd, filp, ctx, on);
-
-
- DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
- fd,
- on,
- ctx->ctx_async_queue, ret));
-
- return ret;
-}
-
-#ifdef CONFIG_SMP
-/*
- * this function is exclusively called from pfm_close().
- * The context is not protected at that time, nor are interrupts
- * on the remote CPU. That's necessary to avoid deadlocks.
- */
-static void
-pfm_syswide_force_stop(void *info)
-{
- pfm_context_t *ctx = (pfm_context_t *)info;
- struct pt_regs *regs = task_pt_regs(current);
- struct task_struct *owner;
- unsigned long flags;
- int ret;
-
- if (ctx->ctx_cpu != smp_processor_id()) {
- printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
- ctx->ctx_cpu,
- smp_processor_id());
- return;
- }
- owner = GET_PMU_OWNER();
- if (owner != ctx->ctx_task) {
- printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
- smp_processor_id(),
- task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
- return;
- }
- if (GET_PMU_CTX() != ctx) {
- printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
- smp_processor_id(),
- GET_PMU_CTX(), ctx);
- return;
- }
-
- DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
- /*
- * the context is already protected in pfm_close(), we simply
- * need to mask interrupts to avoid a PMU interrupt race on
- * this CPU
- */
- local_irq_save(flags);
-
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- DPRINT(("context_unload returned %d\n", ret));
- }
-
- /*
- * unmask interrupts, PMU interrupts are now spurious here
- */
- local_irq_restore(flags);
-}
-
-static void
-pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
-{
- int ret;
-
- DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
- ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
- DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
-}
-#endif /* CONFIG_SMP */
-
-/*
- * called for each close(). Partially free resources.
- * When caller is self-monitoring, the context is unloaded.
- */
-static int
-pfm_flush(struct file *filp, fl_owner_t id)
-{
- pfm_context_t *ctx;
- struct task_struct *task;
- struct pt_regs *regs;
- unsigned long flags;
- unsigned long smpl_buf_size = 0UL;
- void *smpl_buf_vaddr = NULL;
- int state, is_system;
-
- if (PFM_IS_FILE(filp) == 0) {
- DPRINT(("bad magic for\n"));
- return -EBADF;
- }
-
- ctx = filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
- return -EBADF;
- }
-
- /*
- * remove our file from the async queue, if we use this mode.
- * This can be done without the context being protected. We come
- * here when the context has become unreachable by other tasks.
- *
- * We may still have active monitoring at this point and we may
- * end up in pfm_overflow_handler(). However, fasync_helper()
- * operates with interrupts disabled and it cleans up the
- * queue. If the PMU handler is called prior to entering
- * fasync_helper() then it will send a signal. If it is
- * invoked after, it will find an empty queue and no
- * signal will be sent. In both case, we are safe
- * signal will be sent. In both cases, we are safe
- PROTECT_CTX(ctx, flags);
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- task = PFM_CTX_TASK(ctx);
- regs = task_pt_regs(task);
-
- DPRINT(("ctx_state=%d is_current=%d\n",
- state,
- task == current ? 1 : 0));
-
- /*
- * if state == UNLOADED, then task is NULL
- */
-
- /*
- * we must stop and unload because we are losing access to the context.
- */
- if (task == current) {
-#ifdef CONFIG_SMP
- /*
- * the task IS the owner but it migrated to another CPU: that's bad
- * but we must handle this cleanly. Unfortunately, the kernel does
- * not provide a mechanism to block migration (while the context is loaded).
- *
- * We need to release the resource on the ORIGINAL cpu.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- /*
- * keep context protected but unmask interrupt for IPI
- */
- local_irq_restore(flags);
-
- pfm_syswide_cleanup_other_cpu(ctx);
-
- /*
- * restore interrupt masking
- */
- local_irq_save(flags);
-
- /*
- * context is unloaded at this point
- */
- } else
-#endif /* CONFIG_SMP */
- {
-
- DPRINT(("forcing unload\n"));
- /*
- * stop and unload, returning with state UNLOADED
- * and session unreserved.
- */
- pfm_context_unload(ctx, NULL, 0, regs);
-
- DPRINT(("ctx_state=%d\n", ctx->ctx_state));
- }
- }
-
- /*
- * remove virtual mapping, if any, for the calling task.
- * cannot reset ctx field until last user is calling close().
- *
- * ctx_smpl_vaddr must never be cleared because it is needed
- * by every task with access to the context
- *
- * When called from do_exit(), the mm context is gone already, therefore
- * mm is NULL, i.e., the VMA is already gone and we do not have to
- * do anything here
- */
- if (ctx->ctx_smpl_vaddr && current->mm) {
- smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
- smpl_buf_size = ctx->ctx_smpl_size;
- }
-
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * if there was a mapping, then we systematically remove it
- * at this point. Cannot be done inside critical section
- * because some VM function reenables interrupts.
- *
- */
- if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
-
- return 0;
-}
-/*
- * called either on explicit close() or from exit_files().
- * Only the LAST user of the file gets to this point, i.e., it is
- * called only ONCE.
- *
- * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
- * (fput()), i.e., last task to access the file. Nobody else can access the
- * file at this point.
- *
- * When called from exit_files(), the VMA has been freed because exit_mm()
- * is executed before exit_files().
- *
- * When called from exit_files(), the current task is not yet ZOMBIE but we
- * flush the PMU state to the context.
- */
-static int
-pfm_close(struct inode *inode, struct file *filp)
-{
- pfm_context_t *ctx;
- struct task_struct *task;
- struct pt_regs *regs;
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
- unsigned long smpl_buf_size = 0UL;
- void *smpl_buf_addr = NULL;
- int free_possible = 1;
- int state, is_system;
-
- DPRINT(("pfm_close called private=%p\n", filp->private_data));
-
- if (PFM_IS_FILE(filp) == 0) {
- DPRINT(("bad magic\n"));
- return -EBADF;
- }
-
- ctx = filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
- return -EBADF;
- }
-
- PROTECT_CTX(ctx, flags);
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- task = PFM_CTX_TASK(ctx);
- regs = task_pt_regs(task);
-
- DPRINT(("ctx_state=%d is_current=%d\n",
- state,
- task == current ? 1 : 0));
-
- /*
- * if task == current, then pfm_flush() unloaded the context
- */
- if (state == PFM_CTX_UNLOADED) goto doit;
-
- /*
- * context is loaded/masked and task != current, we need to
- * either force an unload or go zombie
- */
-
- /*
- * The task is currently blocked or will block after an overflow.
- * We must force it to wake up to get out of the
- * MASKED state and transition to the unloaded state by itself.
- *
- * This situation is only possible for per-task mode
- */
- if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
-
- /*
- * set a "partial" zombie state to be checked
- * upon return from down() in pfm_handle_work().
- *
- * We cannot use the ZOMBIE state, because it is checked
- * by pfm_load_regs() which is called upon wakeup from down().
- * In such case, it would free the context and then we would
- * return to pfm_handle_work() which would access the
- * stale context. Instead, we set a flag invisible to pfm_load_regs()
- * but visible to pfm_handle_work().
- *
- * For some window of time, we have a zombie context with
- * ctx_state = MASKED and not ZOMBIE
- */
- ctx->ctx_fl_going_zombie = 1;
-
- /*
- * force task to wake up from MASKED state
- */
- complete(&ctx->ctx_restart_done);
-
- DPRINT(("waking up ctx_state=%d\n", state));
-
- /*
- * put ourselves to sleep waiting for the other
- * task to report completion
- *
- * the context is protected by mutex, therefore there
- * is no risk of being notified of completion before
- * actually being on the waitq.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&ctx->ctx_zombieq, &wait);
-
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * XXX: check for signals :
- * - ok for explicit close
- * - not ok when coming from exit_files()
- */
- schedule();
-
-
- PROTECT_CTX(ctx, flags);
-
-
- remove_wait_queue(&ctx->ctx_zombieq, &wait);
- set_current_state(TASK_RUNNING);
-
- /*
- * context is unloaded at this point
- */
- DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
- }
- else if (task != current) {
-#ifdef CONFIG_SMP
- /*
- * switch context to zombie state
- */
- ctx->ctx_state = PFM_CTX_ZOMBIE;
-
- DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
- /*
- * cannot free the context on the spot. deferred until
- * the task notices the ZOMBIE state
- */
- free_possible = 0;
-#else
- pfm_context_unload(ctx, NULL, 0, regs);
-#endif
- }
-
-doit:
- /* reload state, may have changed during opening of critical section */
- state = ctx->ctx_state;
-
- /*
- * the context is still attached to a task (possibly current),
- * so we cannot destroy it right now
- */
-
- /*
- * we must free the sampling buffer right here because
- * we cannot rely on it being cleaned up later by the
- * monitored task. It is not possible to free vmalloc'ed
- * memory in pfm_load_regs(). Instead, we remove the buffer
- * now. Should there be a subsequent PMU overflow originally
- * meant for sampling, it will be converted to spurious
- * and that's fine because the monitoring tool is gone anyway.
- */
- if (ctx->ctx_smpl_hdr) {
- smpl_buf_addr = ctx->ctx_smpl_hdr;
- smpl_buf_size = ctx->ctx_smpl_size;
- /* no more sampling */
- ctx->ctx_smpl_hdr = NULL;
- ctx->ctx_fl_is_sampling = 0;
- }
-
- DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
- state,
- free_possible,
- smpl_buf_addr,
- smpl_buf_size));
-
- if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
-
- /*
- * if the state is UNLOADED, the session has already been unreserved.
- */
- if (state == PFM_CTX_ZOMBIE) {
- pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
- }
-
- /*
- * disconnect file descriptor from context must be done
- * before we unlock.
- */
- filp->private_data = NULL;
-
- /*
- * if we free on the spot, the context is now completely unreachable
- * from the caller's side. The monitored task side is also cut, so we
- * can free it safely.
- *
- * If we have a deferred free, only the caller side is disconnected.
- */
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * All memory free operations (especially for vmalloc'ed memory)
- * MUST be done with interrupts ENABLED.
- */
- vfree(smpl_buf_addr);
-
- /*
- * return the memory used by the context
- */
- if (free_possible) pfm_context_free(ctx);
-
- return 0;
-}
-
-static const struct file_operations pfm_file_ops = {
- .llseek = no_llseek,
- .read = pfm_read,
- .write = pfm_write,
- .poll = pfm_poll,
- .unlocked_ioctl = pfm_ioctl,
- .fasync = pfm_fasync,
- .release = pfm_close,
- .flush = pfm_flush
-};
-
-static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
- d_inode(dentry)->i_ino);
-}
-
-static const struct dentry_operations pfmfs_dentry_operations = {
- .d_delete = always_delete_dentry,
- .d_dname = pfmfs_dname,
-};
-
-
-static struct file *
-pfm_alloc_file(pfm_context_t *ctx)
-{
- struct file *file;
- struct inode *inode;
- struct path path;
- struct qstr this = { .name = "" };
-
- /*
- * allocate a new inode
- */
- inode = new_inode(pfmfs_mnt->mnt_sb);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
-
- inode->i_mode = S_IFCHR|S_IRUGO;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
-
- /*
- * allocate a new dcache entry
- */
- path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
- if (!path.dentry) {
- iput(inode);
- return ERR_PTR(-ENOMEM);
- }
- path.mnt = mntget(pfmfs_mnt);
-
- d_add(path.dentry, inode);
-
- file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
- if (IS_ERR(file)) {
- path_put(&path);
- return file;
- }
-
- file->f_flags = O_RDONLY;
- file->private_data = ctx;
-
- return file;
-}
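/*
 * Illustration only: pfm_alloc_file() above hand-rolls an anonymous file on
 * the private pfmfs mount so that each context is handled through a regular
 * file descriptor. A rough equivalent of the same "context handle as a file"
 * pattern, assuming one can give up the "pfm:[ino]" dentry naming provided by
 * pfmfs_dname(), is the generic anon-inode helper. pfm_alloc_file_anon() is a
 * hypothetical name used only for this sketch.
 */
#include <linux/anon_inodes.h>
#include <linux/fcntl.h>

static struct file *pfm_alloc_file_anon(pfm_context_t *ctx)
{
	/* the name shows up in /proc/<pid>/fd/ symlinks instead of pfm:[ino] */
	return anon_inode_getfile("[perfmon]", &pfm_file_ops, ctx, O_RDONLY);
}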
-
-static int
-pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
-{
- DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
-
- while (size > 0) {
- unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
-
-
- if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
- return -ENOMEM;
-
- addr += PAGE_SIZE;
- buf += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return 0;
-}
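/*
 * Illustration only: the loop above needs the physical frame behind each
 * vmalloc'ed page and obtains it with the ia64-specific ia64_tpa(). A minimal
 * architecture-neutral sketch of the same page-by-page remap, assuming the
 * buffer came from vmalloc(), can use vmalloc_to_pfn() from
 * <linux/vmalloc.h>; remap_vmalloc_range() would be the even shorter route.
 * remap_vmalloc_buf() is a hypothetical name for the sketch.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int remap_vmalloc_buf(struct vm_area_struct *vma, void *buf,
			     unsigned long addr, unsigned long size)
{
	while (size > 0) {
		unsigned long pfn = vmalloc_to_pfn(buf);

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}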
-
-/*
- * allocate a sampling buffer and remap it into the user address space of the task
- */
-static int
-pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
-{
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma = NULL;
- unsigned long size;
- void *smpl_buf;
-
-
- /*
- * the fixed header + requested size, aligned to a page boundary
- */
- size = PAGE_ALIGN(rsize);
-
- DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
-
- /*
- * check requested size to avoid Denial-of-service attacks
- * XXX: may have to refine this test
- * Check against address space limit.
- *
- * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
- * return -ENOMEM;
- */
- if (size > task_rlimit(task, RLIMIT_MEMLOCK))
- return -ENOMEM;
-
- /*
- * We do the easy to undo allocations first.
- */
- smpl_buf = vzalloc(size);
- if (smpl_buf == NULL) {
- DPRINT(("Can't allocate sampling buffer\n"));
- return -ENOMEM;
- }
-
- DPRINT(("smpl_buf @%p\n", smpl_buf));
-
- /* allocate vma */
- vma = vm_area_alloc(mm);
- if (!vma) {
- DPRINT(("Cannot allocate vma\n"));
- goto error_kmem;
- }
-
- /*
- * partially initialize the vma for the sampling buffer
- */
- vma->vm_file = get_file(filp);
- vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
- vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
-
- /*
- * Now we have everything we need and we can initialize
- * and connect all the data structures
- */
-
- ctx->ctx_smpl_hdr = smpl_buf;
- ctx->ctx_smpl_size = size; /* aligned size */
-
- /*
- * Let's do the difficult operations next.
- *
- * now we atomically find some area in the address space and
- * remap the buffer in it.
- */
- mmap_write_lock(task->mm);
-
- /* find some free area in address space, must have mmap sem held */
- vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
- if (IS_ERR_VALUE(vma->vm_start)) {
- DPRINT(("Cannot find unmapped area for size %ld\n", size));
- mmap_write_unlock(task->mm);
- goto error;
- }
- vma->vm_end = vma->vm_start + size;
- vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
-
- DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
-
- /* can only be applied to current task, need to have the mm semaphore held when called */
- if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
- DPRINT(("Can't remap buffer\n"));
- mmap_write_unlock(task->mm);
- goto error;
- }
-
- /*
- * now insert the vma in the vm list for the process, must be
- * done with mmap lock held
- */
- insert_vm_struct(mm, vma);
-
- vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
- mmap_write_unlock(task->mm);
-
- /*
- * keep track of user level virtual address
- */
- ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
- *(unsigned long *)user_vaddr = vma->vm_start;
-
- return 0;
-
-error:
- vm_area_free(vma);
-error_kmem:
- vfree(smpl_buf);
-
- return -ENOMEM;
-}
-
-/*
- * XXX: do something better here
- */
-static int
-pfm_bad_permissions(struct task_struct *task)
-{
- const struct cred *tcred;
- kuid_t uid = current_uid();
- kgid_t gid = current_gid();
- int ret;
-
- rcu_read_lock();
- tcred = __task_cred(task);
-
- /* inspired by ptrace_attach() */
- DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
- from_kuid(&init_user_ns, uid),
- from_kgid(&init_user_ns, gid),
- from_kuid(&init_user_ns, tcred->euid),
- from_kuid(&init_user_ns, tcred->suid),
- from_kuid(&init_user_ns, tcred->uid),
- from_kgid(&init_user_ns, tcred->egid),
- from_kgid(&init_user_ns, tcred->sgid)));
-
- ret = ((!uid_eq(uid, tcred->euid))
- || (!uid_eq(uid, tcred->suid))
- || (!uid_eq(uid, tcred->uid))
- || (!gid_eq(gid, tcred->egid))
- || (!gid_eq(gid, tcred->sgid))
- || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
-
- rcu_read_unlock();
- return ret;
-}
-
-static int
-pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
-{
- int ctx_flags;
-
- /* valid signal */
-
- ctx_flags = pfx->ctx_flags;
-
- if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
-
- /*
- * cannot block in this mode
- */
- if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
- DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
- return -EINVAL;
- }
- } else {
- }
- /* probably more to add here */
-
- return 0;
-}
-
-static int
-pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
- unsigned int cpu, pfarg_context_t *arg)
-{
- pfm_buffer_fmt_t *fmt = NULL;
- unsigned long size = 0UL;
- void *uaddr = NULL;
- void *fmt_arg = NULL;
- int ret = 0;
-#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
-
- /* invoke and lock buffer format, if found */
- fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
- if (fmt == NULL) {
- DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
- return -EINVAL;
- }
-
- /*
- * buffer argument MUST be contiguous to pfarg_context_t
- */
- if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
-
- ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
-
- DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
-
- if (ret) goto error;
-
- /* link buffer format and context */
- ctx->ctx_buf_fmt = fmt;
- ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
-
- /*
- * check if buffer format wants to use perfmon buffer allocation/mapping service
- */
- ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
- if (ret) goto error;
-
- if (size) {
- /*
- * buffer is always remapped into the caller's address space
- */
- ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
- if (ret) goto error;
-
- /* keep track of user address of buffer */
- arg->ctx_smpl_vaddr = uaddr;
- }
- ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
-
-error:
- return ret;
-}
-
-static void
-pfm_reset_pmu_state(pfm_context_t *ctx)
-{
- int i;
-
- /*
- * install reset values for PMC.
- */
- for (i=1; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_IMPL(i) == 0) continue;
- ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
- DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
- }
- /*
- * PMD registers are set to 0UL when the context is memset()
- */
-
- /*
- * On context switch restore, we must restore ALL pmc and ALL pmd even
- * when they are not actively used by the task. In UP, the incoming process
- * may otherwise pick up left over PMC, PMD state from the previous process.
- * As opposed to PMD, stale PMC can cause harm to the incoming
- * process because they may change what is being measured.
- * Therefore, we must systematically reinstall the entire
- * PMC state. In SMP, the same thing is possible on the
- * same CPU but also between 2 CPUs.
- *
- * The problem with PMD is information leaking especially
- * to user level when psr.sp=0
- *
- * There is unfortunately no easy way to avoid this problem
- * on either UP or SMP. This definitely slows down the
- * pfm_load_regs() function.
- */
-
- /*
- * bitmask of all PMCs accessible to this context
- *
- * PMC0 is treated differently.
- */
- ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
-
- /*
- * bitmask of all PMDs that are accessible to this context
- */
- ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
-
- DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
-
- /*
- * useful in case of re-enable after disable
- */
- ctx->ctx_used_ibrs[0] = 0UL;
- ctx->ctx_used_dbrs[0] = 0UL;
-}
-
-static int
-pfm_ctx_getsize(void *arg, size_t *sz)
-{
- pfarg_context_t *req = (pfarg_context_t *)arg;
- pfm_buffer_fmt_t *fmt;
-
- *sz = 0;
-
- if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
-
- fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
- if (fmt == NULL) {
- DPRINT(("cannot find buffer format\n"));
- return -EINVAL;
- }
- /* get just enough to copy in user parameters */
- *sz = fmt->fmt_arg_size;
- DPRINT(("arg_size=%lu\n", *sz));
-
- return 0;
-}
-
-
-
-/*
- * cannot attach if :
- * - kernel task
- * - task not owned by caller
- * - task incompatible with context mode
- */
-static int
-pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
-{
- /*
- * no kernel task or task not owned by the caller
- */
- if (task->mm == NULL) {
- DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
- return -EPERM;
- }
- if (pfm_bad_permissions(task)) {
- DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
- return -EPERM;
- }
- /*
- * cannot block in self-monitoring mode
- */
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
- DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
- return -EINVAL;
- }
-
- if (task->exit_state == EXIT_ZOMBIE) {
- DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
- return -EBUSY;
- }
-
- /*
- * always ok for self
- */
- if (task == current) return 0;
-
- if (!task_is_stopped_or_traced(task)) {
- DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
- return -EBUSY;
- }
- /*
- * make sure the task is off any CPU
- */
- wait_task_inactive(task, 0);
-
- /* more to come... */
-
- return 0;
-}
-
-static int
-pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
-{
- struct task_struct *p = current;
- int ret;
-
- /* XXX: need to add more checks here */
- if (pid < 2) return -EPERM;
-
- if (pid != task_pid_vnr(current)) {
- /* make sure task cannot go away while we operate on it */
- p = find_get_task_by_vpid(pid);
- if (!p)
- return -ESRCH;
- }
-
- ret = pfm_task_incompatible(ctx, p);
- if (ret == 0) {
- *task = p;
- } else if (p != current) {
- pfm_put_task(p);
- }
- return ret;
-}
-
-
-
-static int
-pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- pfarg_context_t *req = (pfarg_context_t *)arg;
- struct file *filp;
- struct path path;
- int ctx_flags;
- int fd;
- int ret;
-
- /* let's check the arguments first */
- ret = pfarg_is_sane(current, req);
- if (ret < 0)
- return ret;
-
- ctx_flags = req->ctx_flags;
-
- ret = -ENOMEM;
-
- fd = get_unused_fd_flags(0);
- if (fd < 0)
- return fd;
-
- ctx = pfm_context_alloc(ctx_flags);
- if (!ctx)
- goto error;
-
- filp = pfm_alloc_file(ctx);
- if (IS_ERR(filp)) {
- ret = PTR_ERR(filp);
- goto error_file;
- }
-
- req->ctx_fd = ctx->ctx_fd = fd;
-
- /*
- * does the user want to sample?
- */
- if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
- ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
- if (ret)
- goto buffer_error;
- }
-
- DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
- ctx,
- ctx_flags,
- ctx->ctx_fl_system,
- ctx->ctx_fl_block,
- ctx->ctx_fl_excl_idle,
- ctx->ctx_fl_no_msg,
- ctx->ctx_fd));
-
- /*
- * initialize soft PMU state
- */
- pfm_reset_pmu_state(ctx);
-
- fd_install(fd, filp);
-
- return 0;
-
-buffer_error:
- path = filp->f_path;
- put_filp(filp);
- path_put(&path);
-
- if (ctx->ctx_buf_fmt) {
- pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
- }
-error_file:
- pfm_context_free(ctx);
-
-error:
- put_unused_fd(fd);
- return ret;
-}
-
-static inline unsigned long
-pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
-{
- unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
- unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
- extern unsigned long carta_random32 (unsigned long seed);
-
- if (reg->flags & PFM_REGFL_RANDOM) {
- new_seed = carta_random32(old_seed);
- val -= (old_seed & mask); /* counter values are negative numbers! */
- if ((mask >> 32) != 0)
- /* construct a full 64-bit random value: */
- new_seed |= carta_random32(old_seed >> 32) << 32;
- reg->seed = new_seed;
- }
- reg->lval = val;
- return val;
-}
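/*
 * Illustration only: the "counter values are negative numbers" remark above
 * reflects that the PMU counts upward and interrupts on wrap-around, so a
 * sampling period p is typically programmed as (counter range - p). A minimal
 * sketch, assuming ovfl_val is the implemented-bits mask (pmu_conf->ovfl_val);
 * period_to_reset() is a hypothetical helper, not part of the perfmon ABI.
 */
static inline unsigned long period_to_reset(unsigned long period,
					    unsigned long ovfl_val)
{
	/* e.g. with 47 implemented bits: 2^47 - period */
	return (ovfl_val + 1UL) - period;
}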
-
-static void
-pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
-{
- unsigned long mask = ovfl_regs[0];
- unsigned long reset_others = 0UL;
- unsigned long val;
- int i;
-
- /*
- * now restore reset value on sampling overflowed counters
- */
- mask >>= PMU_FIRST_COUNTER;
- for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
-
- if ((mask & 0x1UL) == 0UL) continue;
-
- ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
- reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
-
- DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
- }
-
- /*
- * Now take care of resetting the other registers
- */
- for(i = 0; reset_others; i++, reset_others >>= 1) {
-
- if ((reset_others & 0x1) == 0) continue;
-
- ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
-
- DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
- is_long_reset ? "long" : "short", i, val));
- }
-}
-
-static void
-pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
-{
- unsigned long mask = ovfl_regs[0];
- unsigned long reset_others = 0UL;
- unsigned long val;
- int i;
-
- DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
-
- if (ctx->ctx_state == PFM_CTX_MASKED) {
- pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
- return;
- }
-
- /*
- * now restore reset value on sampling overflowed counters
- */
- mask >>= PMU_FIRST_COUNTER;
- for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
-
- if ((mask & 0x1UL) == 0UL) continue;
-
- val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
- reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
-
- DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
-
- pfm_write_soft_counter(ctx, i, val);
- }
-
- /*
- * Now take care of resetting the other registers
- */
- for(i = 0; reset_others; i++, reset_others >>= 1) {
-
- if ((reset_others & 0x1) == 0) continue;
-
- val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
-
- if (PMD_IS_COUNTING(i)) {
- pfm_write_soft_counter(ctx, i, val);
- } else {
- ia64_set_pmd(i, val);
- }
- DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
- is_long_reset ? "long" : "short", i, val));
- }
- ia64_srlz_d();
-}
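/*
 * Illustration only: both loops above walk a bitmask by shifting and testing
 * bit 0. The generic helper from <linux/bitops.h> expresses the same walk
 * more directly; reset_overflowed_pmds() is a hypothetical wrapper shown
 * purely to make the iteration pattern explicit.
 */
#include <linux/bitops.h>

static void reset_overflowed_pmds(unsigned long *ovfl_regs,
				  unsigned int first, unsigned int nbits)
{
	unsigned long i;

	for_each_set_bit(i, ovfl_regs, nbits) {
		if (i < first)
			continue;
		/* reset PMD i here, as pfm_reset_regs() does */
	}
}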
-
-static int
-pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task;
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned long value, pmc_pm;
- unsigned long smpl_pmds, reset_pmds, impl_pmds;
- unsigned int cnum, reg_flags, flags, pmc_type;
- int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
- int is_monitor, is_counting, state;
- int ret = -EINVAL;
- pfm_reg_check_t wr_func;
-#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- task = ctx->ctx_task;
- impl_pmds = pmu_conf->impl_pmds[0];
-
- if (state == PFM_CTX_ZOMBIE) return -EINVAL;
-
- if (is_loaded) {
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
- }
- expert_mode = pfm_sysctl.expert_mode;
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
- reg_flags = req->reg_flags;
- value = req->reg_value;
- smpl_pmds = req->reg_smpl_pmds[0];
- reset_pmds = req->reg_reset_pmds[0];
- flags = 0;
-
-
- if (cnum >= PMU_MAX_PMCS) {
- DPRINT(("pmc%u is invalid\n", cnum));
- goto error;
- }
-
- pmc_type = pmu_conf->pmc_desc[cnum].type;
- pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
- is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
- is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
-
- /*
- * we reject all non-implemented PMCs as well
- * as attempts to modify PMC[0-3] which are used
- * as status registers by the PMU
- */
- if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
- DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
- goto error;
- }
- wr_func = pmu_conf->pmc_desc[cnum].write_check;
- /*
- * If the PMC is a monitor, then if the value is not the default:
- * - system-wide session: PMCx.pm=1 (privileged monitor)
- * - per-task : PMCx.pm=0 (user monitor)
- */
- if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
- DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
- cnum,
- pmc_pm,
- is_system));
- goto error;
- }
-
- if (is_counting) {
- /*
- * enforce generation of overflow interrupt. Necessary on all
- * CPUs.
- */
- value |= 1 << PMU_PMC_OI;
-
- if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
- flags |= PFM_REGFL_OVFL_NOTIFY;
- }
-
- if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
-
- /* verify validity of smpl_pmds */
- if ((smpl_pmds & impl_pmds) != smpl_pmds) {
- DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
- goto error;
- }
-
- /* verify validity of reset_pmds */
- if ((reset_pmds & impl_pmds) != reset_pmds) {
- DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
- goto error;
- }
- } else {
- if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
- DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
- goto error;
- }
- /* eventids on non-counting monitors are ignored */
- }
-
- /*
- * execute write checker, if any
- */
- if (likely(expert_mode == 0 && wr_func)) {
- ret = (*wr_func)(task, ctx, cnum, &value, regs);
- if (ret) goto error;
- ret = -EINVAL;
- }
-
- /*
- * no error on this register
- */
- PFM_REG_RETFLAG_SET(req->reg_flags, 0);
-
- /*
- * Now we commit the changes to the software state
- */
-
- /*
- * update overflow information
- */
- if (is_counting) {
- /*
- * full flag update each time a register is programmed
- */
- ctx->ctx_pmds[cnum].flags = flags;
-
- ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
- ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
- ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
-
- /*
- * Mark all PMDS to be accessed as used.
- *
- * We do not keep track of PMC because we have to
- * systematically restore ALL of them.
- *
- * We do not update the used_monitors mask, because
- * if we have not programmed them, then they will be in
- * a quiescent state, therefore we will not need to
- * mask/restore them when the context is MASKED.
- */
- CTX_USED_PMD(ctx, reset_pmds);
- CTX_USED_PMD(ctx, smpl_pmds);
- /*
- * make sure we do not try to reset on
- * restart because we have established new values
- */
- if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
- }
- /*
- * Needed in case the user does not initialize the equivalent
- * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
- * possible leak here.
- */
- CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
-
- /*
- * keep track of the monitor PMC that we are using.
- * we save the value of the pmc in ctx_pmcs[] and if
- * the monitoring is not stopped for the context we also
- * place it in the saved state area so that it will be
- * picked up later by the context switch code.
- *
- * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
- *
- * The value in th_pmcs[] may be modified on overflow, i.e., when
- * monitoring needs to be stopped.
- */
- if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
-
- /*
- * update context state
- */
- ctx->ctx_pmcs[cnum] = value;
-
- if (is_loaded) {
- /*
- * write thread state
- */
- if (is_system == 0) ctx->th_pmcs[cnum] = value;
-
- /*
- * write hardware register if we can
- */
- if (can_access_pmu) {
- ia64_set_pmc(cnum, value);
- }
-#ifdef CONFIG_SMP
- else {
- /*
- * per-task SMP only here
- *
- * we are guaranteed that the task is not running on the other CPU,
- * we indicate that this PMD will need to be reloaded if the task
- * is rescheduled on the CPU it ran last on.
- */
- ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
- }
-#endif
- }
-
- DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
- cnum,
- value,
- is_loaded,
- can_access_pmu,
- flags,
- ctx->ctx_all_pmcs[0],
- ctx->ctx_used_pmds[0],
- ctx->ctx_pmds[cnum].eventid,
- smpl_pmds,
- reset_pmds,
- ctx->ctx_reload_pmcs[0],
- ctx->ctx_used_monitors[0],
- ctx->ctx_ovfl_regs[0]));
- }
-
- /*
- * make sure the changes are visible
- */
- if (can_access_pmu) ia64_srlz_d();
-
- return 0;
-error:
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
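/*
 * Illustration only: the "is_system ^ pmc_pm" test above encodes the rule
 * that a monitor PMC written with a non-default value must carry a
 * privileged-monitor bit matching the session type: system-wide sessions
 * need pm=1, per-task sessions need pm=0. pmc_pm_mismatch() is a
 * hypothetical helper spelling out that check.
 */
static inline int pmc_pm_mismatch(int is_system, unsigned long pmc_pm)
{
	/* non-zero means the pm bit does not match the session type */
	return (is_system ^ (int)pmc_pm) != 0;
}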
-
-static int
-pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task;
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned long value, hw_value, ovfl_mask;
- unsigned int cnum;
- int i, can_access_pmu = 0, state;
- int is_counting, is_loaded, is_system, expert_mode;
- int ret = -EINVAL;
- pfm_reg_check_t wr_func;
-
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- ovfl_mask = pmu_conf->ovfl_val;
- task = ctx->ctx_task;
-
- if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
-
- /*
- * on both UP and SMP, we can only write to the PMD when the task is
- * the owner of the local PMU.
- */
- if (likely(is_loaded)) {
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
- }
- expert_mode = pfm_sysctl.expert_mode;
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
- value = req->reg_value;
-
- if (!PMD_IS_IMPL(cnum)) {
- DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
- goto abort_mission;
- }
- is_counting = PMD_IS_COUNTING(cnum);
- wr_func = pmu_conf->pmd_desc[cnum].write_check;
-
- /*
- * execute write checker, if any
- */
- if (unlikely(expert_mode == 0 && wr_func)) {
- unsigned long v = value;
-
- ret = (*wr_func)(task, ctx, cnum, &v, regs);
- if (ret) goto abort_mission;
-
- value = v;
- ret = -EINVAL;
- }
-
- /*
- * no error on this register
- */
- PFM_REG_RETFLAG_SET(req->reg_flags, 0);
-
- /*
- * now commit changes to software state
- */
- hw_value = value;
-
- /*
- * update virtualized (64bits) counter
- */
- if (is_counting) {
- /*
- * write context state
- */
- ctx->ctx_pmds[cnum].lval = value;
-
- /*
- * when the context is loaded we use the split value
- */
- if (is_loaded) {
- hw_value = value & ovfl_mask;
- value = value & ~ovfl_mask;
- }
- }
- /*
- * update reset values (not just for counters)
- */
- ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
- ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
-
- /*
- * update randomization parameters (not just for counters)
- */
- ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
- ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
-
- /*
- * update context value
- */
- ctx->ctx_pmds[cnum].val = value;
-
- /*
- * Keep track of what we use
- *
- * We do not keep track of PMC because we have to
- * systematically restore ALL of them.
- */
- CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
-
- /*
- * mark this PMD register used as well
- */
- CTX_USED_PMD(ctx, RDEP(cnum));
-
- /*
- * make sure we do not try to reset on
- * restart because we have established new values
- */
- if (is_counting && state == PFM_CTX_MASKED) {
- ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
- }
-
- if (is_loaded) {
- /*
- * write thread state
- */
- if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
-
- /*
- * write hardware register if we can
- */
- if (can_access_pmu) {
- ia64_set_pmd(cnum, hw_value);
- } else {
-#ifdef CONFIG_SMP
- /*
- * we are guaranteed that the task is not running on the other CPU,
- * we indicate that this PMD will need to be reloaded if the task
- * is rescheduled on the CPU it ran last on.
- */
- ctx->ctx_reload_pmds[0] |= 1UL << cnum;
-#endif
- }
- }
-
- DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
- "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
- cnum,
- value,
- is_loaded,
- can_access_pmu,
- hw_value,
- ctx->ctx_pmds[cnum].val,
- ctx->ctx_pmds[cnum].short_reset,
- ctx->ctx_pmds[cnum].long_reset,
- PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
- ctx->ctx_pmds[cnum].seed,
- ctx->ctx_pmds[cnum].mask,
- ctx->ctx_used_pmds[0],
- ctx->ctx_pmds[cnum].reset_pmds[0],
- ctx->ctx_reload_pmds[0],
- ctx->ctx_all_pmds[0],
- ctx->ctx_ovfl_regs[0]));
- }
-
- /*
- * make changes visible
- */
- if (can_access_pmu) ia64_srlz_d();
-
- return 0;
-
-abort_mission:
- /*
- * for now, we have only one possibility for error
- */
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
-
-/*
- * By way of PROTECT_CTX(), interrupts are masked while we are in this function.
- * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
- * interrupt is delivered during the call, it will be kept pending until we leave, making
- * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are
- * guaranteed to return consistent data to the user, though it may simply be old. It is not
- * trivial to handle the overflow while inside the call because we may end up in
- * some module sampling buffer code, causing deadlocks.
- */
-static int
-pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task;
- unsigned long val = 0UL, lval, ovfl_mask, sval;
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned int cnum, reg_flags = 0;
- int i, can_access_pmu = 0, state;
- int is_loaded, is_system, is_counting, expert_mode;
- int ret = -EINVAL;
- pfm_reg_check_t rd_func;
-
- /*
- * access is possible when loaded only for
- * self-monitoring tasks or in UP mode
- */
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- ovfl_mask = pmu_conf->ovfl_val;
- task = ctx->ctx_task;
-
- if (state == PFM_CTX_ZOMBIE) return -EINVAL;
-
- if (likely(is_loaded)) {
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- /*
- * this can be true when not self-monitoring, but only in UP
- */
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
-
- if (can_access_pmu) ia64_srlz_d();
- }
- expert_mode = pfm_sysctl.expert_mode;
-
- DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
- is_loaded,
- can_access_pmu,
- state));
-
- /*
- * on both UP and SMP, we can only read the PMD from the hardware register when
- * the task is the owner of the local PMU.
- */
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
- reg_flags = req->reg_flags;
-
- if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
- /*
- * we can only read the registers that we use. That includes
- * the ones we explicitly initialize AND the ones we want included
- * in the sampling buffer (smpl_regs).
- *
- * Having this restriction allows optimization in the ctxsw routine
- * without compromising security (leaks)
- */
- if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
-
- sval = ctx->ctx_pmds[cnum].val;
- lval = ctx->ctx_pmds[cnum].lval;
- is_counting = PMD_IS_COUNTING(cnum);
-
- /*
- * If the task is not the current one, then we check if the
- * PMU state is still in the local live register due to lazy ctxsw.
- * If true, then we read directly from the registers.
- */
- if (can_access_pmu){
- val = ia64_get_pmd(cnum);
- } else {
- /*
- * context has been saved
- * if context is zombie, then task does not exist anymore.
- * In this case, we use the full value saved in the context (pfm_flush_regs()).
- */
- val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
- }
- rd_func = pmu_conf->pmd_desc[cnum].read_check;
-
- if (is_counting) {
- /*
- * XXX: need to check for overflow when loaded
- */
- val &= ovfl_mask;
- val += sval;
- }
-
- /*
- * execute read checker, if any
- */
- if (unlikely(expert_mode == 0 && rd_func)) {
- unsigned long v = val;
- ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
- if (ret) goto error;
- val = v;
- ret = -EINVAL;
- }
-
- PFM_REG_RETFLAG_SET(reg_flags, 0);
-
- DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
-
- /*
- * update register return value, abort all if problem during copy.
- * we only modify the reg_flags field. no check mode is fine because
- * access has been verified upfront in sys_perfmonctl().
- */
- req->reg_value = val;
- req->reg_flags = reg_flags;
- req->reg_last_reset_val = lval;
- }
-
- return 0;
-
-error:
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
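/*
 * Illustration only: pfm_write_pmds() and pfm_read_pmds() together implement
 * 64-bit virtualization of a narrower hardware counter. ovfl_mask covers the
 * bits the hardware really implements; the bits above it live in
 * ctx_pmds[].val and are added back on read. A minimal sketch of the two
 * directions, with hypothetical names:
 */
static void pmd_split(unsigned long value, unsigned long ovfl_mask,
		      unsigned long *hw_value, unsigned long *soft_value)
{
	*hw_value   = value & ovfl_mask;	/* goes into the PMD register */
	*soft_value = value & ~ovfl_mask;	/* kept in ctx_pmds[].val */
}

static unsigned long pmd_merge(unsigned long hw_value, unsigned long ovfl_mask,
			       unsigned long soft_value)
{
	return soft_value + (hw_value & ovfl_mask);
}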
-
-int
-pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_write_pmcs(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_write_pmcs);
-
-int
-pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_read_pmds(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_read_pmds);
-
-/*
- * Only call this function when a process it trying to
- * write the debug registers (reading is always allowed)
- */
-int
-pfm_use_debug_registers(struct task_struct *task)
-{
- pfm_context_t *ctx = task->thread.pfm_context;
- unsigned long flags;
- int ret = 0;
-
- if (pmu_conf->use_rr_dbregs == 0) return 0;
-
- DPRINT(("called for [%d]\n", task_pid_nr(task)));
-
- /*
- * do it only once
- */
- if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
-
- /*
- * Even on SMP, we do not need to use an atomic here because
- * the only way in is via ptrace() and this is possible only when the
- * process is stopped. Even in the case where the ctxsw out is not totally
- * completed by the time we come here, there is no way the 'stopped' process
- * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
- * So this is always safe.
- */
- if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
-
- LOCK_PFS(flags);
-
- /*
- * We cannot allow setting breakpoints when system wide monitoring
- * sessions are using the debug registers.
- */
- if (pfm_sessions.pfs_sys_use_dbregs > 0)
- ret = -1;
- else
- pfm_sessions.pfs_ptrace_use_dbregs++;
-
- DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
- pfm_sessions.pfs_ptrace_use_dbregs,
- pfm_sessions.pfs_sys_use_dbregs,
- task_pid_nr(task), ret));
-
- UNLOCK_PFS(flags);
-
- return ret;
-}
-
-/*
- * This function is called for every task that exits with the
- * IA64_THREAD_DBG_VALID set. This indicates a task which was
- * able to use the debug registers for debugging purposes via
- * ptrace(). Therefore we know it was not using them for
- * performance monitoring, so we only decrement the number
- * of "ptraced" debug register users to keep the count up to date
- */
-int
-pfm_release_debug_registers(struct task_struct *task)
-{
- unsigned long flags;
- int ret;
-
- if (pmu_conf->use_rr_dbregs == 0) return 0;
-
- LOCK_PFS(flags);
- if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
- printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
- ret = -1;
- } else {
- pfm_sessions.pfs_ptrace_use_dbregs--;
- ret = 0;
- }
- UNLOCK_PFS(flags);
-
- return ret;
-}
-
-static int
-pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task;
- pfm_buffer_fmt_t *fmt;
- pfm_ovfl_ctrl_t rst_ctrl;
- int state, is_system;
- int ret = 0;
-
- state = ctx->ctx_state;
- fmt = ctx->ctx_buf_fmt;
- is_system = ctx->ctx_fl_system;
- task = PFM_CTX_TASK(ctx);
-
- switch(state) {
- case PFM_CTX_MASKED:
- break;
- case PFM_CTX_LOADED:
- if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
- /* fall through */
- case PFM_CTX_UNLOADED:
- case PFM_CTX_ZOMBIE:
- DPRINT(("invalid state=%d\n", state));
- return -EBUSY;
- default:
- DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
- return -EINVAL;
- }
-
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
-
- /* sanity check */
- if (unlikely(task == NULL)) {
- printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
- return -EINVAL;
- }
-
- if (task == current || is_system) {
-
- fmt = ctx->ctx_buf_fmt;
-
- DPRINT(("restarting self %d ovfl=0x%lx\n",
- task_pid_nr(task),
- ctx->ctx_ovfl_regs[0]));
-
- if (CTX_HAS_SMPL(ctx)) {
-
- prefetch(ctx->ctx_smpl_hdr);
-
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 0;
-
- if (state == PFM_CTX_LOADED)
- ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- else
- ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- } else {
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 1;
- }
-
- if (ret == 0) {
- if (rst_ctrl.bits.reset_ovfl_pmds)
- pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
-
- if (rst_ctrl.bits.mask_monitoring == 0) {
- DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
-
- if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
- } else {
- DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
-
- // cannot use pfm_stop_monitoring(task, regs);
- }
- }
- /*
- * clear overflowed PMD mask to remove any stale information
- */
- ctx->ctx_ovfl_regs[0] = 0UL;
-
- /*
- * back to LOADED state
- */
- ctx->ctx_state = PFM_CTX_LOADED;
-
- /*
- * XXX: not really useful for self monitoring
- */
- ctx->ctx_fl_can_restart = 0;
-
- return 0;
- }
-
- /*
- * restart another task
- */
-
- /*
- * When PFM_CTX_MASKED, we cannot issue a restart before the previous
- * one is seen by the task.
- */
- if (state == PFM_CTX_MASKED) {
- if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
- /*
- * will prevent subsequent restart before this one is
- * seen by other task
- */
- ctx->ctx_fl_can_restart = 0;
- }
-
- /*
- * if blocking, then post the semaphore when in PFM_CTX_MASKED, i.e.,
- * the task is blocked or on its way to block. That's the normal
- * restart path. If the monitoring is not masked, then the task
- * can be actively monitoring and we cannot directly intervene.
- * Therefore we use the trap mechanism to catch the task and
- * force it to reset the buffer/reset PMDs.
- *
- * if non-blocking, then we ensure that the task will go into
- * pfm_handle_work() before returning to user mode.
- *
- * We cannot explicitly reset another task, it MUST always
- * be done by the task itself. This works for system wide because
- * the tool that is controlling the session is logically doing
- * "self-monitoring".
- */
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
- DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
- complete(&ctx->ctx_restart_done);
- } else {
- DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
-
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
-
- PFM_SET_WORK_PENDING(task, 1);
-
- set_notify_resume(task);
-
- /*
- * XXX: send reschedule if task runs on another CPU
- */
- }
- return 0;
-}
-
-static int
-pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- unsigned int m = *(unsigned int *)arg;
-
- pfm_sysctl.debug = m == 0 ? 0 : 1;
-
- printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
-
- if (m == 0) {
- memset(pfm_stats, 0, sizeof(pfm_stats));
- for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
- }
- return 0;
-}
-
-/*
- * arg can be NULL and count can be zero for this function
- */
-static int
-pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct thread_struct *thread = NULL;
- struct task_struct *task;
- pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
- unsigned long flags;
- dbreg_t dbreg;
- unsigned int rnum;
- int first_time;
- int ret = 0, state;
- int i, can_access_pmu = 0;
- int is_system, is_loaded;
-
- if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- task = ctx->ctx_task;
-
- if (state == PFM_CTX_ZOMBIE) return -EINVAL;
-
- /*
- * on both UP and SMP, we can only write to the debug registers when the task is
- * the owner of the local PMU.
- */
- if (is_loaded) {
- thread = &task->thread;
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
- }
-
- /*
- * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
- * ensuring that no real breakpoint can be installed via this call.
- *
- * IMPORTANT: regs can be NULL in this function
- */
-
- first_time = ctx->ctx_fl_using_dbreg == 0;
-
- /*
- * don't bother if we are loaded and task is being debugged
- */
- if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
- DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
- return -EBUSY;
- }
-
- /*
- * check for debug registers in system wide mode
- *
- * Even though a check is done in pfm_context_load(),
- * we must repeat it here, in case the registers are
- * written after the context is loaded
- */
- if (is_loaded) {
- LOCK_PFS(flags);
-
- if (first_time && is_system) {
- if (pfm_sessions.pfs_ptrace_use_dbregs)
- ret = -EBUSY;
- else
- pfm_sessions.pfs_sys_use_dbregs++;
- }
- UNLOCK_PFS(flags);
- }
-
- if (ret != 0) return ret;
-
- /*
- * mark ourselves as a user of the debug registers for
- * perfmon purposes.
- */
- ctx->ctx_fl_using_dbreg = 1;
-
- /*
- * clear hardware registers to make sure we don't
- * pick up stale state.
- *
- * for a system wide session, we do not use
- * thread.dbr, thread.ibr because this process
- * never leaves the current CPU and the state
- * is shared by all processes running on it
- */
- if (first_time && can_access_pmu) {
- DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
- for (i=0; i < pmu_conf->num_ibrs; i++) {
- ia64_set_ibr(i, 0UL);
- ia64_dv_serialize_instruction();
- }
- ia64_srlz_i();
- for (i=0; i < pmu_conf->num_dbrs; i++) {
- ia64_set_dbr(i, 0UL);
- ia64_dv_serialize_data();
- }
- ia64_srlz_d();
- }
-
- /*
- * Now install the values into the registers
- */
- for (i = 0; i < count; i++, req++) {
-
- rnum = req->dbreg_num;
- dbreg.val = req->dbreg_value;
-
- ret = -EINVAL;
-
- if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
- DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
- rnum, dbreg.val, mode, i, count));
-
- goto abort_mission;
- }
-
- /*
- * make sure we do not install an enabled breakpoint
- */
- if (rnum & 0x1) {
- if (mode == PFM_CODE_RR)
- dbreg.ibr.ibr_x = 0;
- else
- dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
- }
-
- PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
-
- /*
- * Debug registers, just like PMC, can only be modified
- * by a kernel call. Moreover, perfmon() accesses to those
- * registers are centralized in this routine. The hardware
- * does not modify the value of these registers, therefore,
- * if we save them as they are written, we can avoid having
- * to save them on context switch out. This is made possible
- * by the fact that when perfmon uses debug registers, ptrace()
- * won't be able to modify them concurrently.
- */
- if (mode == PFM_CODE_RR) {
- CTX_USED_IBR(ctx, rnum);
-
- if (can_access_pmu) {
- ia64_set_ibr(rnum, dbreg.val);
- ia64_dv_serialize_instruction();
- }
-
- ctx->ctx_ibrs[rnum] = dbreg.val;
-
- DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
- rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
- } else {
- CTX_USED_DBR(ctx, rnum);
-
- if (can_access_pmu) {
- ia64_set_dbr(rnum, dbreg.val);
- ia64_dv_serialize_data();
- }
- ctx->ctx_dbrs[rnum] = dbreg.val;
-
- DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
- rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
- }
- }
-
- return 0;
-
-abort_mission:
- /*
- * in case it was our first attempt, we undo the global modifications
- */
- if (first_time) {
- LOCK_PFS(flags);
- if (ctx->ctx_fl_system) {
- pfm_sessions.pfs_sys_use_dbregs--;
- }
- UNLOCK_PFS(flags);
- ctx->ctx_fl_using_dbreg = 0;
- }
- /*
- * install error return flag
- */
- PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
-
- return ret;
-}
-
-static int
-pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
-}
-
-static int
-pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
-}
-
-int
-pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_write_ibrs(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_write_ibrs);
-
-int
-pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_write_dbrs(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_write_dbrs);
-
-
-static int
-pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- pfarg_features_t *req = (pfarg_features_t *)arg;
-
- req->ft_version = PFM_VERSION;
- return 0;
-}
-
-static int
-pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct pt_regs *tregs;
- struct task_struct *task = PFM_CTX_TASK(ctx);
- int state, is_system;
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- /*
- * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
- */
- if (state == PFM_CTX_UNLOADED) return -EINVAL;
-
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
- task_pid_nr(PFM_CTX_TASK(ctx)),
- state,
- is_system));
- /*
- * in system mode, we need to update the PMU directly
- * and the user level state of the caller, which may not
- * necessarily be the creator of the context.
- */
- if (is_system) {
- /*
- * Update local PMU first
- *
- * disable dcr pp
- */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
- ia64_srlz_i();
-
- /*
- * update local cpuinfo
- */
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
-
- /*
- * stop monitoring, does srlz.i
- */
- pfm_clear_psr_pp();
-
- /*
- * stop monitoring in the caller
- */
- ia64_psr(regs)->pp = 0;
-
- return 0;
- }
- /*
- * per-task mode
- */
-
- if (task == current) {
- /* stop monitoring at kernel level */
- pfm_clear_psr_up();
-
- /*
- * stop monitoring at the user level
- */
- ia64_psr(regs)->up = 0;
- } else {
- tregs = task_pt_regs(task);
-
- /*
- * stop monitoring at the user level
- */
- ia64_psr(tregs)->up = 0;
-
- /*
- * monitoring disabled in kernel at next reschedule
- */
- ctx->ctx_saved_psr_up = 0;
- DPRINT(("task=[%d]\n", task_pid_nr(task)));
- }
- return 0;
-}
-
-
-static int
-pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct pt_regs *tregs;
- int state, is_system;
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- if (state != PFM_CTX_LOADED) return -EINVAL;
-
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
-
- /*
- * in system mode, we need to update the PMU directly
- * and the user level state of the caller, which may not
- * necessarily be the creator of the context.
- */
- if (is_system) {
-
- /*
- * set user level psr.pp for the caller
- */
- ia64_psr(regs)->pp = 1;
-
- /*
- * now update the local PMU and cpuinfo
- */
- PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
-
- /*
- * start monitoring at kernel level
- */
- pfm_set_psr_pp();
-
- /* enable dcr pp */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
- ia64_srlz_i();
-
- return 0;
- }
-
- /*
- * per-process mode
- */
-
- if (ctx->ctx_task == current) {
-
- /* start monitoring at kernel level */
- pfm_set_psr_up();
-
- /*
- * activate monitoring at user level
- */
- ia64_psr(regs)->up = 1;
-
- } else {
- tregs = task_pt_regs(ctx->ctx_task);
-
- /*
- * start monitoring at the kernel level the next
- * time the task is scheduled
- */
- ctx->ctx_saved_psr_up = IA64_PSR_UP;
-
- /*
- * activate monitoring at user level
- */
- ia64_psr(tregs)->up = 1;
- }
- return 0;
-}
-
-static int
-pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned int cnum;
- int i;
- int ret = -EINVAL;
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
-
- if (!PMC_IS_IMPL(cnum)) goto abort_mission;
-
- req->reg_value = PMC_DFL_VAL(cnum);
-
- PFM_REG_RETFLAG_SET(req->reg_flags, 0);
-
- DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
- }
- return 0;
-
-abort_mission:
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
-
-static int
-pfm_check_task_exist(pfm_context_t *ctx)
-{
- struct task_struct *g, *t;
- int ret = -ESRCH;
-
- read_lock(&tasklist_lock);
-
- do_each_thread (g, t) {
- if (t->thread.pfm_context == ctx) {
- ret = 0;
- goto out;
- }
- } while_each_thread (g, t);
-out:
- read_unlock(&tasklist_lock);
-
- DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
-
- return ret;
-}
-
-static int
-pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task;
- struct thread_struct *thread;
- struct pfm_context_t *old;
- unsigned long flags;
-#ifndef CONFIG_SMP
- struct task_struct *owner_task = NULL;
-#endif
- pfarg_load_t *req = (pfarg_load_t *)arg;
- unsigned long *pmcs_source, *pmds_source;
- int the_cpu;
- int ret = 0;
- int state, is_system, set_dbregs = 0;
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
- /*
- * can only load from unloaded or terminated state
- */
- if (state != PFM_CTX_UNLOADED) {
- DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
- req->load_pid,
- ctx->ctx_state));
- return -EBUSY;
- }
-
- DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
-
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
- DPRINT(("cannot use blocking mode on self\n"));
- return -EINVAL;
- }
-
- ret = pfm_get_task(ctx, req->load_pid, &task);
- if (ret) {
- DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
- return ret;
- }
-
- ret = -EINVAL;
-
- /*
- * system wide is self monitoring only
- */
- if (is_system && task != current) {
- DPRINT(("system wide is self monitoring only load_pid=%d\n",
- req->load_pid));
- goto error;
- }
-
- thread = &task->thread;
-
- ret = 0;
- /*
- * cannot load a context which is using range restrictions,
- * into a task that is being debugged.
- */
- if (ctx->ctx_fl_using_dbreg) {
- if (thread->flags & IA64_THREAD_DBG_VALID) {
- ret = -EBUSY;
- DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
- goto error;
- }
- LOCK_PFS(flags);
-
- if (is_system) {
- if (pfm_sessions.pfs_ptrace_use_dbregs) {
- DPRINT(("cannot load [%d] dbregs in use\n",
- task_pid_nr(task)));
- ret = -EBUSY;
- } else {
- pfm_sessions.pfs_sys_use_dbregs++;
- DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
- set_dbregs = 1;
- }
- }
-
- UNLOCK_PFS(flags);
-
- if (ret) goto error;
- }
-
- /*
- * SMP system-wide monitoring implies self-monitoring.
- *
- * The programming model expects the task to
- * be pinned on a CPU throughout the session.
- * Here we take note of the current CPU at the
- * time the context is loaded. No call from
- * another CPU will be allowed.
- *
-	 * The pinning via sched_setaffinity()
- * must be done by the calling task prior
- * to this call.
- *
- * systemwide: keep track of CPU this session is supposed to run on
- */
- the_cpu = ctx->ctx_cpu = smp_processor_id();
-
- ret = -EBUSY;
- /*
- * now reserve the session
- */
- ret = pfm_reserve_session(current, is_system, the_cpu);
- if (ret) goto error;
-
- /*
- * task is necessarily stopped at this point.
- *
- * If the previous context was zombie, then it got removed in
- * pfm_save_regs(). Therefore we should not see it here.
- * If we see a context, then this is an active context
- *
- * XXX: needs to be atomic
- */
- DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
- thread->pfm_context, ctx));
-
- ret = -EBUSY;
- old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
- if (old != NULL) {
- DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
- goto error_unres;
- }
-
- pfm_reset_msgq(ctx);
-
- ctx->ctx_state = PFM_CTX_LOADED;
-
- /*
- * link context to task
- */
- ctx->ctx_task = task;
-
- if (is_system) {
- /*
- * we load as stopped
- */
- PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
-
- if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
- } else {
- thread->flags |= IA64_THREAD_PM_VALID;
- }
-
- /*
- * propagate into thread-state
- */
- pfm_copy_pmds(task, ctx);
- pfm_copy_pmcs(task, ctx);
-
- pmcs_source = ctx->th_pmcs;
- pmds_source = ctx->th_pmds;
-
- /*
- * always the case for system-wide
- */
- if (task == current) {
-
- if (is_system == 0) {
-
- /* allow user level control */
- ia64_psr(regs)->sp = 0;
- DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
-
- SET_LAST_CPU(ctx, smp_processor_id());
- INC_ACTIVATION();
- SET_ACTIVATION(ctx);
-#ifndef CONFIG_SMP
- /*
- * push the other task out, if any
- */
- owner_task = GET_PMU_OWNER();
- if (owner_task) pfm_lazy_save_regs(owner_task);
-#endif
- }
- /*
- * load all PMD from ctx to PMU (as opposed to thread state)
- * restore all PMC from ctx to PMU
- */
- pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
- pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
-
- ctx->ctx_reload_pmcs[0] = 0UL;
- ctx->ctx_reload_pmds[0] = 0UL;
-
- /*
- * guaranteed safe by earlier check against DBG_VALID
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
- /*
- * set new ownership
- */
- SET_PMU_OWNER(task, ctx);
-
- DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
- } else {
- /*
- * when not current, task MUST be stopped, so this is safe
- */
- regs = task_pt_regs(task);
-
- /* force a full reload */
- ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
- SET_LAST_CPU(ctx, -1);
-
- /* initial saved psr (stopped) */
- ctx->ctx_saved_psr_up = 0UL;
- ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
- }
-
- ret = 0;
-
-error_unres:
- if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
-error:
- /*
- * we must undo the dbregs setting (for system-wide)
- */
- if (ret && set_dbregs) {
- LOCK_PFS(flags);
- pfm_sessions.pfs_sys_use_dbregs--;
- UNLOCK_PFS(flags);
- }
- /*
- * release task, there is now a link with the context
- */
- if (is_system == 0 && task != current) {
- pfm_put_task(task);
-
- if (ret == 0) {
- ret = pfm_check_task_exist(ctx);
- if (ret) {
- ctx->ctx_state = PFM_CTX_UNLOADED;
- ctx->ctx_task = NULL;
- }
- }
- }
- return ret;
-}
-
-/*
- * in this function, we do not need to increase the use count
- * for the task via get_task_struct(), because we hold the
- * context lock. If the task were to disappear while having
- * a context attached, it would go through pfm_exit_thread()
- * which also grabs the context lock and would therefore be blocked
- * until we are here.
- */
-static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
-
-static int
-pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task = PFM_CTX_TASK(ctx);
- struct pt_regs *tregs;
- int prev_state, is_system;
- int ret;
-
- DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
-
- prev_state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- /*
- * unload only when necessary
- */
- if (prev_state == PFM_CTX_UNLOADED) {
- DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
- return 0;
- }
-
- /*
- * clear psr and dcr bits
- */
- ret = pfm_stop(ctx, NULL, 0, regs);
- if (ret) return ret;
-
- ctx->ctx_state = PFM_CTX_UNLOADED;
-
- /*
- * in system mode, we need to update the PMU directly
- * and the user level state of the caller, which may not
- * necessarily be the creator of the context.
- */
- if (is_system) {
-
- /*
- * Update cpuinfo
- *
- * local PMU is taken care of in pfm_stop()
- */
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
-
- /*
- * save PMDs in context
- * release ownership
- */
- pfm_flush_pmds(current, ctx);
-
- /*
- * at this point we are done with the PMU
- * so we can unreserve the resource.
- */
- if (prev_state != PFM_CTX_ZOMBIE)
- pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
-
- /*
- * disconnect context from task
- */
- task->thread.pfm_context = NULL;
- /*
- * disconnect task from context
- */
- ctx->ctx_task = NULL;
-
- /*
- * There is nothing more to cleanup here.
- */
- return 0;
- }
-
- /*
- * per-task mode
- */
- tregs = task == current ? regs : task_pt_regs(task);
-
- if (task == current) {
- /*
- * cancel user level control
- */
- ia64_psr(regs)->sp = 1;
-
- DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
- }
- /*
- * save PMDs to context
- * release ownership
- */
- pfm_flush_pmds(task, ctx);
-
- /*
- * at this point we are done with the PMU
- * so we can unreserve the resource.
- *
- * when state was ZOMBIE, we have already unreserved.
- */
- if (prev_state != PFM_CTX_ZOMBIE)
- pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
-
- /*
- * reset activation counter and psr
- */
- ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
- SET_LAST_CPU(ctx, -1);
-
- /*
- * PMU state will not be restored
- */
- task->thread.flags &= ~IA64_THREAD_PM_VALID;
-
- /*
- * break links between context and task
- */
- task->thread.pfm_context = NULL;
- ctx->ctx_task = NULL;
-
- PFM_SET_WORK_PENDING(task, 0);
-
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- ctx->ctx_fl_can_restart = 0;
- ctx->ctx_fl_going_zombie = 0;
-
- DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
-
- return 0;
-}
-
-
-/*
- * called only from exit_thread()
- * we come here only if the task has a context attached (loaded or masked)
- */
-void
-pfm_exit_thread(struct task_struct *task)
-{
- pfm_context_t *ctx;
- unsigned long flags;
- struct pt_regs *regs = task_pt_regs(task);
- int ret, state;
- int free_ok = 0;
-
- ctx = PFM_GET_CTX(task);
-
- PROTECT_CTX(ctx, flags);
-
- DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
-
- state = ctx->ctx_state;
- switch(state) {
- case PFM_CTX_UNLOADED:
- /*
- * only comes to this function if pfm_context is not NULL, i.e., cannot
- * be in unloaded state
- */
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
- break;
- case PFM_CTX_LOADED:
- case PFM_CTX_MASKED:
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
- }
- DPRINT(("ctx unloaded for current state was %d\n", state));
-
- pfm_end_notify_user(ctx);
- break;
- case PFM_CTX_ZOMBIE:
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
- }
- free_ok = 1;
- break;
- default:
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
- break;
- }
- UNPROTECT_CTX(ctx, flags);
-
- { u64 psr = pfm_get_psr();
- BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
- BUG_ON(GET_PMU_OWNER());
- BUG_ON(ia64_psr(regs)->up);
- BUG_ON(ia64_psr(regs)->pp);
- }
-
- /*
- * All memory free operations (especially for vmalloc'ed memory)
- * MUST be done with interrupts ENABLED.
- */
- if (free_ok) pfm_context_free(ctx);
-}
-
-/*
- * functions MUST be listed in the increasing order of their index (see perfmon.h)
- */
-#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
-#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
-#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
-#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
-#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
-
-static pfm_cmd_desc_t pfm_cmd_tab[]={
-/* 0 */PFM_CMD_NONE,
-/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
-/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
-/* 6 */PFM_CMD_NONE,
-/* 7 */PFM_CMD_NONE,
-/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
-/* 9 */PFM_CMD_NONE,
-/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
-/* 11 */PFM_CMD_NONE,
-/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
-/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
-/* 14 */PFM_CMD_NONE,
-/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
-/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
-/* 18 */PFM_CMD_NONE,
-/* 19 */PFM_CMD_NONE,
-/* 20 */PFM_CMD_NONE,
-/* 21 */PFM_CMD_NONE,
-/* 22 */PFM_CMD_NONE,
-/* 23 */PFM_CMD_NONE,
-/* 24 */PFM_CMD_NONE,
-/* 25 */PFM_CMD_NONE,
-/* 26 */PFM_CMD_NONE,
-/* 27 */PFM_CMD_NONE,
-/* 28 */PFM_CMD_NONE,
-/* 29 */PFM_CMD_NONE,
-/* 30 */PFM_CMD_NONE,
-/* 31 */PFM_CMD_NONE,
-/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
-/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
-};
-#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
-
-static int
-pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
-{
- struct task_struct *task;
- int state, old_state;
-
-recheck:
- state = ctx->ctx_state;
- task = ctx->ctx_task;
-
- if (task == NULL) {
- DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
- return 0;
- }
-
- DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
- ctx->ctx_fd,
- state,
- task_pid_nr(task),
- task->state, PFM_CMD_STOPPED(cmd)));
-
- /*
- * self-monitoring always ok.
- *
- * for system-wide the caller can either be the creator of the
-	 * context (the one to which the context is attached) OR
- * a task running on the same CPU as the session.
- */
- if (task == current || ctx->ctx_fl_system) return 0;
-
- /*
- * we are monitoring another thread
- */
- switch(state) {
- case PFM_CTX_UNLOADED:
- /*
- * if context is UNLOADED we are safe to go
- */
- return 0;
- case PFM_CTX_ZOMBIE:
- /*
- * no command can operate on a zombie context
- */
- DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
- return -EINVAL;
- case PFM_CTX_MASKED:
- /*
- * PMU state has been saved to software even though
- * the thread may still be running.
- */
- if (cmd != PFM_UNLOAD_CONTEXT) return 0;
- }
-
- /*
- * context is LOADED or MASKED. Some commands may need to have
- * the task stopped.
- *
- * We could lift this restriction for UP but it would mean that
- * the user has no guarantee the task would not run between
- * two successive calls to perfmonctl(). That's probably OK.
-	 * If the user wants to ensure the task does not run, then
- * the task must be stopped.
- */
- if (PFM_CMD_STOPPED(cmd)) {
- if (!task_is_stopped_or_traced(task)) {
- DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
- return -EBUSY;
- }
- /*
- * task is now stopped, wait for ctxsw out
- *
- * This is an interesting point in the code.
- * We need to unprotect the context because
-	 * the pfm_save_regs() routine needs to grab
-	 * the same lock. There is danger in doing
- * this because it leaves a window open for
- * another task to get access to the context
- * and possibly change its state. The one thing
- * that is not possible is for the context to disappear
- * because we are protected by the VFS layer, i.e.,
- * get_fd()/put_fd().
- */
- old_state = state;
-
- UNPROTECT_CTX(ctx, flags);
-
- wait_task_inactive(task, 0);
-
- PROTECT_CTX(ctx, flags);
-
- /*
- * we must recheck to verify if state has changed
- */
- if (ctx->ctx_state != old_state) {
- DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
- goto recheck;
- }
- }
- return 0;
-}
-
-/*
- * system-call entry point (must return long)
- */
-asmlinkage long
-sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
-{
- struct fd f = {NULL, 0};
- pfm_context_t *ctx = NULL;
- unsigned long flags = 0UL;
- void *args_k = NULL;
- long ret; /* will expand int return types */
- size_t base_sz, sz, xtra_sz = 0;
- int narg, completed_args = 0, call_made = 0, cmd_flags;
- int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
- int (*getsize)(void *arg, size_t *sz);
-#define PFM_MAX_ARGSIZE 4096
-
- /*
- * reject any call if perfmon was disabled at initialization
- */
- if (unlikely(pmu_conf == NULL)) return -ENOSYS;
-
- if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
- DPRINT(("invalid cmd=%d\n", cmd));
- return -EINVAL;
- }
-
- func = pfm_cmd_tab[cmd].cmd_func;
- narg = pfm_cmd_tab[cmd].cmd_narg;
- base_sz = pfm_cmd_tab[cmd].cmd_argsize;
- getsize = pfm_cmd_tab[cmd].cmd_getsize;
- cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
-
- if (unlikely(func == NULL)) {
- DPRINT(("invalid cmd=%d\n", cmd));
- return -EINVAL;
- }
-
- DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
- PFM_CMD_NAME(cmd),
- cmd,
- narg,
- base_sz,
- count));
-
- /*
- * check if number of arguments matches what the command expects
- */
- if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
- return -EINVAL;
-
-restart_args:
- sz = xtra_sz + base_sz*count;
- /*
- * limit abuse to min page size
- */
- if (unlikely(sz > PFM_MAX_ARGSIZE)) {
- printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
- return -E2BIG;
- }
-
- /*
- * allocate default-sized argument buffer
- */
- if (likely(count && args_k == NULL)) {
- args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
- if (args_k == NULL) return -ENOMEM;
- }
-
- ret = -EFAULT;
-
- /*
- * copy arguments
- *
- * assume sz = 0 for command without parameters
- */
- if (sz && copy_from_user(args_k, arg, sz)) {
- DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
- goto error_args;
- }
-
- /*
- * check if command supports extra parameters
- */
- if (completed_args == 0 && getsize) {
- /*
- * get extra parameters size (based on main argument)
- */
- ret = (*getsize)(args_k, &xtra_sz);
- if (ret) goto error_args;
-
- completed_args = 1;
-
- DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
-
- /* retry if necessary */
- if (likely(xtra_sz)) goto restart_args;
- }
-
- if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
-
- ret = -EBADF;
-
- f = fdget(fd);
- if (unlikely(f.file == NULL)) {
- DPRINT(("invalid fd %d\n", fd));
- goto error_args;
- }
- if (unlikely(PFM_IS_FILE(f.file) == 0)) {
- DPRINT(("fd %d not related to perfmon\n", fd));
- goto error_args;
- }
-
- ctx = f.file->private_data;
- if (unlikely(ctx == NULL)) {
- DPRINT(("no context for fd %d\n", fd));
- goto error_args;
- }
- prefetch(&ctx->ctx_state);
-
- PROTECT_CTX(ctx, flags);
-
- /*
- * check task is stopped
- */
- ret = pfm_check_task_state(ctx, cmd, flags);
- if (unlikely(ret)) goto abort_locked;
-
-skip_fd:
- ret = (*func)(ctx, args_k, count, task_pt_regs(current));
-
- call_made = 1;
-
-abort_locked:
- if (likely(ctx)) {
- DPRINT(("context unlocked\n"));
- UNPROTECT_CTX(ctx, flags);
- }
-
- /* copy argument back to user, if needed */
- if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
-
-error_args:
- if (f.file)
- fdput(f);
-
- kfree(args_k);
-
- DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
-
- return ret;
-}
-
-static void
-pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
-{
- pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
- pfm_ovfl_ctrl_t rst_ctrl;
- int state;
- int ret = 0;
-
- state = ctx->ctx_state;
- /*
- * Unlock sampling buffer and reset index atomically
- * XXX: not really needed when blocking
- */
- if (CTX_HAS_SMPL(ctx)) {
-
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 0;
-
- if (state == PFM_CTX_LOADED)
- ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- else
- ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- } else {
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 1;
- }
-
- if (ret == 0) {
- if (rst_ctrl.bits.reset_ovfl_pmds) {
- pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
- }
- if (rst_ctrl.bits.mask_monitoring == 0) {
- DPRINT(("resuming monitoring\n"));
- if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
- } else {
- DPRINT(("stopping monitoring\n"));
- //pfm_stop_monitoring(current, regs);
- }
- ctx->ctx_state = PFM_CTX_LOADED;
- }
-}
-
-/*
- * context MUST BE LOCKED when calling
- * can only be called for current
- */
-static void
-pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
-{
- int ret;
-
- DPRINT(("entering for [%d]\n", task_pid_nr(current)));
-
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
- }
-
- /*
- * and wakeup controlling task, indicating we are now disconnected
- */
- wake_up_interruptible(&ctx->ctx_zombieq);
-
- /*
- * given that context is still locked, the controlling
- * task will only get access when we return from
- * pfm_handle_work().
- */
-}
-
-static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
-
- /*
- * pfm_handle_work() can be called with interrupts enabled
- * (TIF_NEED_RESCHED) or disabled. The down_interruptible
- * call may sleep, therefore we must re-enable interrupts
- * to avoid deadlocks. It is safe to do so because this function
- * is called ONLY when returning to user level (pUStk=1), in which case
- * there is no risk of kernel stack overflow due to deep
- * interrupt nesting.
- */
-void
-pfm_handle_work(void)
-{
- pfm_context_t *ctx;
- struct pt_regs *regs;
- unsigned long flags, dummy_flags;
- unsigned long ovfl_regs;
- unsigned int reason;
- int ret;
-
- ctx = PFM_GET_CTX(current);
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
- task_pid_nr(current));
- return;
- }
-
- PROTECT_CTX(ctx, flags);
-
- PFM_SET_WORK_PENDING(current, 0);
-
- regs = task_pt_regs(current);
-
- /*
- * extract reason for being here and clear
- */
- reason = ctx->ctx_fl_trap_reason;
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- ovfl_regs = ctx->ctx_ovfl_regs[0];
-
- DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
-
- /*
- * must be done before we check for simple-reset mode
- */
- if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
- goto do_zombie;
-
- //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
- if (reason == PFM_TRAP_REASON_RESET)
- goto skip_blocking;
-
- /*
- * restore interrupt mask to what it was on entry.
-	 * Could be enabled/disabled.
- */
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * force interrupt enable because of down_interruptible()
- */
- local_irq_enable();
-
- DPRINT(("before block sleeping\n"));
-
- /*
- * may go through without blocking on SMP systems
- * if restart has been received already by the time we call down()
- */
- ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
-
- DPRINT(("after block sleeping ret=%d\n", ret));
-
- /*
- * lock context and mask interrupts again
- * We save flags into a dummy because we may have
-	 * altered the interrupt mask compared to entry into this
- * function.
- */
- PROTECT_CTX(ctx, dummy_flags);
-
- /*
- * we need to read the ovfl_regs only after wake-up
- * because we may have had pfm_write_pmds() in between
-	 * and that can change PMD values and therefore
- * ovfl_regs is reset for these new PMD values.
- */
- ovfl_regs = ctx->ctx_ovfl_regs[0];
-
- if (ctx->ctx_fl_going_zombie) {
-do_zombie:
- DPRINT(("context is zombie, bailing out\n"));
- pfm_context_force_terminate(ctx, regs);
- goto nothing_to_do;
- }
- /*
- * in case of interruption of down() we don't restart anything
- */
- if (ret < 0)
- goto nothing_to_do;
-
-skip_blocking:
- pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
- ctx->ctx_ovfl_regs[0] = 0UL;
-
-nothing_to_do:
- /*
- * restore flags as they were upon entry
- */
- UNPROTECT_CTX(ctx, flags);
-}
-
-static int
-pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
-{
- if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
- DPRINT(("ignoring overflow notification, owner is zombie\n"));
- return 0;
- }
-
- DPRINT(("waking up somebody\n"));
-
- if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
-
- /*
- * safe, we are not in intr handler, nor in ctxsw when
- * we come here
- */
- kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
-
- return 0;
-}
-
-static int
-pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
-{
- pfm_msg_t *msg = NULL;
-
- if (ctx->ctx_fl_no_msg == 0) {
- msg = pfm_get_new_msg(ctx);
- if (msg == NULL) {
- printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
- return -1;
- }
-
- msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
- msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
- msg->pfm_ovfl_msg.msg_active_set = 0;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
- msg->pfm_ovfl_msg.msg_tstamp = 0UL;
- }
-
- DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
- msg,
- ctx->ctx_fl_no_msg,
- ctx->ctx_fd,
- ovfl_pmds));
-
- return pfm_notify_user(ctx, msg);
-}
-
-static int
-pfm_end_notify_user(pfm_context_t *ctx)
-{
- pfm_msg_t *msg;
-
- msg = pfm_get_new_msg(ctx);
- if (msg == NULL) {
- printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
- return -1;
- }
- /* no leak */
- memset(msg, 0, sizeof(*msg));
-
- msg->pfm_end_msg.msg_type = PFM_MSG_END;
- msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
- msg->pfm_ovfl_msg.msg_tstamp = 0UL;
-
- DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
- msg,
- ctx->ctx_fl_no_msg,
- ctx->ctx_fd));
-
- return pfm_notify_user(ctx, msg);
-}
-
-/*
- * main overflow processing routine.
- * it can be called from the interrupt path or explicitly during the context switch code
- */
-static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
- unsigned long pmc0, struct pt_regs *regs)
-{
- pfm_ovfl_arg_t *ovfl_arg;
- unsigned long mask;
- unsigned long old_val, ovfl_val, new_val;
- unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
- unsigned long tstamp;
- pfm_ovfl_ctrl_t ovfl_ctrl;
- unsigned int i, has_smpl;
- int must_notify = 0;
-
- if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
-
- /*
- * sanity test. Should never happen
- */
- if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
-
- tstamp = ia64_get_itc();
- mask = pmc0 >> PMU_FIRST_COUNTER;
- ovfl_val = pmu_conf->ovfl_val;
- has_smpl = CTX_HAS_SMPL(ctx);
-
- DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
- "used_pmds=0x%lx\n",
- pmc0,
- task ? task_pid_nr(task): -1,
- (regs ? regs->cr_iip : 0),
- CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
- ctx->ctx_used_pmds[0]));
-
-
- /*
- * first we update the virtual counters
- * assume there was a prior ia64_srlz_d() issued
- */
- for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
-
- /* skip pmd which did not overflow */
- if ((mask & 0x1) == 0) continue;
-
- /*
- * Note that the pmd is not necessarily 0 at this point as qualified events
- * may have happened before the PMU was frozen. The residual count is not
- * taken into consideration here but will be with any read of the pmd via
- * pfm_read_pmds().
- */
- old_val = new_val = ctx->ctx_pmds[i].val;
- new_val += 1 + ovfl_val;
- ctx->ctx_pmds[i].val = new_val;
-
- /*
- * check for overflow condition
- */
- if (likely(old_val > new_val)) {
- ovfl_pmds |= 1UL << i;
- if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
- }
-
- DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
- i,
- new_val,
- old_val,
- ia64_get_pmd(i) & ovfl_val,
- ovfl_pmds,
- ovfl_notify));
- }
-
- /*
- * there was no 64-bit overflow, nothing else to do
- */
- if (ovfl_pmds == 0UL) return;
-
- /*
- * reset all control bits
- */
- ovfl_ctrl.val = 0;
- reset_pmds = 0UL;
-
- /*
- * if a sampling format module exists, then we "cache" the overflow by
- * calling the module's handler() routine.
- */
- if (has_smpl) {
- unsigned long start_cycles, end_cycles;
- unsigned long pmd_mask;
- int j, k, ret = 0;
- int this_cpu = smp_processor_id();
-
- pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
- ovfl_arg = &ctx->ctx_ovfl_arg;
-
- prefetch(ctx->ctx_smpl_hdr);
-
- for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
-
- mask = 1UL << i;
-
- if ((pmd_mask & 0x1) == 0) continue;
-
- ovfl_arg->ovfl_pmd = (unsigned char )i;
- ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
- ovfl_arg->active_set = 0;
- ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
- ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
-
- ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
- ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
- ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
-
- /*
- * copy values of pmds of interest. Sampling format may copy them
- * into sampling buffer.
- */
- if (smpl_pmds) {
- for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
- if ((smpl_pmds & 0x1) == 0) continue;
- ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
- DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
- }
- }
-
- pfm_stats[this_cpu].pfm_smpl_handler_calls++;
-
- start_cycles = ia64_get_itc();
-
- /*
- * call custom buffer format record (handler) routine
- */
- ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
-
- end_cycles = ia64_get_itc();
-
- /*
- * For those controls, we take the union because they have
- * an all or nothing behavior.
- */
- ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
- ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
- ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
- /*
- * build the bitmask of pmds to reset now
- */
- if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
-
- pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
- }
- /*
- * when the module cannot handle the rest of the overflows, we abort right here
- */
- if (ret && pmd_mask) {
- DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
- pmd_mask<<PMU_FIRST_COUNTER));
- }
- /*
- * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
- */
- ovfl_pmds &= ~reset_pmds;
- } else {
- /*
- * when no sampling module is used, then the default
- * is to notify on overflow if requested by user
- */
- ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
- ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
- ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
- ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
- /*
- * if needed, we reset all overflowed pmds
- */
- if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
- }
-
- DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
-
- /*
- * reset the requested PMD registers using the short reset values
- */
- if (reset_pmds) {
- unsigned long bm = reset_pmds;
- pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
- }
-
- if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
- /*
- * keep track of what to reset when unblocking
- */
- ctx->ctx_ovfl_regs[0] = ovfl_pmds;
-
- /*
- * check for blocking context
- */
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
-
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
-
- /*
- * set the perfmon specific checking pending work for the task
- */
- PFM_SET_WORK_PENDING(task, 1);
-
- /*
- * when coming from ctxsw, current still points to the
- * previous task, therefore we must work with task and not current.
- */
- set_notify_resume(task);
- }
- /*
- * defer until state is changed (shorten spin window). the context is locked
-	 * anyway, so the signal receiver would just spin for nothing.
- */
- must_notify = 1;
- }
-
- DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
- GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
- PFM_GET_WORK_PENDING(task),
- ctx->ctx_fl_trap_reason,
- ovfl_pmds,
- ovfl_notify,
- ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
- /*
- * in case monitoring must be stopped, we toggle the psr bits
- */
- if (ovfl_ctrl.bits.mask_monitoring) {
- pfm_mask_monitoring(task);
- ctx->ctx_state = PFM_CTX_MASKED;
- ctx->ctx_fl_can_restart = 1;
- }
-
- /*
- * send notification now
- */
- if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
-
- return;
-
-sanity_check:
- printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
- smp_processor_id(),
- task ? task_pid_nr(task) : -1,
- pmc0);
- return;
-
-stop_monitoring:
- /*
- * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
- * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
- * come here as zombie only if the task is the current task. In which case, we
- * can access the PMU hardware directly.
- *
- * Note that zombies do have PM_VALID set. So here we do the minimal.
- *
- * In case the context was zombified it could not be reclaimed at the time
- * the monitoring program exited. At this point, the PMU reservation has been
-	 * returned, the sampling buffer has been freed. We must convert this call
- * into a spurious interrupt. However, we must also avoid infinite overflows
- * by stopping monitoring for this task. We can only come here for a per-task
- * context. All we need to do is to stop monitoring using the psr bits which
-	 * are always task private. By re-enabling secure monitoring, we ensure that
- * the monitored task will not be able to re-activate monitoring.
- * The task will eventually be context switched out, at which point the context
- * will be reclaimed (that includes releasing ownership of the PMU).
- *
-	 * So there might be a window of time where the number of per-task sessions is zero
-	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
-	 * context. This is safe because if a per-task session comes in, it will push this one
-	 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system-wide
-	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
- * also push our zombie context out.
- *
- * Overall pretty hairy stuff....
- */
- DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
- pfm_clear_psr_up();
- ia64_psr(regs)->up = 0;
- ia64_psr(regs)->sp = 1;
- return;
-}
-
-static int
-pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
-{
- struct task_struct *task;
- pfm_context_t *ctx;
- unsigned long flags;
- u64 pmc0;
- int this_cpu = smp_processor_id();
- int retval = 0;
-
- pfm_stats[this_cpu].pfm_ovfl_intr_count++;
-
- /*
- * srlz.d done before arriving here
- */
- pmc0 = ia64_get_pmc(0);
-
- task = GET_PMU_OWNER();
- ctx = GET_PMU_CTX();
-
- /*
- * if we have some pending bits set
- * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
- */
- if (PMC0_HAS_OVFL(pmc0) && task) {
- /*
- * we assume that pmc0.fr is always set here
- */
-
- /* sanity check */
- if (!ctx) goto report_spurious1;
-
- if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
- goto report_spurious2;
-
- PROTECT_CTX_NOPRINT(ctx, flags);
-
- pfm_overflow_handler(task, ctx, pmc0, regs);
-
- UNPROTECT_CTX_NOPRINT(ctx, flags);
-
- } else {
- pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
- retval = -1;
- }
- /*
- * keep it unfrozen at all times
- */
- pfm_unfreeze_pmu();
-
- return retval;
-
-report_spurious1:
- printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
- this_cpu, task_pid_nr(task));
- pfm_unfreeze_pmu();
- return -1;
-report_spurious2:
- printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
- this_cpu,
- task_pid_nr(task));
- pfm_unfreeze_pmu();
- return -1;
-}
-
-static irqreturn_t
-pfm_interrupt_handler(int irq, void *arg)
-{
- unsigned long start_cycles, total_cycles;
- unsigned long min, max;
- int this_cpu;
- int ret;
- struct pt_regs *regs = get_irq_regs();
-
- this_cpu = get_cpu();
- if (likely(!pfm_alt_intr_handler)) {
- min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
- max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
-
- start_cycles = ia64_get_itc();
-
- ret = pfm_do_interrupt_handler(arg, regs);
-
- total_cycles = ia64_get_itc();
-
- /*
- * don't measure spurious interrupts
- */
- if (likely(ret == 0)) {
- total_cycles -= start_cycles;
-
- if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
- if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
-
- pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
- }
- }
- else {
- (*pfm_alt_intr_handler->handler)(irq, arg, regs);
- }
-
- put_cpu();
- return IRQ_HANDLED;
-}
-
-/*
- * /proc/perfmon interface, for debug only
- */
-
-#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
-
-static void *
-pfm_proc_start(struct seq_file *m, loff_t *pos)
-{
- if (*pos == 0) {
- return PFM_PROC_SHOW_HEADER;
- }
-
- while (*pos <= nr_cpu_ids) {
- if (cpu_online(*pos - 1)) {
- return (void *)*pos;
- }
- ++*pos;
- }
- return NULL;
-}
-
-static void *
-pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return pfm_proc_start(m, pos);
-}
-
-static void
-pfm_proc_stop(struct seq_file *m, void *v)
-{
-}
-
-static void
-pfm_proc_show_header(struct seq_file *m)
-{
- struct list_head * pos;
- pfm_buffer_fmt_t * entry;
- unsigned long flags;
-
- seq_printf(m,
- "perfmon version : %u.%u\n"
- "model : %s\n"
- "fastctxsw : %s\n"
- "expert mode : %s\n"
- "ovfl_mask : 0x%lx\n"
- "PMU flags : 0x%x\n",
- PFM_VERSION_MAJ, PFM_VERSION_MIN,
- pmu_conf->pmu_name,
- pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
- pfm_sysctl.expert_mode > 0 ? "Yes": "No",
- pmu_conf->ovfl_val,
- pmu_conf->flags);
-
- LOCK_PFS(flags);
-
- seq_printf(m,
- "proc_sessions : %u\n"
- "sys_sessions : %u\n"
- "sys_use_dbregs : %u\n"
- "ptrace_use_dbregs : %u\n",
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- pfm_sessions.pfs_ptrace_use_dbregs);
-
- UNLOCK_PFS(flags);
-
- spin_lock(&pfm_buffer_fmt_lock);
-
- list_for_each(pos, &pfm_buffer_fmt_list) {
- entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
- seq_printf(m, "format : %16phD %s\n",
- entry->fmt_uuid, entry->fmt_name);
- }
- spin_unlock(&pfm_buffer_fmt_lock);
-
-}
-
-static int
-pfm_proc_show(struct seq_file *m, void *v)
-{
- unsigned long psr;
- unsigned int i;
- int cpu;
-
- if (v == PFM_PROC_SHOW_HEADER) {
- pfm_proc_show_header(m);
- return 0;
- }
-
- /* show info for CPU (v - 1) */
-
- cpu = (long)v - 1;
- seq_printf(m,
- "CPU%-2d overflow intrs : %lu\n"
- "CPU%-2d overflow cycles : %lu\n"
- "CPU%-2d overflow min : %lu\n"
- "CPU%-2d overflow max : %lu\n"
- "CPU%-2d smpl handler calls : %lu\n"
- "CPU%-2d smpl handler cycles : %lu\n"
- "CPU%-2d spurious intrs : %lu\n"
- "CPU%-2d replay intrs : %lu\n"
- "CPU%-2d syst_wide : %d\n"
- "CPU%-2d dcr_pp : %d\n"
- "CPU%-2d exclude idle : %d\n"
- "CPU%-2d owner : %d\n"
- "CPU%-2d context : %p\n"
- "CPU%-2d activations : %lu\n",
- cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
- cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
- cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
- cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
- cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
- cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
- cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
- cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
- cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
- cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
- cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
- cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
- cpu, pfm_get_cpu_data(pmu_ctx, cpu),
- cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
-
- if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
-
- psr = pfm_get_psr();
-
- ia64_srlz_d();
-
- seq_printf(m,
- "CPU%-2d psr : 0x%lx\n"
- "CPU%-2d pmc0 : 0x%lx\n",
- cpu, psr,
- cpu, ia64_get_pmc(0));
-
- for (i=0; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_COUNTING(i) == 0) continue;
- seq_printf(m,
- "CPU%-2d pmc%u : 0x%lx\n"
- "CPU%-2d pmd%u : 0x%lx\n",
- cpu, i, ia64_get_pmc(i),
- cpu, i, ia64_get_pmd(i));
- }
- }
- return 0;
-}
-
-const struct seq_operations pfm_seq_ops = {
- .start = pfm_proc_start,
- .next = pfm_proc_next,
- .stop = pfm_proc_stop,
- .show = pfm_proc_show
-};
-
-/*
- * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
- * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
- * is active or inactive based on mode. We must rely on the value in
- * local_cpu_data->pfm_syst_info
- */
-void
-pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
-{
- struct pt_regs *regs;
- unsigned long dcr;
- unsigned long dcr_pp;
-
- dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
-
- /*
- * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
- * on every CPU, so we can rely on the pid to identify the idle task.
- */
- if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
- regs = task_pt_regs(task);
- ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
- return;
- }
- /*
- * if monitoring has started
- */
- if (dcr_pp) {
- dcr = ia64_getreg(_IA64_REG_CR_DCR);
- /*
- * context switching in?
- */
- if (is_ctxswin) {
- /* mask monitoring for the idle task */
- ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
- pfm_clear_psr_pp();
- ia64_srlz_i();
- return;
- }
- /*
- * context switching out
- * restore monitoring for next task
- *
- * Due to inlining this odd if-then-else construction generates
- * better code.
- */
- ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
- pfm_set_psr_pp();
- ia64_srlz_i();
- }
-}
-
-#ifdef CONFIG_SMP
-
-static void
-pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
-{
- struct task_struct *task = ctx->ctx_task;
-
- ia64_psr(regs)->up = 0;
- ia64_psr(regs)->sp = 1;
-
- if (GET_PMU_OWNER() == task) {
- DPRINT(("cleared ownership for [%d]\n",
- task_pid_nr(ctx->ctx_task)));
- SET_PMU_OWNER(NULL, NULL);
- }
-
- /*
- * disconnect the task from the context and vice-versa
- */
- PFM_SET_WORK_PENDING(task, 0);
-
- task->thread.pfm_context = NULL;
- task->thread.flags &= ~IA64_THREAD_PM_VALID;
-
- DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
-}
-
-
-/*
- * in 2.6, interrupts are masked when we come here and the runqueue lock is held
- */
-void
-pfm_save_regs(struct task_struct *task)
-{
- pfm_context_t *ctx;
- unsigned long flags;
- u64 psr;
-
-
- ctx = PFM_GET_CTX(task);
- if (ctx == NULL) return;
-
- /*
- * we always come here with interrupts ALREADY disabled by
- * the scheduler. So we simply need to protect against concurrent
- * access, not CPU concurrency.
- */
- flags = pfm_protect_ctx_ctxsw(ctx);
-
- if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
- struct pt_regs *regs = task_pt_regs(task);
-
- pfm_clear_psr_up();
-
- pfm_force_cleanup(ctx, regs);
-
- BUG_ON(ctx->ctx_smpl_hdr);
-
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-
- pfm_context_free(ctx);
- return;
- }
-
- /*
- * save current PSR: needed because we modify it
- */
- ia64_srlz_d();
- psr = pfm_get_psr();
-
- BUG_ON(psr & (IA64_PSR_I));
-
- /*
- * stop monitoring:
- * This is the last instruction which may generate an overflow
- *
-	 * We do not need to set psr.sp because it is irrelevant in the kernel.
- * It will be restored from ipsr when going back to user level
- */
- pfm_clear_psr_up();
-
- /*
- * keep a copy of psr.up (for reload)
- */
- ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
-
- /*
- * release ownership of this PMU.
- * PM interrupts are masked, so nothing
- * can happen.
- */
- SET_PMU_OWNER(NULL, NULL);
-
- /*
- * we systematically save the PMD as we have no
-	 * guarantee we will be scheduled on that same
- * CPU again.
- */
- pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
-
- /*
- * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
- * we will need it on the restore path to check
- * for pending overflow.
- */
- ctx->th_pmcs[0] = ia64_get_pmc(0);
-
- /*
-	 * unfreeze PMU if it had pending overflows
- */
- if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
-
- /*
- * finally, allow context access.
- * interrupts will still be masked after this call.
- */
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-}
-
-#else /* !CONFIG_SMP */
-void
-pfm_save_regs(struct task_struct *task)
-{
- pfm_context_t *ctx;
- u64 psr;
-
- ctx = PFM_GET_CTX(task);
- if (ctx == NULL) return;
-
- /*
- * save current PSR: needed because we modify it
- */
- psr = pfm_get_psr();
-
- BUG_ON(psr & (IA64_PSR_I));
-
- /*
- * stop monitoring:
- * This is the last instruction which may generate an overflow
- *
-	 * We do not need to set psr.sp because it is irrelevant in the kernel.
- * It will be restored from ipsr when going back to user level
- */
- pfm_clear_psr_up();
-
- /*
- * keep a copy of psr.up (for reload)
- */
- ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
-}
-
-static void
-pfm_lazy_save_regs (struct task_struct *task)
-{
- pfm_context_t *ctx;
- unsigned long flags;
-
- { u64 psr = pfm_get_psr();
- BUG_ON(psr & IA64_PSR_UP);
- }
-
- ctx = PFM_GET_CTX(task);
-
- /*
- * we need to mask PMU overflow here to
- * make sure that we maintain pmc0 until
- * we save it. overflow interrupts are
- * treated as spurious if there is no
- * owner.
- *
- * XXX: I don't think this is necessary
- */
- PROTECT_CTX(ctx,flags);
-
- /*
- * release ownership of this PMU.
- * must be done before we save the registers.
- *
- * after this call any PMU interrupt is treated
- * as spurious.
- */
- SET_PMU_OWNER(NULL, NULL);
-
- /*
- * save all the pmds we use
- */
- pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
-
- /*
- * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
-	 * it is needed to check for pending overflow
- * on the restore path
- */
- ctx->th_pmcs[0] = ia64_get_pmc(0);
-
- /*
-	 * unfreeze PMU if it had pending overflows
- */
- if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
-
- /*
-	 * now we can unmask PMU interrupts, they will
- * be treated as purely spurious and we will not
- * lose any information
- */
- UNPROTECT_CTX(ctx,flags);
-}
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_SMP
-/*
- * in 2.6, interrupts are masked when we come here and the runqueue lock is held
- */
-void
-pfm_load_regs (struct task_struct *task)
-{
- pfm_context_t *ctx;
- unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
- unsigned long flags;
- u64 psr, psr_up;
- int need_irq_resend;
-
- ctx = PFM_GET_CTX(task);
- if (unlikely(ctx == NULL)) return;
-
- BUG_ON(GET_PMU_OWNER());
-
- /*
- * possible on unload
- */
- if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
-
- /*
- * we always come here with interrupts ALREADY disabled by
- * the scheduler. So we simply need to protect against concurrent
- * access, not CPU concurrency.
- */
- flags = pfm_protect_ctx_ctxsw(ctx);
- psr = pfm_get_psr();
-
- need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
-
- BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
- BUG_ON(psr & IA64_PSR_I);
-
- if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
- struct pt_regs *regs = task_pt_regs(task);
-
- BUG_ON(ctx->ctx_smpl_hdr);
-
- pfm_force_cleanup(ctx, regs);
-
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-
- /*
- * this one (kmalloc'ed) is fine with interrupts disabled
- */
- pfm_context_free(ctx);
-
- return;
- }
-
- /*
- * we restore ALL the debug registers to avoid picking up
- * stale state.
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
- /*
- * retrieve saved psr.up
- */
- psr_up = ctx->ctx_saved_psr_up;
-
- /*
- * if we were the last user of the PMU on that CPU,
- * then nothing to do except restore psr
- */
- if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
-
- /*
- * retrieve partial reload masks (due to user modifications)
- */
- pmc_mask = ctx->ctx_reload_pmcs[0];
- pmd_mask = ctx->ctx_reload_pmds[0];
-
- } else {
- /*
- * To avoid leaking information to the user level when psr.sp=0,
- * we must reload ALL implemented pmds (even the ones we don't use).
- * In the kernel we only allow PFM_READ_PMDS on registers which
- * we initialized or requested (sampling) so there is no risk there.
- */
- pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
-
- /*
- * ALL accessible PMCs are systematically reloaded, unused registers
- * get their default (from pfm_reset_pmu_state()) values to avoid picking
- * up stale configuration.
- *
- * PMC0 is never in the mask. It is always restored separately.
- */
- pmc_mask = ctx->ctx_all_pmcs[0];
- }
- /*
- * when context is MASKED, we will restore PMC with plm=0
- * and PMD with stale information, but that's ok, nothing
- * will be captured.
- *
- * XXX: optimize here
- */
- if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
- if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
-
- /*
- * check for pending overflow at the time the state
- * was saved.
- */
- if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
- /*
- * reload pmc0 with the overflow information
- * On McKinley PMU, this will trigger a PMU interrupt
- */
- ia64_set_pmc(0, ctx->th_pmcs[0]);
- ia64_srlz_d();
- ctx->th_pmcs[0] = 0UL;
-
- /*
- * will replay the PMU interrupt
- */
- if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
-
- pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
- }
-
- /*
- * we just did a reload, so we reset the partial reload fields
- */
- ctx->ctx_reload_pmcs[0] = 0UL;
- ctx->ctx_reload_pmds[0] = 0UL;
-
- SET_LAST_CPU(ctx, smp_processor_id());
-
- /*
- * dump activation value for this PMU
- */
- INC_ACTIVATION();
- /*
- * record current activation for this context
- */
- SET_ACTIVATION(ctx);
-
- /*
- * establish new ownership.
- */
- SET_PMU_OWNER(task, ctx);
-
- /*
- * restore the psr.up bit. measurement
- * is active again.
- * no PMU interrupt can happen at this point
- * because we still have interrupts disabled.
- */
- if (likely(psr_up)) pfm_set_psr_up();
-
- /*
- * allow concurrent access to context
- */
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-}
-#else /* !CONFIG_SMP */
-/*
- * reload PMU state for UP kernels
- * in 2.5 we come here with interrupts disabled
- */
-void
-pfm_load_regs (struct task_struct *task)
-{
- pfm_context_t *ctx;
- struct task_struct *owner;
- unsigned long pmd_mask, pmc_mask;
- u64 psr, psr_up;
- int need_irq_resend;
-
- owner = GET_PMU_OWNER();
- ctx = PFM_GET_CTX(task);
- psr = pfm_get_psr();
-
- BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
- BUG_ON(psr & IA64_PSR_I);
-
- /*
- * we restore ALL the debug registers to avoid picking up
- * stale state.
- *
- * This must be done even when the task is still the owner
- * as the registers may have been modified via ptrace()
- * (not perfmon) by the previous task.
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
-
- /*
-	 * retrieve saved psr.up
- */
- psr_up = ctx->ctx_saved_psr_up;
- need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
-
- /*
- * short path, our state is still there, just
- * need to restore psr and we go
- *
- * we do not touch either PMC nor PMD. the psr is not touched
-	 * by the overflow_handler. So we are safe w.r.t. interrupt
- * concurrency even without interrupt masking.
- */
- if (likely(owner == task)) {
- if (likely(psr_up)) pfm_set_psr_up();
- return;
- }
-
- /*
- * someone else is still using the PMU, first push it out and
- * then we'll be able to install our stuff !
- *
- * Upon return, there will be no owner for the current PMU
- */
- if (owner) pfm_lazy_save_regs(owner);
-
- /*
- * To avoid leaking information to the user level when psr.sp=0,
- * we must reload ALL implemented pmds (even the ones we don't use).
- * In the kernel we only allow PFM_READ_PMDS on registers which
- * we initialized or requested (sampling) so there is no risk there.
- */
- pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
-
- /*
- * ALL accessible PMCs are systematically reloaded, unused registers
- * get their default (from pfm_reset_pmu_state()) values to avoid picking
- * up stale configuration.
- *
- * PMC0 is never in the mask. It is always restored separately
- */
- pmc_mask = ctx->ctx_all_pmcs[0];
-
- pfm_restore_pmds(ctx->th_pmds, pmd_mask);
- pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
-
- /*
- * check for pending overflow at the time the state
- * was saved.
- */
- if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
- /*
- * reload pmc0 with the overflow information
- * On McKinley PMU, this will trigger a PMU interrupt
- */
- ia64_set_pmc(0, ctx->th_pmcs[0]);
- ia64_srlz_d();
-
- ctx->th_pmcs[0] = 0UL;
-
- /*
- * will replay the PMU interrupt
- */
- if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
-
- pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
- }
-
- /*
- * establish new ownership.
- */
- SET_PMU_OWNER(task, ctx);
-
- /*
- * restore the psr.up bit. measurement
- * is active again.
- * no PMU interrupt can happen at this point
- * because we still have interrupts disabled.
- */
- if (likely(psr_up)) pfm_set_psr_up();
-}
-#endif /* CONFIG_SMP */
-
-/*
- * this function assumes monitoring is stopped
- */
-static void
-pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
-{
- u64 pmc0;
- unsigned long mask2, val, pmd_val, ovfl_val;
- int i, can_access_pmu = 0;
- int is_self;
-
- /*
- * is the caller the task being monitored (or which initiated the
- * session for system wide measurements)
- */
- is_self = ctx->ctx_task == task ? 1 : 0;
-
- /*
-	 * can access PMU if task is the owner of the PMU state on the current CPU
- * or if we are running on the CPU bound to the context in system-wide mode
- * (that is not necessarily the task the context is attached to in this mode).
- * In system-wide we always have can_access_pmu true because a task running on an
- * invalid processor is flagged earlier in the call stack (see pfm_stop).
- */
- can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
- if (can_access_pmu) {
- /*
- * Mark the PMU as not owned
- * This will cause the interrupt handler to do nothing in case an overflow
- * interrupt was in-flight
- * This also guarantees that pmc0 will contain the final state
- * It virtually gives us full control on overflow processing from that point
- * on.
- */
- SET_PMU_OWNER(NULL, NULL);
- DPRINT(("releasing ownership\n"));
-
- /*
- * read current overflow status:
- *
- * we are guaranteed to read the final stable state
- */
- ia64_srlz_d();
- pmc0 = ia64_get_pmc(0); /* slow */
-
- /*
- * reset freeze bit, overflow status information destroyed
- */
- pfm_unfreeze_pmu();
- } else {
- pmc0 = ctx->th_pmcs[0];
- /*
- * clear whatever overflow status bits there were
- */
- ctx->th_pmcs[0] = 0;
- }
- ovfl_val = pmu_conf->ovfl_val;
- /*
- * we save all the used pmds
- * we take care of overflows for counting PMDs
- *
- * XXX: sampling situation is not taken into account here
- */
- mask2 = ctx->ctx_used_pmds[0];
-
- DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
-
- for (i = 0; mask2; i++, mask2>>=1) {
-
- /* skip non used pmds */
- if ((mask2 & 0x1) == 0) continue;
-
- /*
- * can access PMU always true in system wide mode
- */
- val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
-
- if (PMD_IS_COUNTING(i)) {
- DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
- task_pid_nr(task),
- i,
- ctx->ctx_pmds[i].val,
- val & ovfl_val));
-
- /*
- * we rebuild the full 64 bit value of the counter
- */
- val = ctx->ctx_pmds[i].val + (val & ovfl_val);
-
- /*
- * now everything is in ctx_pmds[] and we need
- * to clear the saved context from save_regs() such that
- * pfm_read_pmds() gets the correct value
- */
- pmd_val = 0UL;
-
- /*
- * take care of overflow inline
- */
- if (pmc0 & (1UL << i)) {
- val += 1 + ovfl_val;
- DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
- }
- }
-
- DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
-
- if (is_self) ctx->th_pmds[i] = pmd_val;
-
- ctx->ctx_pmds[i].val = val;
- }
-}
-
-static void
-pfm_alt_save_pmu_state(void *data)
-{
- struct pt_regs *regs;
-
- regs = task_pt_regs(current);
-
- DPRINT(("called\n"));
-
- /*
- * should not be necessary but
-	 * let's take no risk
- */
- pfm_clear_psr_up();
- pfm_clear_psr_pp();
- ia64_psr(regs)->pp = 0;
-
- /*
- * This call is required
- * May cause a spurious interrupt on some processors
- */
- pfm_freeze_pmu();
-
- ia64_srlz_d();
-}
-
-void
-pfm_alt_restore_pmu_state(void *data)
-{
- struct pt_regs *regs;
-
- regs = task_pt_regs(current);
-
- DPRINT(("called\n"));
-
- /*
- * put PMU back in state expected
- * by perfmon
- */
- pfm_clear_psr_up();
- pfm_clear_psr_pp();
- ia64_psr(regs)->pp = 0;
-
- /*
- * perfmon runs with PMU unfrozen at all times
- */
- pfm_unfreeze_pmu();
-
- ia64_srlz_d();
-}
-
-int
-pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
-{
- int ret, i;
- int reserve_cpu;
-
- /* some sanity checks */
- if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
-
- /* do the easy test first */
- if (pfm_alt_intr_handler) return -EBUSY;
-
- /* one at a time in the install or remove, just fail the others */
- if (!spin_trylock(&pfm_alt_install_check)) {
- return -EBUSY;
- }
-
- /* reserve our session */
- for_each_online_cpu(reserve_cpu) {
- ret = pfm_reserve_session(NULL, 1, reserve_cpu);
- if (ret) goto cleanup_reserve;
- }
-
- /* save the current system wide pmu states */
- on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
-
- /* officially change to the alternate interrupt handler */
- pfm_alt_intr_handler = hdl;
-
- spin_unlock(&pfm_alt_install_check);
-
- return 0;
-
-cleanup_reserve:
- for_each_online_cpu(i) {
- /* don't unreserve more than we reserved */
- if (i >= reserve_cpu) break;
-
- pfm_unreserve_session(NULL, 1, i);
- }
-
- spin_unlock(&pfm_alt_install_check);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
-
-int
-pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
-{
- int i;
-
- if (hdl == NULL) return -EINVAL;
-
- /* cannot remove someone else's handler! */
- if (pfm_alt_intr_handler != hdl) return -EINVAL;
-
- /* one at a time in the install or remove, just fail the others */
- if (!spin_trylock(&pfm_alt_install_check)) {
- return -EBUSY;
- }
-
- pfm_alt_intr_handler = NULL;
-
- on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
-
- for_each_online_cpu(i) {
- pfm_unreserve_session(NULL, 1, i);
- }
-
- spin_unlock(&pfm_alt_install_check);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
-
-/*
- * perfmon initialization routine, called from the initcall() table
- */
-static int init_pfm_fs(void);
-
-static int __init
-pfm_probe_pmu(void)
-{
- pmu_config_t **p;
- int family;
-
- family = local_cpu_data->family;
- p = pmu_confs;
-
- while(*p) {
- if ((*p)->probe) {
- if ((*p)->probe() == 0) goto found;
- } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
- goto found;
- }
- p++;
- }
- return -1;
-found:
- pmu_conf = *p;
- return 0;
-}
-
-int __init
-pfm_init(void)
-{
- unsigned int n, n_counters, i;
-
- printk("perfmon: version %u.%u IRQ %u\n",
- PFM_VERSION_MAJ,
- PFM_VERSION_MIN,
- IA64_PERFMON_VECTOR);
-
- if (pfm_probe_pmu()) {
- printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
- local_cpu_data->family);
- return -ENODEV;
- }
-
- /*
- * compute the number of implemented PMD/PMC from the
- * description tables
- */
- n = 0;
- for (i=0; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_IMPL(i) == 0) continue;
- pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
- n++;
- }
- pmu_conf->num_pmcs = n;
-
- n = 0; n_counters = 0;
- for (i=0; PMD_IS_LAST(i) == 0; i++) {
- if (PMD_IS_IMPL(i) == 0) continue;
- pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
- n++;
- if (PMD_IS_COUNTING(i)) n_counters++;
- }
- pmu_conf->num_pmds = n;
- pmu_conf->num_counters = n_counters;
-
- /*
- * sanity checks on the number of debug registers
- */
- if (pmu_conf->use_rr_dbregs) {
- if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
- printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
- pmu_conf = NULL;
- return -1;
- }
- if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
- printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
- pmu_conf = NULL;
- return -1;
- }
- }
-
- printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
- pmu_conf->pmu_name,
- pmu_conf->num_pmcs,
- pmu_conf->num_pmds,
- pmu_conf->num_counters,
- ffz(pmu_conf->ovfl_val));
-
- /* sanity check */
- if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
- printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
- pmu_conf = NULL;
- return -1;
- }
-
- /*
- * create /proc/perfmon (mostly for debugging purposes)
- */
- perfmon_dir = proc_create_seq("perfmon", S_IRUGO, NULL, &pfm_seq_ops);
- if (perfmon_dir == NULL) {
- printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
- pmu_conf = NULL;
- return -1;
- }
-
- /*
- * create /proc/sys/kernel/perfmon (for debugging purposes)
- */
- pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
-
- /*
- * initialize all our spinlocks
- */
- spin_lock_init(&pfm_sessions.pfs_lock);
- spin_lock_init(&pfm_buffer_fmt_lock);
-
- init_pfm_fs();
-
- for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
-
- return 0;
-}
-
-__initcall(pfm_init);
-
-/*
- * this function is called before pfm_init()
- */
-void
-pfm_init_percpu (void)
-{
- static int first_time=1;
- /*
- * make sure no measurement is active
- * (may inherit programmed PMCs from EFI).
- */
- pfm_clear_psr_pp();
- pfm_clear_psr_up();
-
- /*
- * we run with the PMU not frozen at all times
- */
- pfm_unfreeze_pmu();
-
- if (first_time) {
- register_percpu_irq(IA64_PERFMON_VECTOR, pfm_interrupt_handler,
- 0, "perfmon");
- first_time=0;
- }
-
- ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
- ia64_srlz_d();
-}
-
-/*
- * used for debug purposes only
- */
-void
-dump_pmu_state(const char *from)
-{
- struct task_struct *task;
- struct pt_regs *regs;
- pfm_context_t *ctx;
- unsigned long psr, dcr, info, flags;
- int i, this_cpu;
-
- local_irq_save(flags);
-
- this_cpu = smp_processor_id();
- regs = task_pt_regs(current);
- info = PFM_CPUINFO_GET();
- dcr = ia64_getreg(_IA64_REG_CR_DCR);
-
- if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
- local_irq_restore(flags);
- return;
- }
-
- printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
- this_cpu,
- from,
- task_pid_nr(current),
- regs->cr_iip,
- current->comm);
-
- task = GET_PMU_OWNER();
- ctx = GET_PMU_CTX();
-
- printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
-
- psr = pfm_get_psr();
-
- printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
- this_cpu,
- ia64_get_pmc(0),
- psr & IA64_PSR_PP ? 1 : 0,
- psr & IA64_PSR_UP ? 1 : 0,
- dcr & IA64_DCR_PP ? 1 : 0,
- info,
- ia64_psr(regs)->up,
- ia64_psr(regs)->pp);
-
- ia64_psr(regs)->up = 0;
- ia64_psr(regs)->pp = 0;
-
- for (i=1; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_IMPL(i) == 0) continue;
- printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
- }
-
- for (i=1; PMD_IS_LAST(i) == 0; i++) {
- if (PMD_IS_IMPL(i) == 0) continue;
- printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
- }
-
- if (ctx) {
- printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
- this_cpu,
- ctx->ctx_state,
- ctx->ctx_smpl_vaddr,
- ctx->ctx_smpl_hdr,
- ctx->ctx_msgq_head,
- ctx->ctx_msgq_tail,
- ctx->ctx_saved_psr_up);
- }
- local_irq_restore(flags);
-}
-
-/*
- * called from process.c:copy_thread(). task is new child.
- */
-void
-pfm_inherit(struct task_struct *task, struct pt_regs *regs)
-{
- struct thread_struct *thread;
-
- DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
-
- thread = &task->thread;
-
- /*
- * cut links inherited from parent (current)
- */
- thread->pfm_context = NULL;
-
- PFM_SET_WORK_PENDING(task, 0);
-
- /*
- * the psr bits are already set properly in copy_threads()
- */
-}
-#else /* !CONFIG_PERFMON */
-asmlinkage long
-sys_perfmonctl (int fd, int cmd, void *arg, int count)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_PERFMON */
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index f19cb97c0098..f25f2f723196 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -51,10 +51,6 @@
#include "entry.h"
-#ifdef CONFIG_PERFMON
-# include <asm/perfmon.h>
-#endif
-
#include "sigframe.h"
void (*ia64_mark_idle)(int);
@@ -174,15 +170,6 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
return;
}
-#ifdef CONFIG_PERFMON
- if (current->thread.pfm_needs_checking)
- /*
-	 * Note: pfm_handle_work() allows us to call it with interrupts
- * disabled, and may enable interrupts within the function.
- */
- pfm_handle_work();
-#endif
-
/* deal with pending signal delivery */
if (test_thread_flag(TIF_SIGPENDING)) {
local_irq_enable(); /* force interrupt enable */
@@ -264,41 +251,15 @@ void arch_cpu_idle(void)
void
ia64_save_extra (struct task_struct *task)
{
-#ifdef CONFIG_PERFMON
- unsigned long info;
-#endif
-
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
ia64_save_debug_regs(&task->thread.dbr[0]);
-
-#ifdef CONFIG_PERFMON
- if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
- pfm_save_regs(task);
-
- info = __this_cpu_read(pfm_syst_info);
- if (info & PFM_CPUINFO_SYST_WIDE)
- pfm_syst_wide_update_task(task, info, 0);
-#endif
}
void
ia64_load_extra (struct task_struct *task)
{
-#ifdef CONFIG_PERFMON
- unsigned long info;
-#endif
-
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
ia64_load_debug_regs(&task->thread.dbr[0]);
-
-#ifdef CONFIG_PERFMON
- if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
- pfm_load_regs(task);
-
- info = __this_cpu_read(pfm_syst_info);
- if (info & PFM_CPUINFO_SYST_WIDE)
- pfm_syst_wide_update_task(task, info, 1);
-#endif
}
/*
@@ -310,7 +271,7 @@ ia64_load_extra (struct task_struct *task)
*
* <clone syscall> <some kernel call frames>
* sys_clone :
- * _do_fork _do_fork
+ * kernel_clone kernel_clone
* copy_thread copy_thread
*
* This means that the stack layout is as follows:
@@ -432,11 +393,6 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
*/
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
-
-#ifdef CONFIG_PERFMON
- if (current->thread.pfm_context)
- pfm_inherit(p, child_ptregs);
-#endif
return retval;
}
@@ -455,7 +411,7 @@ asmlinkage long ia64_clone(unsigned long clone_flags, unsigned long stack_start,
.tls = tls,
};
- return _do_fork(&args);
+ return kernel_clone(&args);
}
static void
@@ -563,15 +519,6 @@ exit_thread (struct task_struct *tsk)
{
ia64_drop_fpu(tsk);
-#ifdef CONFIG_PERFMON
- /* if needed, stop monitoring and flush state to perfmon context */
- if (tsk->thread.pfm_context)
- pfm_exit_thread(tsk);
-
- /* free debug register resources */
- if (tsk->thread.flags & IA64_THREAD_DBG_VALID)
- pfm_release_debug_registers(tsk);
-#endif
}
unsigned long
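
With the rename, the ia64 clone wrapper now funnels through struct kernel_clone_args instead of the old positional _do_fork() arguments. A minimal sketch of the calling shape (field selection here is an illustration; only .tls and the kernel_clone() call are visible in the hunk above):

	struct kernel_clone_args args = {
		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
		.stack		= stack_start,
		.tls		= tls,
	};

	/* kernel_clone() replaces _do_fork(); it takes the args struct by pointer */
	return kernel_clone(&args);
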
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 33ca9fa0fbf5..75c070aed81e 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -30,9 +30,6 @@
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
-#ifdef CONFIG_PERFMON
-#include <asm/perfmon.h>
-#endif
#include "entry.h"
@@ -1951,27 +1948,6 @@ access_uarea(struct task_struct *child, unsigned long addr,
"address 0x%lx\n", addr);
return -1;
}
-#ifdef CONFIG_PERFMON
- /*
- * Check if debug registers are used by perfmon. This
- * test must be done once we know that we can do the
- * operation, i.e. the arguments are all valid, but
- * before we start modifying the state.
- *
- * Perfmon needs to keep a count of how many processes
- * are trying to modify the debug registers for system
- * wide monitoring sessions.
- *
- * We also include read access here, because they may
- * cause the PMU-installed debug register state
- * (dbr[], ibr[]) to be reset. The two arrays are also
- * used by perfmon, but we do not use
- * IA64_THREAD_DBG_VALID. The registers are restored
- * by the PMU context switch code.
- */
- if (pfm_use_debug_registers(child))
- return -1;
-#endif
if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
child->thread.flags |= IA64_THREAD_DBG_VALID;
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index d07ed65c9c6e..e67b22fc3c60 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -374,7 +374,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
/* note: scr->pt.r10 is already -1 */
break;
}
- /*FALLTHRU*/
+ fallthrough;
case ERESTARTNOINTR:
ia64_decrement_ip(&scr->pt);
restart = 0; /* don't restart twice if handle_signal() fails... */
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index c29c600d7967..093040f7e626 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -355,10 +355,6 @@ smp_callin (void)
extern void ia64_init_itm(void);
extern volatile int time_keeper_id;
-#ifdef CONFIG_PERFMON
- extern void pfm_init_percpu(void);
-#endif
-
cpuid = smp_processor_id();
phys_id = hard_smp_processor_id();
itc_master = time_keeper_id;
@@ -389,10 +385,6 @@ smp_callin (void)
ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
-#ifdef CONFIG_PERFMON
- pfm_init_percpu();
-#endif
-
local_irq_enable();
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index f52a41f4c340..4799c96c325f 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -160,7 +160,7 @@
148 common mmap2 sys_mmap2
149 common pciconfig_read sys_pciconfig_read
150 common pciconfig_write sys_pciconfig_write
-151 common perfmonctl sys_perfmonctl
+151 common perfmonctl sys_ni_syscall
152 common sigaltstack sys_sigaltstack
153 common rt_sigaction sys_rt_sigaction
154 common rt_sigpending sys_rt_sigpending
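
With slot 151 now wired to sys_ni_syscall, a perfmonctl() call from userspace simply fails with ENOSYS. A small, self-contained userspace check (illustrative; it is only meaningful on an ia64 kernel carrying this change, since syscall number 151 maps to something else on other architectures):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* 151 is the ia64 perfmonctl slot from the table above */
		long ret = syscall(151, -1, 0, (void *)0, 0);

		if (ret == -1 && errno == ENOSYS)
			printf("perfmonctl: not implemented (ENOSYS), as expected\n");
		return 0;
	}
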
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 2d4e65ba5c3e..6c1a8951dfbb 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1431,7 +1431,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LDS_IMM_OP:
case LDSA_IMM_OP:
case LDFS_OP:
@@ -1459,7 +1459,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LD_IMM_OP:
case LDA_IMM_OP:
case LDBIAS_IMM_OP:
@@ -1475,7 +1475,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case ST_IMM_OP:
case STREL_IMM_OP:
ret = emulate_store_int(ifa, u.insn, regs);
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 7601fe0622d2..6bd64c35e691 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -324,7 +324,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
return 0;
}
}
- /* fall through */
+ fallthrough;
case UNW_NAT_NONE:
dummy_nat = 0;
nat_addr = &dummy_nat;
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d259690eb91a..9b265783be6a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -218,6 +218,7 @@ SECTIONS {
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
/* Default discards */
DISCARDS
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 82118b38532f..081fcba01dc0 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -12,7 +12,6 @@ lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
-lib-$(CONFIG_PERFMON) += carta_random.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/lib/carta_random.S b/arch/ia64/lib/carta_random.S
deleted file mode 100644
index 1a4a639dc42f..000000000000
--- a/arch/ia64/lib/carta_random.S
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Fast, simple, yet decent quality random number generator based on
- * a paper by David G. Carta ("Two Fast Implementations of the
- * `Minimal Standard' Random Number Generator," Communications of the
- * ACM, January, 1990).
- *
- * Copyright (C) 2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <asm/asmmacro.h>
-
-#define a r2
-#define m r3
-#define lo r8
-#define hi r9
-#define t0 r16
-#define t1 r17
-#define seed r32
-
-GLOBAL_ENTRY(carta_random32)
- movl a = (16807 << 16) | 16807
- ;;
- pmpyshr2.u t0 = a, seed, 0
- pmpyshr2.u t1 = a, seed, 16
- ;;
- unpack2.l t0 = t1, t0
- dep m = -1, r0, 0, 31
- ;;
- zxt4 lo = t0
- shr.u hi = t0, 32
- ;;
- dep t0 = 0, hi, 15, 49 // t0 = (hi & 0x7fff)
- ;;
- shl t0 = t0, 16 // t0 = (hi & 0x7fff) << 16
- shr t1 = hi, 15 // t1 = (hi >> 15)
- ;;
- add lo = lo, t0
- ;;
- cmp.gtu p6, p0 = lo, m
- ;;
-(p6) and lo = lo, m
- ;;
-(p6) add lo = 1, lo
- ;;
- add lo = lo, t1
- ;;
- cmp.gtu p6, p0 = lo, m
- ;;
-(p6) and lo = lo, m
- ;;
-(p6) add lo = 1, lo
- br.ret.sptk.many rp
-END(carta_random32)
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 6e82e0be8040..917e3138b277 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -96,18 +96,3 @@ unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum)
out:
return result;
}
-
-/*
- * XXX Fixme
- *
- * This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS.
- * But it's very tricky to get right even in C.
- */
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 0b3fb4c7af29..8e7b8c6c576e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start),
- MEMMAP_EARLY, NULL);
+ MEMINIT_EARLY, NULL);
return 0;
}
@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
- NULL);
+ memmap_init_zone(size, nid, zone, start_pfn,
+ MEMINIT_EARLY, NULL);
} else {
struct page *start;
struct memmap_init_callback_data args;
diff --git a/arch/ia64/oprofile/Makefile b/arch/ia64/oprofile/Makefile
index cd134d6643bf..fc7944d462f4 100644
--- a/arch/ia64/oprofile/Makefile
+++ b/arch/ia64/oprofile/Makefile
@@ -8,4 +8,3 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_PERFMON) += perfmon.o
diff --git a/arch/ia64/oprofile/init.c b/arch/ia64/oprofile/init.c
index 31b545c35460..a692ba16a07b 100644
--- a/arch/ia64/oprofile/init.c
+++ b/arch/ia64/oprofile/init.c
@@ -18,21 +18,11 @@ extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth);
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- int ret = -ENODEV;
-
-#ifdef CONFIG_PERFMON
- /* perfmon_init() can fail, but we have no way to report it */
- ret = perfmon_init(ops);
-#endif
ops->backtrace = ia64_backtrace;
-
- return ret;
+ return -ENODEV;
}
void oprofile_arch_exit(void)
{
-#ifdef CONFIG_PERFMON
- perfmon_exit();
-#endif
}
diff --git a/arch/ia64/oprofile/perfmon.c b/arch/ia64/oprofile/perfmon.c
deleted file mode 100644
index 192d3e8e1f65..000000000000
--- a/arch/ia64/oprofile/perfmon.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * @file perfmon.c
- *
- * @remark Copyright 2003 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <asm/perfmon.h>
-#include <asm/ptrace.h>
-#include <asm/errno.h>
-
-static int allow_ints;
-
-static int
-perfmon_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
- struct pt_regs *regs, unsigned long stamp)
-{
- int event = arg->pmd_eventid;
-
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
-
- /* the owner of the oprofile event buffer may have exited
- * without perfmon being shutdown (e.g. SIGSEGV)
- */
- if (allow_ints)
- oprofile_add_sample(regs, event);
- return 0;
-}
-
-
-static int perfmon_start(void)
-{
- allow_ints = 1;
- return 0;
-}
-
-
-static void perfmon_stop(void)
-{
- allow_ints = 0;
-}
-
-
-#define OPROFILE_FMT_UUID { \
- 0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c }
-
-static pfm_buffer_fmt_t oprofile_fmt = {
- .fmt_name = "oprofile_format",
- .fmt_uuid = OPROFILE_FMT_UUID,
- .fmt_handler = perfmon_handler,
-};
-
-
-static char *get_cpu_type(void)
-{
- __u8 family = local_cpu_data->family;
-
- switch (family) {
- case 0x07:
- return "ia64/itanium";
- case 0x1f:
- return "ia64/itanium2";
- default:
- return "ia64/ia64";
- }
-}
-
-
-/* all the ops are handled via userspace for IA64 perfmon */
-
-static int using_perfmon;
-
-int perfmon_init(struct oprofile_operations *ops)
-{
- int ret = pfm_register_buffer_fmt(&oprofile_fmt);
- if (ret)
- return -ENODEV;
-
- ops->cpu_type = get_cpu_type();
- ops->start = perfmon_start;
- ops->stop = perfmon_stop;
- using_perfmon = 1;
- printk(KERN_INFO "oprofile: using perfmon.\n");
- return 0;
-}
-
-
-void perfmon_exit(void)
-{
- if (!using_perfmon)
- return;
-
- pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
-}
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6f2f38d05772..93bbb74ea876 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -6,32 +6,32 @@ config M68K
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select ARCH_NO_PREEMPT if !COLDFIRE
+ select ARCH_WANT_IPC_PARSE_VERSION
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
- select HAVE_IDE
- select HAVE_AOUT if MMU
- select HAVE_ASM_MODVERSIONS
- select HAVE_DEBUG_BUGVERBOSE
- select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
- select NO_DMA if !MMU && !COLDFIRE
- select HAVE_UID16
- select VIRT_TO_BUS
- select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_IOMAP
+ select GENERIC_IRQ_SHOW
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
- select ARCH_WANT_IPC_PARSE_VERSION
+ select HAVE_AOUT if MMU
+ select HAVE_ASM_MODVERSIONS
+ select HAVE_DEBUG_BUGVERBOSE
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
+ select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_UID16
+ select MMU_GATHER_NO_RANGE if MMU
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
- select OLD_SIGSUSPEND3
+ select NO_DMA if !MMU && !COLDFIRE
select OLD_SIGACTION
- select MMU_GATHER_NO_RANGE if MMU
+ select OLD_SIGSUSPEND3
+ select VIRT_TO_BUS
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 8f23b2fab64c..bee9f240f35d 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -214,7 +214,7 @@ static void __init amiga_identify(void)
switch (amiga_model) {
case AMI_UNKNOWN:
- goto Generic;
+ break;
case AMI_600:
case AMI_1200:
@@ -227,7 +227,7 @@ static void __init amiga_identify(void)
case AMI_2000:
case AMI_2500:
AMIGAHW_SET(A2000_CLK); /* Is this correct for all models? */
- goto Generic;
+ break;
case AMI_3000:
case AMI_3000T:
@@ -238,7 +238,7 @@ static void __init amiga_identify(void)
AMIGAHW_SET(A3000_SCSI);
AMIGAHW_SET(A3000_CLK);
AMIGAHW_SET(ZORRO3);
- goto Generic;
+ break;
case AMI_4000T:
AMIGAHW_SET(A4000_SCSI);
@@ -247,68 +247,12 @@ static void __init amiga_identify(void)
AMIGAHW_SET(A4000_IDE);
AMIGAHW_SET(A3000_CLK);
AMIGAHW_SET(ZORRO3);
- goto Generic;
+ break;
case AMI_CDTV:
case AMI_CD32:
AMIGAHW_SET(CD_ROM);
AMIGAHW_SET(A2000_CLK); /* Is this correct? */
- goto Generic;
-
- Generic:
- AMIGAHW_SET(AMI_VIDEO);
- AMIGAHW_SET(AMI_BLITTER);
- AMIGAHW_SET(AMI_AUDIO);
- AMIGAHW_SET(AMI_FLOPPY);
- AMIGAHW_SET(AMI_KEYBOARD);
- AMIGAHW_SET(AMI_MOUSE);
- AMIGAHW_SET(AMI_SERIAL);
- AMIGAHW_SET(AMI_PARALLEL);
- AMIGAHW_SET(CHIP_RAM);
- AMIGAHW_SET(PAULA);
-
- switch (amiga_chipset) {
- case CS_OCS:
- case CS_ECS:
- case CS_AGA:
- switch (amiga_custom.deniseid & 0xf) {
- case 0x0c:
- AMIGAHW_SET(DENISE_HR);
- break;
- case 0x08:
- AMIGAHW_SET(LISA);
- break;
- }
- break;
- default:
- AMIGAHW_SET(DENISE);
- break;
- }
- switch ((amiga_custom.vposr>>8) & 0x7f) {
- case 0x00:
- AMIGAHW_SET(AGNUS_PAL);
- break;
- case 0x10:
- AMIGAHW_SET(AGNUS_NTSC);
- break;
- case 0x20:
- case 0x21:
- AMIGAHW_SET(AGNUS_HR_PAL);
- break;
- case 0x30:
- case 0x31:
- AMIGAHW_SET(AGNUS_HR_NTSC);
- break;
- case 0x22:
- case 0x23:
- AMIGAHW_SET(ALICE_PAL);
- break;
- case 0x32:
- case 0x33:
- AMIGAHW_SET(ALICE_NTSC);
- break;
- }
- AMIGAHW_SET(ZORRO);
break;
case AMI_DRACO:
@@ -318,6 +262,60 @@ static void __init amiga_identify(void)
panic("Unknown Amiga Model");
}
+ AMIGAHW_SET(AMI_VIDEO);
+ AMIGAHW_SET(AMI_BLITTER);
+ AMIGAHW_SET(AMI_AUDIO);
+ AMIGAHW_SET(AMI_FLOPPY);
+ AMIGAHW_SET(AMI_KEYBOARD);
+ AMIGAHW_SET(AMI_MOUSE);
+ AMIGAHW_SET(AMI_SERIAL);
+ AMIGAHW_SET(AMI_PARALLEL);
+ AMIGAHW_SET(CHIP_RAM);
+ AMIGAHW_SET(PAULA);
+
+ switch (amiga_chipset) {
+ case CS_OCS:
+ case CS_ECS:
+ case CS_AGA:
+ switch (amiga_custom.deniseid & 0xf) {
+ case 0x0c:
+ AMIGAHW_SET(DENISE_HR);
+ break;
+ case 0x08:
+ AMIGAHW_SET(LISA);
+ break;
+ default:
+ AMIGAHW_SET(DENISE);
+ break;
+ }
+ break;
+ }
+ switch ((amiga_custom.vposr>>8) & 0x7f) {
+ case 0x00:
+ AMIGAHW_SET(AGNUS_PAL);
+ break;
+ case 0x10:
+ AMIGAHW_SET(AGNUS_NTSC);
+ break;
+ case 0x20:
+ case 0x21:
+ AMIGAHW_SET(AGNUS_HR_PAL);
+ break;
+ case 0x30:
+ case 0x31:
+ AMIGAHW_SET(AGNUS_HR_NTSC);
+ break;
+ case 0x22:
+ case 0x23:
+ AMIGAHW_SET(ALICE_PAL);
+ break;
+ case 0x32:
+ case 0x33:
+ AMIGAHW_SET(ALICE_NTSC);
+ break;
+ }
+ AMIGAHW_SET(ZORRO);
+
#define AMIGAHW_ANNOUNCE(name, str) \
if (AMIGAHW_PRESENT(name)) \
pr_cont(str)
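
The hunks above drop the shared Generic: label and its goto chain: each model case now just breaks, and the hardware flags common to all Amiga models are set once after the switch. Reduced to its general shape (names here are placeholders, not the real ones):

	/* before: every case ended in "goto common;" to reach the shared setup */
	switch (model) {
	case MODEL_A:
		setup_model_a();
		goto common;
	common:
		setup_shared();
		break;
	}

	/* after: cases break normally and the shared setup runs exactly once */
	switch (model) {
	case MODEL_A:
		setup_model_a();
		break;
	}
	setup_shared();
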
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 37091898adb3..5e0e682f9c61 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -207,7 +207,7 @@ repeat:
self_test_last_rcv = jiffies;
break;
}
- /* FALL THROUGH */
+ fallthrough;
default:
break_flag = scancode & BREAK_MASK;
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 522dcf624aa5..3cd76bfaee03 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -317,6 +317,7 @@ CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 2433409f4369..c3d6faa7894f 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -346,6 +346,7 @@ CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_GAYLE=y
CONFIG_BLK_DEV_BUDDHA=y
CONFIG_BLK_DEV_FALCON_IDE=y
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index 3f2c15d6f18c..692e7b6cc042 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -31,14 +31,13 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
extern __wsum csum_and_copy_from_user(const void __user *src,
void *dst,
- int len, __wsum sum,
- int *csum_err);
+ int len);
extern __wsum csum_partial_copy_nocheck(const void *src,
- void *dst, int len,
- __wsum sum);
+ void *dst, int len);
/*
* This is a version of ip_fast_csum() optimized for IP headers,
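
The new prototype drops both the caller-supplied seed and the error pointer: the copy is seeded internally (~0U in the m68k implementation further down) and a faulting copy is reported by returning 0. A caller under the new convention therefore looks roughly like this (sketch; the surrounding variable names are illustrative):

	__wsum csum;

	/* returns 0 if the copy from userspace faulted */
	csum = csum_and_copy_from_user(user_src, kernel_dst, len);
	if (!csum)
		return -EFAULT;
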
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 015f1ca38305..3689c6718c88 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -68,4 +68,12 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_DELAYED_TRACE (1 << TIF_DELAYED_TRACE)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+
#endif /* _ASM_M68K_THREAD_INFO_H */
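
The added _TIF_* definitions are just bit masks derived from the TIF_* bit numbers above, so a thread-flags word can be tested without open-coding the shifts. Illustrative use (the flag-word variable and callee are assumptions for the sketch):

	/* ti_flags is a snapshot of the thread_info flags word */
	if (ti_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME))
		do_notify_resume(regs);	/* handle pending signals / resume work */
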
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 29de2b3108ea..493c95db0e51 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -57,7 +57,7 @@
* Of course, readability is a subjective issue, so it will never be
* argued that that goal was accomplished. It was merely a goal.
* A key way to help make code more readable is to give good
- * documentation. So, the first thing you will find is exaustive
+ * documentation. So, the first thing you will find is exhaustive
* write-ups on the structure of the file, and the features of the
* functional subroutines.
*
@@ -1304,7 +1304,7 @@ L(mmu_fixup_done):
* mmu_engage
*
* This chunk of code performs the gruesome task of engaging the MMU.
- * The reason its gruesome is because when the MMU becomes engaged it
+ * The reason it's gruesome is because when the MMU becomes engaged it
* maps logical addresses to physical addresses. The Program Counter
* register is then passed through the MMU before the next instruction
* is fetched (the instruction following the engage MMU instruction).
@@ -1369,7 +1369,7 @@ L(mmu_fixup_done):
/*
* After this point no new memory is allocated and
* the start of available memory is stored in availmem.
- * (The bootmem allocator requires now the physicall address.)
+ * (The bootmem allocator requires now the physical address.)
*/
movel L(memory_start),availmem
@@ -1547,7 +1547,7 @@ func_return get_bi_record
* seven bits of the logical address (LA) are used as an
* index into the "root table." Each entry in the root
* table has a bit which specifies if it's a valid pointer to a
- * pointer table. Each entry defines a 32KMeg range of memory.
+ * pointer table. Each entry defines a 32Meg range of memory.
* If an entry is invalid then that logical range of 32M is
* invalid and references to that range of memory (when the MMU
* is enabled) will fault. If the entry is valid, then it does
@@ -1584,7 +1584,7 @@ func_return get_bi_record
* bits 17..12 - index into the Page Table
* bits 11..0 - offset into a particular 4K page
*
- * The algorithms which follows do one thing: they abstract
+ * The algorithms which follow do one thing: they abstract
* the MMU hardware. For example, there are three kinds of
* cache settings that are relevant. Either, memory is
* being mapped in which case it is either Kernel Code (or
@@ -2082,7 +2082,7 @@ func_return mmu_map_tt
* mmu_map
*
* This routine will map a range of memory using a pointer
- * table and allocating the pages on the fly from the kernel.
+ * table and allocate the pages on the fly from the kernel.
* The pointer table does not have to be already linked into
* the root table, this routine will do that if necessary.
*
@@ -2528,7 +2528,7 @@ func_start mmu_get_root_table_entry,%d0/%a1
/* Find the start of free memory, get_bi_record does this for us,
* as the bootinfo structure is located directly behind the kernel
- * and and we simply search for the last entry.
+ * we simply search for the last entry.
*/
get_bi_record BI_LAST
addw #PAGESIZE-1,%a0
@@ -2654,7 +2654,7 @@ func_start mmu_get_page_table_entry,%d0/%a1
jne 2f
/* If the page table entry doesn't exist, we allocate a complete new
- * page and use it as one continues big page table which can cover
+ * page and use it as one continuous big page table which can cover
* 4MB of memory, nearly almost all mappings have that alignment.
*/
get_new_page
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 6492a2c54dbc..08359a6e058f 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -107,10 +107,10 @@ void flush_thread(void)
* on top of pt_regs, which means that sys_clone() arguments would be
* buried. We could, of course, copy them, but it's too costly for no
* good reason - generic clone() would have to copy them *again* for
- * _do_fork() anyway. So in this case it's actually better to pass pt_regs *
- * and extract arguments for _do_fork() from there. Eventually we might
- * go for calling _do_fork() directly from the wrapper, but only after we
- * are finished with _do_fork() prototype conversion.
+ * kernel_clone() anyway. So in this case it's actually better to pass pt_regs *
+ * and extract arguments for kernel_clone() from there. Eventually we might
+ * go for calling kernel_clone() directly from the wrapper, but only after we
+ * are finished with kernel_clone() prototype conversion.
*/
asmlinkage int m68k_clone(struct pt_regs *regs)
{
@@ -125,7 +125,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
.tls = regs->d5,
};
- return _do_fork(&args);
+ return kernel_clone(&args);
}
/*
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index fc034fd19798..a98fca977073 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -1067,7 +1067,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->d0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->d0 = regs->orig_d0;
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 546e81935fe8..9e1261462bcc 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -845,7 +845,6 @@ static void show_trace(unsigned long *stack, const char *loglvl)
void show_registers(struct pt_regs *regs)
{
struct frame *fp = (struct frame *)regs;
- mm_segment_t old_fs = get_fs();
u16 c, *cp;
unsigned long addr;
int i;
@@ -918,10 +917,9 @@ void show_registers(struct pt_regs *regs)
show_stack(NULL, (unsigned long *)addr, KERN_INFO);
pr_info("Code:");
- set_fs(KERNEL_DS);
cp = (u16 *)regs->pc;
for (i = -8; i < 16; i++) {
- if (get_user(c, cp + i) && i >= 0) {
+ if (get_kernel_nofault(c, cp + i) && i >= 0) {
pr_cont(" Bad PC value.");
break;
}
@@ -930,7 +928,6 @@ void show_registers(struct pt_regs *regs)
else
pr_cont(" <%04x>", c);
}
- set_fs(old_fs);
pr_cont("\n");
}
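
get_kernel_nofault() reads a kernel address that may be bad and returns non-zero instead of faulting, which removes the need for the old set_fs(KERNEL_DS)/get_user() pair. The core of the pattern used above, isolated as a sketch:

	u16 insn;

	/* probe a possibly-invalid kernel pointer without an address-space override */
	if (get_kernel_nofault(insn, (u16 *)regs->pc))
		pr_cont(" Bad PC value.");
	else
		pr_cont(" <%04x>", insn);
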
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index 31797be9a3dc..7e6afeae6217 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -129,8 +129,7 @@ EXPORT_SYMBOL(csum_partial);
*/
__wsum
-csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err)
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
/*
* GCC doesn't like more than 10 operands for the asm
@@ -138,6 +137,7 @@ csum_and_copy_from_user(const void __user *src, void *dst,
* code.
*/
unsigned long tmp1, tmp2;
+ __wsum sum = ~0U;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
@@ -236,84 +236,33 @@ csum_and_copy_from_user(const void __user *src, void *dst,
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"7:\t"
- "clrl %5\n" /* no error - clear return value */
- "8:\n"
".section .fixup,\"ax\"\n"
".even\n"
- /* If any exception occurs zero out the rest.
- Similarities with the code above are intentional :-) */
+ /* If any exception occurs, return 0 */
"90:\t"
- "clrw %3@+\n\t"
- "movel %1,%4\n\t"
- "lsrl #5,%1\n\t"
- "jeq 1f\n\t"
- "subql #1,%1\n"
- "91:\t"
- "clrl %3@+\n"
- "92:\t"
- "clrl %3@+\n"
- "93:\t"
- "clrl %3@+\n"
- "94:\t"
- "clrl %3@+\n"
- "95:\t"
- "clrl %3@+\n"
- "96:\t"
- "clrl %3@+\n"
- "97:\t"
- "clrl %3@+\n"
- "98:\t"
- "clrl %3@+\n\t"
- "dbra %1,91b\n\t"
- "clrw %1\n\t"
- "subql #1,%1\n\t"
- "jcc 91b\n"
- "1:\t"
- "movel %4,%1\n\t"
- "andw #0x1c,%4\n\t"
- "jeq 1f\n\t"
- "lsrw #2,%4\n\t"
- "subqw #1,%4\n"
- "99:\t"
- "clrl %3@+\n\t"
- "dbra %4,99b\n\t"
- "1:\t"
- "andw #3,%1\n\t"
- "jeq 9f\n"
- "100:\t"
- "clrw %3@+\n\t"
- "tstw %1\n\t"
- "jeq 9f\n"
- "101:\t"
- "clrb %3@+\n"
- "9:\t"
-#define STR(X) STR1(X)
-#define STR1(X) #X
- "moveq #-" STR(EFAULT) ",%5\n\t"
- "jra 8b\n"
+ "clrl %0\n"
+ "jra 7b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 10b,90b\n"
- ".long 11b,91b\n"
- ".long 12b,92b\n"
- ".long 13b,93b\n"
- ".long 14b,94b\n"
- ".long 15b,95b\n"
- ".long 16b,96b\n"
- ".long 17b,97b\n"
- ".long 18b,98b\n"
- ".long 19b,99b\n"
- ".long 20b,100b\n"
- ".long 21b,101b\n"
+ ".long 11b,90b\n"
+ ".long 12b,90b\n"
+ ".long 13b,90b\n"
+ ".long 14b,90b\n"
+ ".long 15b,90b\n"
+ ".long 16b,90b\n"
+ ".long 17b,90b\n"
+ ".long 18b,90b\n"
+ ".long 19b,90b\n"
+ ".long 20b,90b\n"
+ ".long 21b,90b\n"
".previous"
: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
"=&d" (tmp1), "=d" (tmp2)
: "0" (sum), "1" (len), "2" (src), "3" (dst)
);
- *csum_err = tmp2;
-
- return(sum);
+ return sum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -324,9 +273,10 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
unsigned long tmp1, tmp2;
+ __wsum sum = 0;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t"
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 5c9f3a2d6538..0ac53d87493c 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/vt_kern.h>
#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
@@ -940,6 +941,26 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
},
};
+static const struct resource mac_ide_quadra_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x104),
+ DEFINE_RES_IRQ(IRQ_NUBUS_F),
+};
+
+static const struct resource mac_ide_pb_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x104),
+ DEFINE_RES_IRQ(IRQ_NUBUS_C),
+};
+
+static const struct resource mac_pata_baboon_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
+ DEFINE_RES_IRQ(IRQ_BABOON_1),
+};
+
+static const struct pata_platform_info mac_pata_baboon_data __initconst = {
+ .ioport_shift = 2,
+};
+
int __init mac_platform_init(void)
{
phys_addr_t swim_base = 0;
@@ -1018,7 +1039,7 @@ int __init mac_platform_init(void)
*/
platform_device_register_simple("mac_scsi", 1,
mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc));
- /* fall through */
+ fallthrough;
case MAC_SCSI_OLD:
/* Addresses from Developer Notes for Duo System,
* PowerBook 180 & 160, 140 & 170, Macintosh IIsi
@@ -1049,6 +1070,26 @@ int __init mac_platform_init(void)
}
/*
+ * IDE device
+ */
+
+ switch (macintosh_config->ide_type) {
+ case MAC_IDE_QUADRA:
+ platform_device_register_simple("mac_ide", -1,
+ mac_ide_quadra_rsrc, ARRAY_SIZE(mac_ide_quadra_rsrc));
+ break;
+ case MAC_IDE_PB:
+ platform_device_register_simple("mac_ide", -1,
+ mac_ide_pb_rsrc, ARRAY_SIZE(mac_ide_pb_rsrc));
+ break;
+ case MAC_IDE_BABOON:
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_baboon_rsrc, ARRAY_SIZE(mac_pata_baboon_rsrc),
+ &mac_pata_baboon_data, sizeof(mac_pata_baboon_data));
+ break;
+ }
+
+ /*
* Ethernet device
*/
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
index 388780797f7d..4de6229c7bfd 100644
--- a/arch/m68k/mac/macboing.c
+++ b/arch/m68k/mac/macboing.c
@@ -116,7 +116,7 @@ static void mac_init_asc( void )
* support 16-bit stereo output, but only mono input."
*
* Technical Information Library (TIL) article number 16405.
- * http://support.apple.com/kb/TA32601
+ * https://support.apple.com/kb/TA32601
*
* --David Kilzer
*/
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 1f0fad2a98a0..ac77d73af19a 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -370,7 +370,7 @@ void via_nubus_irq_startup(int irq)
/* Allow NuBus slots 9 through F. */
via2[vDirA] &= 0x80 | ~(1 << irq_idx);
}
- /* fall through */
+ fallthrough;
case MAC_VIA_IICI:
via_irq_enable(irq);
break;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 795f483b1050..ef46e77e97a5 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -118,7 +118,7 @@ good_area:
pr_debug("do_page_fault: good_area\n");
switch (error_code & 3) {
default: /* 3: write, present */
- /* fall through */
+ fallthrough;
case 2: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto acc_err;
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 2b9cb4a62281..eac9dde65193 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -42,7 +42,7 @@ void __init paging_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
int i;
- empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!empty_zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 2bb006bdc31c..3a653f0a4188 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -226,8 +226,8 @@ static pte_t * __init kernel_page_table(void)
{
pte_t *pte_table = last_pte_table;
- if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
- pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (PAGE_ALIGNED(last_pte_table)) {
+ pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_table) {
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
@@ -274,9 +274,8 @@ static pmd_t * __init kernel_ptr_table(void)
}
last_pmd_table += PTRS_PER_PMD;
- if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
- last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
- PAGE_SIZE);
+ if (PAGE_ALIGNED(last_pmd_table)) {
+ last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!last_pmd_table)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d262ac0c8714..37bd6a5f38fb 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -26,6 +26,7 @@ config MICROBLAZE
select GENERIC_SCHED_CLOCK
select HAVE_ARCH_HASH
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_SECCOMP
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
@@ -120,23 +121,6 @@ config CMDLINE_FORCE
Set this to have arguments from the default kernel command string
override those passed by the boot loader.
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- default y
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
endmenu
menu "Kernel features"
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 2e87a9b6d312..63bce836b9f1 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += extable.h
-generic-y += hw_irq.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 65bf5fd8d473..4a96b59f0bee 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -249,7 +249,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->r3 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
/* offset of 4 bytes to re-execute trap (brki) instruction */
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 0880a003573d..0902c459c385 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -46,6 +46,9 @@ unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
@@ -105,15 +108,15 @@ static void __init paging_init(void)
void __init setup_memory(void)
{
- struct memblock_region *reg;
-
#ifndef CONFIG_MMU
u32 kernel_align_start, kernel_align_size;
+ phys_addr_t start, end;
+ u64 i;
/* Find main memory where is the kernel */
- for_each_memblock(memory, reg) {
- memory_start = (u32)reg->base;
- lowmem_size = reg->size;
+ for_each_mem_range(i, &start, &end) {
+ memory_start = start;
+ lowmem_size = end - start;
if ((memory_start <= (u32)_text) &&
((u32)_text <= (memory_start + lowmem_size - 1))) {
memory_size = lowmem_size;
@@ -161,17 +164,6 @@ void __init setup_memory(void)
pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
- /* Add active regions with valid PFNs */
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
-
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
- memblock_set_node(start_pfn << PAGE_SHIFT,
- (end_pfn - start_pfn) << PAGE_SHIFT,
- &memblock.memory, 0);
- }
-
paging_init();
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c95fa3a2484c..440614dc9de2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -86,6 +86,7 @@ config MIPS
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select PERF_USE_VMALLOC
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select RTC_LIB
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
@@ -877,6 +878,7 @@ config SNI_RM
select I8253
select I8259
select ISA
+ select MIPS_L1_CACHE_SHIFT_6
select SWAP_IO_SPACE if CPU_BIG_ENDIAN
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
@@ -3004,23 +3006,6 @@ config PHYSICAL_START
specified in the "crashkernel=YM@XM" command line boot parameter
passed to the panic-ed kernel).
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- default y
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
config MIPS_O32_FP64_SUPPORT
bool "Support for O32 binaries using 64-bit FP" if !CPU_MIPSR6
depends on 32BIT || MIPS32_O32
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 82627c264964..01427bde2397 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -148,7 +148,7 @@ void __init plat_mem_setup(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
+ if (c->cputype == CPU_74K) {
pr_info("Using bcma bus\n");
#ifdef CONFIG_BCM47XX_BCMA
bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 14ea680d180e..ad1aecc4b401 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -190,25 +190,25 @@ char *octeon_swiotlb;
void __init plat_swiotlb_setup(void)
{
- struct memblock_region *mem;
+ phys_addr_t start, end;
phys_addr_t max_addr;
phys_addr_t addr_size;
size_t swiotlbsize;
unsigned long swiotlb_nslabs;
+ u64 i;
max_addr = 0;
addr_size = 0;
- for_each_memblock(memory, mem) {
+ for_each_mem_range(i, &start, &end) {
/* These addresses map low for PCI. */
- if (mem->base > 0x410000000ull && !OCTEON_IS_OCTEON2())
+ if (start > 0x410000000ull && !OCTEON_IS_OCTEON2())
continue;
- addr_size += mem->size;
-
- if (max_addr < mem->base + mem->size)
- max_addr = mem->base + mem->size;
+ addr_size += (end - start);
+ if (max_addr < end)
+ max_addr = end;
}
swiotlbsize = PAGE_SIZE;
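
for_each_mem_range() yields physical start/end addresses directly, so the converted loops no longer dereference struct memblock_region, and (as the mips setup.c hunk below also relies on) regions marked NOMAP are skipped by default. The bare iteration pattern, as a sketch:

	phys_addr_t start, end;
	u64 i;

	/* walk every usable (non-NOMAP) memory range known to memblock */
	for_each_mem_range(i, &start, &end)
		pr_info("memory: %pa - %pa\n", &start, &end);
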
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 181f7d14efb9..5f80c28f5253 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -34,42 +34,17 @@
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
-__wsum __csum_partial_copy_kernel(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-
-__wsum __csum_partial_copy_from_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-__wsum __csum_partial_copy_to_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *err_ptr)
-{
- might_fault();
- if (uaccess_kernel())
- return __csum_partial_copy_kernel((__force void *)src, dst,
- len, sum, err_ptr);
- else
- return __csum_partial_copy_from_user((__force void *)src, dst,
- len, sum, err_ptr);
-}
+__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_from_user(src, dst, len, sum,
- err_ptr);
- if (len)
- *err_ptr = -EFAULT;
-
- return sum;
+ might_fault();
+ if (!access_ok(src, len))
+ return 0;
+ return __csum_partial_copy_from_user(src, dst, len);
}
/*
@@ -77,33 +52,24 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
*/
#define HAVE_CSUM_COPY_USER
static inline
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
might_fault();
- if (access_ok(dst, len)) {
- if (uaccess_kernel())
- return __csum_partial_copy_kernel(src,
- (__force void *)dst,
- len, sum, err_ptr);
- else
- return __csum_partial_copy_to_user(src,
- (__force void *)dst,
- len, sum, err_ptr);
- }
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return __csum_partial_copy_to_user(src, dst, len);
}
/*
* the same as csum_partial, but copies from user space (but on MIPS
* we have just one address space, so this is identical to the above)
*/
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ return __csum_partial_copy_nocheck(src, dst, len);
+}
/*
* Fold a partial checksum without adding pseudo headers
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 255afcdd79c9..65975712a22d 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -26,8 +26,6 @@ typedef s32 compat_caddr_t;
typedef struct {
s32 val[2];
} compat_fsid_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
struct compat_stat {
compat_dev_t st_dev;
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 75a7a382da09..3288cef4b168 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_34K:
case CPU_1004K:
case CPU_74K:
+ case CPU_1074K:
case CPU_M14KC:
case CPU_M14KEC:
case CPU_INTERAPTIV:
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 47a8ffc0b413..f5b8300f4573 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -137,6 +137,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return !(flags & 1);
}
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
#endif /* #ifndef __ASSEMBLY__ */
/*
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index d35eaed1668f..825d337a505a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -969,7 +969,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index b6e9c99b85a5..eb181224eb4c 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -26,7 +26,6 @@
#define cpu_has_counter 1
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_divec 0
-#define cpu_has_ejtag 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
#define cpu_has_mcheck 0
@@ -42,7 +41,6 @@
#define cpu_has_veic 0
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
-#define cpu_has_watch 1
#define cpu_has_wsbh 1
#define cpu_has_ic_fills_f_dc 1
#define cpu_hwrena_impl_bits 0xc0000000
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index f5e362f79701..bf2480923154 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -2,8 +2,6 @@
#ifndef __ASM_MACH_LOONGSON64_IRQ_H_
#define __ASM_MACH_LOONGSON64_IRQ_H_
-#include <boot_param.h>
-
/* cpu core interrupt numbers */
#define NR_IRQS_LEGACY 16
#define NR_MIPS_CPU_IRQS 8
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 3a25dbd3b3e9..5eaca4fe3f92 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -9,7 +9,6 @@
#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
#define _ASM_MACH_LOONGSON64_MMZONE_H
-#include <boot_param.h>
#define NODE_ADDRSPACE_SHIFT 44
#define NODE0_ADDRSPACE_OFFSET 0x000000000000UL
#define NODE1_ADDRSPACE_OFFSET 0x100000000000UL
diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h
index 7dd4a80e05d6..6f4ac854b12d 100644
--- a/arch/mips/include/asm/unroll.h
+++ b/arch/mips/include/asm/unroll.h
@@ -28,38 +28,38 @@
BUILD_BUG_ON(!__builtin_constant_p(times)); \
\
switch (times) { \
- case 32: fn(__VA_ARGS__); /* fall through */ \
- case 31: fn(__VA_ARGS__); /* fall through */ \
- case 30: fn(__VA_ARGS__); /* fall through */ \
- case 29: fn(__VA_ARGS__); /* fall through */ \
- case 28: fn(__VA_ARGS__); /* fall through */ \
- case 27: fn(__VA_ARGS__); /* fall through */ \
- case 26: fn(__VA_ARGS__); /* fall through */ \
- case 25: fn(__VA_ARGS__); /* fall through */ \
- case 24: fn(__VA_ARGS__); /* fall through */ \
- case 23: fn(__VA_ARGS__); /* fall through */ \
- case 22: fn(__VA_ARGS__); /* fall through */ \
- case 21: fn(__VA_ARGS__); /* fall through */ \
- case 20: fn(__VA_ARGS__); /* fall through */ \
- case 19: fn(__VA_ARGS__); /* fall through */ \
- case 18: fn(__VA_ARGS__); /* fall through */ \
- case 17: fn(__VA_ARGS__); /* fall through */ \
- case 16: fn(__VA_ARGS__); /* fall through */ \
- case 15: fn(__VA_ARGS__); /* fall through */ \
- case 14: fn(__VA_ARGS__); /* fall through */ \
- case 13: fn(__VA_ARGS__); /* fall through */ \
- case 12: fn(__VA_ARGS__); /* fall through */ \
- case 11: fn(__VA_ARGS__); /* fall through */ \
- case 10: fn(__VA_ARGS__); /* fall through */ \
- case 9: fn(__VA_ARGS__); /* fall through */ \
- case 8: fn(__VA_ARGS__); /* fall through */ \
- case 7: fn(__VA_ARGS__); /* fall through */ \
- case 6: fn(__VA_ARGS__); /* fall through */ \
- case 5: fn(__VA_ARGS__); /* fall through */ \
- case 4: fn(__VA_ARGS__); /* fall through */ \
- case 3: fn(__VA_ARGS__); /* fall through */ \
- case 2: fn(__VA_ARGS__); /* fall through */ \
- case 1: fn(__VA_ARGS__); /* fall through */ \
+ case 32: fn(__VA_ARGS__); fallthrough; \
+ case 31: fn(__VA_ARGS__); fallthrough; \
+ case 30: fn(__VA_ARGS__); fallthrough; \
+ case 29: fn(__VA_ARGS__); fallthrough; \
+ case 28: fn(__VA_ARGS__); fallthrough; \
+ case 27: fn(__VA_ARGS__); fallthrough; \
+ case 26: fn(__VA_ARGS__); fallthrough; \
+ case 25: fn(__VA_ARGS__); fallthrough; \
+ case 24: fn(__VA_ARGS__); fallthrough; \
+ case 23: fn(__VA_ARGS__); fallthrough; \
+ case 22: fn(__VA_ARGS__); fallthrough; \
+ case 21: fn(__VA_ARGS__); fallthrough; \
+ case 20: fn(__VA_ARGS__); fallthrough; \
+ case 19: fn(__VA_ARGS__); fallthrough; \
+ case 18: fn(__VA_ARGS__); fallthrough; \
+ case 17: fn(__VA_ARGS__); fallthrough; \
+ case 16: fn(__VA_ARGS__); fallthrough; \
+ case 15: fn(__VA_ARGS__); fallthrough; \
+ case 14: fn(__VA_ARGS__); fallthrough; \
+ case 13: fn(__VA_ARGS__); fallthrough; \
+ case 12: fn(__VA_ARGS__); fallthrough; \
+ case 11: fn(__VA_ARGS__); fallthrough; \
+ case 10: fn(__VA_ARGS__); fallthrough; \
+ case 9: fn(__VA_ARGS__); fallthrough; \
+ case 8: fn(__VA_ARGS__); fallthrough; \
+ case 7: fn(__VA_ARGS__); fallthrough; \
+ case 6: fn(__VA_ARGS__); fallthrough; \
+ case 5: fn(__VA_ARGS__); fallthrough; \
+ case 4: fn(__VA_ARGS__); fallthrough; \
+ case 3: fn(__VA_ARGS__); fallthrough; \
+ case 2: fn(__VA_ARGS__); fallthrough; \
+ case 1: fn(__VA_ARGS__); fallthrough; \
case 0: break; \
\
default: \
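
Each populated case deliberately falls through to the next smaller count, so replacing the comments with fallthrough; keeps -Wimplicit-fallthrough quiet without changing the expansion. How such an unroll helper gets used is not shown in this hunk; a purely hypothetical example:

	/* hypothetical helper expanded by the macro */
	static inline void ssnop(void)
	{
		__asm__ __volatile__("ssnop" ::: "memory");
	}

	static inline void four_ssnops(void)
	{
		/* expands ssnop() four times at compile time; the count must be constant */
		unroll(4, ssnop);
	}
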
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index d043c2f897fc..54dfba8fa77c 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -477,6 +477,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->regs[31] = (unsigned long)kretprobe_trampoline;
@@ -488,57 +489,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
- instruction_pointer(regs) = orig_ret_address;
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ instruction_pointer(regs) = __kretprobe_trampoline_handler(regs,
+ kretprobe_trampoline, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
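
Taken together, the two hunks above leave the MIPS side of kretprobes looking roughly like the sketch below (assembled from this patch for readability; the same consolidation is applied to parisc later in this section): the arch code only records the original return address and an optional frame pointer, and the trampoline handler defers all bookkeeping to the common __kretprobe_trampoline_handler() helper.

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[31];
	ri->fp = NULL;		/* NULL: no frame-pointer based matching */

	/* Replace the return address with the trampoline address */
	regs->regs[31] = (unsigned long)kretprobe_trampoline;
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	instruction_pointer(regs) = __kretprobe_trampoline_handler(regs,
					kretprobe_trampoline, NULL);
	/* non-zero: tell kprobe_handler() to skip post_handler/single-step */
	return 1;
}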
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index efce5defcc5c..011eb6bbf81a 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1898,8 +1898,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
(base_id >= 64 && base_id < 90) ||
(base_id >= 128 && base_id < 164) ||
(base_id >= 192 && base_id < 200) ||
- (base_id >= 256 && base_id < 274) ||
- (base_id >= 320 && base_id < 358) ||
+ (base_id >= 256 && base_id < 275) ||
+ (base_id >= 320 && base_id < 361) ||
(base_id >= 384 && base_id < 574))
break;
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index bf5f5acab0a8..335bd188b8b4 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -300,8 +300,9 @@ static void __init bootmem_init(void)
static void __init bootmem_init(void)
{
- struct memblock_region *mem;
phys_addr_t ramstart, ramend;
+ phys_addr_t start, end;
+ u64 i;
ramstart = memblock_start_of_DRAM();
ramend = memblock_end_of_DRAM();
@@ -338,18 +339,13 @@ static void __init bootmem_init(void)
min_low_pfn = ARCH_PFN_OFFSET;
max_pfn = PFN_DOWN(ramend);
- for_each_memblock(memory, mem) {
- unsigned long start = memblock_region_memory_base_pfn(mem);
- unsigned long end = memblock_region_memory_end_pfn(mem);
-
+ for_each_mem_range(i, &start, &end) {
/*
* Skip highmem here so we get an accurate max_low_pfn if low
* memory stops short of high memory.
* If the region overlaps HIGHMEM_START, end is clipped so
* max_pfn excludes the highmem portion.
*/
- if (memblock_is_nomap(mem))
- continue;
if (start >= PFN_DOWN(HIGHMEM_START))
continue;
if (end > PFN_DOWN(HIGHMEM_START))
@@ -450,13 +446,12 @@ early_param("memmap", early_parse_memmap);
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
- struct memblock_region *mem;
+ phys_addr_t start, end;
+ u64 i;
setup_elfcorehdr = memparse(p, &p);
- for_each_memblock(memory, mem) {
- unsigned long start = mem->base;
- unsigned long end = start + mem->size;
+ for_each_mem_range(i, &start, &end) {
if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
/*
* Reserve from the elf core header to the end of
@@ -720,7 +715,8 @@ static void __init arch_mem_init(char **cmdline_p)
static void __init resource_init(void)
{
- struct memblock_region *region;
+ phys_addr_t start, end;
+ u64 i;
if (UNCAC_BASE != IO_BASE)
return;
@@ -732,9 +728,7 @@ static void __init resource_init(void)
bss_resource.start = __pa_symbol(&__bss_start);
bss_resource.end = __pa_symbol(&__bss_stop) - 1;
- for_each_memblock(memory, region) {
- phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
- phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
+ for_each_mem_range(i, &start, &end) {
struct resource *res;
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
@@ -743,7 +737,12 @@ static void __init resource_init(void)
sizeof(struct resource));
res->start = start;
- res->end = end;
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in resources, end points to the last byte in
+ * the range.
+ */
+ res->end = end - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
res->name = "System RAM";
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 2f513506a3d5..1dbfb5aadffd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -239,6 +239,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
+ bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index f9df9edb67a4..cf72a0206a87 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -25,8 +25,8 @@
15 n32 ioctl compat_sys_ioctl
16 n32 pread64 sys_pread64
17 n32 pwrite64 sys_pwrite64
-18 n32 readv compat_sys_readv
-19 n32 writev compat_sys_writev
+18 n32 readv sys_readv
+19 n32 writev sys_writev
20 n32 access sys_access
21 n32 pipe sysm_pipe
22 n32 _newselect compat_sys_select
@@ -167,7 +167,7 @@
157 n32 sync sys_sync
158 n32 acct sys_acct
159 n32 settimeofday compat_sys_settimeofday
-160 n32 mount compat_sys_mount
+160 n32 mount sys_mount
161 n32 umount2 sys_umount
162 n32 swapon sys_swapon
163 n32 swapoff sys_swapoff
@@ -278,7 +278,7 @@
267 n32 splice sys_splice
268 n32 sync_file_range sys_sync_file_range
269 n32 tee sys_tee
-270 n32 vmsplice compat_sys_vmsplice
+270 n32 vmsplice sys_vmsplice
271 n32 move_pages compat_sys_move_pages
272 n32 set_robust_list compat_sys_set_robust_list
273 n32 get_robust_list compat_sys_get_robust_list
@@ -317,8 +317,8 @@
306 n32 syncfs sys_syncfs
307 n32 sendmmsg compat_sys_sendmmsg
308 n32 setns sys_setns
-309 n32 process_vm_readv compat_sys_process_vm_readv
-310 n32 process_vm_writev compat_sys_process_vm_writev
+309 n32 process_vm_readv sys_process_vm_readv
+310 n32 process_vm_writev sys_process_vm_writev
311 n32 kcmp sys_kcmp
312 n32 finit_module sys_finit_module
313 n32 sched_setattr sys_sched_setattr
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 195b43cf27c8..a17aab5abeb2 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -29,7 +29,7 @@
18 o32 unused18 sys_ni_syscall
19 o32 lseek sys_lseek
20 o32 getpid sys_getpid
-21 o32 mount sys_mount compat_sys_mount
+21 o32 mount sys_mount
22 o32 umount sys_oldumount
23 o32 setuid sys_setuid
24 o32 getuid sys_getuid
@@ -156,8 +156,8 @@
142 o32 _newselect sys_select compat_sys_select
143 o32 flock sys_flock
144 o32 msync sys_msync
-145 o32 readv sys_readv compat_sys_readv
-146 o32 writev sys_writev compat_sys_writev
+145 o32 readv sys_readv
+146 o32 writev sys_writev
147 o32 cacheflush sys_cacheflush
148 o32 cachectl sys_cachectl
149 o32 sysmips __sys_sysmips
@@ -318,7 +318,7 @@
304 o32 splice sys_splice
305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
306 o32 tee sys_tee
-307 o32 vmsplice sys_vmsplice compat_sys_vmsplice
+307 o32 vmsplice sys_vmsplice
308 o32 move_pages sys_move_pages compat_sys_move_pages
309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list
310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list
@@ -356,8 +356,8 @@
342 o32 syncfs sys_syncfs
343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg
344 o32 setns sys_setns
-345 o32 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-346 o32 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+345 o32 process_vm_readv sys_process_vm_readv
+346 o32 process_vm_writev sys_process_vm_writev
347 o32 kcmp sys_kcmp
348 o32 finit_module sys_finit_module
349 o32 sched_setattr sys_sched_setattr
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 38aa07ccdbcc..cf788591f091 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1287,6 +1287,18 @@ static int enable_restore_fp_context(int msa)
err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
+ /*
+ * With MSA enabled, userspace can see the MSACSR and
+ * MSA registers, but they still hold values left behind
+ * by a previous task; restore them from the saved
+ * fp/msa context.
+ */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+ * own_fpu_inatomic(1) only restores the low 64 bits;
+ * initialize the upper 64 bits here.
+ */
+ init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index f185a85a27c1..5e97e9d02f98 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -202,6 +202,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
/* These must appear regardless of . */
.gptab.sdata : {
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7de85d2253ff..0c50ac444222 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 87fa8d8a1031..28c366d307e7 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -486,7 +486,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 87fda0713b84..a46db0807195 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -308,8 +308,8 @@ EXPORT_SYMBOL(csum_partial)
/*
* checksum and copy routines based on memcpy.S
*
- * csum_partial_copy_nocheck(src, dst, len, sum)
- * __csum_partial_copy_kernel(src, dst, len, sum, errp)
+ * csum_partial_copy_nocheck(src, dst, len)
+ * __csum_partial_copy_kernel(src, dst, len)
*
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
@@ -318,26 +318,11 @@ EXPORT_SYMBOL(csum_partial)
#define src a0
#define dst a1
#define len a2
-#define psum a3
#define sum v0
#define odd t8
-#define errptr t9
/*
- * The exception handler for loads requires that:
- * 1- AT contain the address of the byte just past the end of the source
- * of the copy,
- * 2- src_entry <= src < AT, and
- * 3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- * not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
*/
/* Instruction type */
@@ -358,11 +343,11 @@ EXPORT_SYMBOL(csum_partial)
* addr : Address
* handler : Exception handler
*/
-#define EXC(insn, type, reg, addr, handler) \
+#define EXC(insn, type, reg, addr) \
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
/* This is enabled in EVA mode */ \
.else; \
@@ -371,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
.else; \
/* EVA without exception */ \
@@ -384,14 +369,14 @@ EXPORT_SYMBOL(csum_partial)
#ifdef USE_DOUBLE
#define LOADK ld /* No exception */
-#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@@ -404,14 +389,14 @@ EXPORT_SYMBOL(csum_partial)
#else
#define LOADK lw /* No exception */
-#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr)
#define ADD addu
#define SUB subu
#define SRL srl
@@ -450,22 +435,9 @@ EXPORT_SYMBOL(csum_partial)
.set at=v1
#endif
- .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+ .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
- PTR_ADDU AT, src, len /* See (1) above. */
- /* initialize __nocheck if this the first time we execute this
- * macro
- */
-#ifdef CONFIG_64BIT
- move errptr, a4
-#else
- lw errptr, 16(sp)
-#endif
- .if \__nocheck == 1
- FEXPORT(csum_partial_copy_nocheck)
- EXPORT_SYMBOL(csum_partial_copy_nocheck)
- .endif
- move sum, zero
+ li sum, -1
move odd, zero
/*
* Note: dst & src may be unaligned, len may be 0
@@ -497,31 +469,31 @@ EXPORT_SYMBOL(csum_partial)
SUB len, 8*NBYTES # subtract here for bgez loop
.align 4
1:
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
- LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
- LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
- LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
- LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
+ LOAD(t4, UNIT(4)(src))
+ LOAD(t5, UNIT(5)(src))
+ LOAD(t6, UNIT(6)(src))
+ LOAD(t7, UNIT(7)(src))
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
- STORE(t4, UNIT(4)(dst), .Ls_exc\@)
+ STORE(t4, UNIT(4)(dst))
ADDC(t4, t5)
- STORE(t5, UNIT(5)(dst), .Ls_exc\@)
+ STORE(t5, UNIT(5)(dst))
ADDC(sum, t4)
- STORE(t6, UNIT(6)(dst), .Ls_exc\@)
+ STORE(t6, UNIT(6)(dst))
ADDC(t6, t7)
- STORE(t7, UNIT(7)(dst), .Ls_exc\@)
+ STORE(t7, UNIT(7)(dst))
ADDC(sum, t6)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
@@ -541,19 +513,19 @@ EXPORT_SYMBOL(csum_partial)
/*
* len >= 4*NBYTES
*/
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -566,10 +538,10 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -592,10 +564,10 @@ EXPORT_SYMBOL(csum_partial)
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
- STREST(t0, -1(t1), .Ls_exc\@)
+ STREST(t0, -1(t1))
SHIFT_DISCARD_REVERT t0, t0, bits
.set reorder
ADDC(sum, t0)
@@ -612,12 +584,12 @@ EXPORT_SYMBOL(csum_partial)
* Set match = (src and dst have same alignment)
*/
#define match rem
- LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t3, FIRST(0)(src))
ADD t2, zero, NBYTES
- LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(0)(src))
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
- STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+ STFIRST(t3, FIRST(0)(dst))
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@@ -639,26 +611,26 @@ EXPORT_SYMBOL(csum_partial)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDFIRST(t1, FIRST(1)(src))
SUB len, len, 4*NBYTES
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
- LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
- LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
- LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
- LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
- LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+ LDREST(t0, REST(0)(src))
+ LDREST(t1, REST(1)(src))
+ LDFIRST(t2, FIRST(2)(src))
+ LDFIRST(t3, FIRST(3)(src))
+ LDREST(t2, REST(2)(src))
+ LDREST(t3, REST(3)(src))
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -671,11 +643,11 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDREST(t0, REST(0)(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -696,11 +668,10 @@ EXPORT_SYMBOL(csum_partial)
#endif
move t2, zero # partial word
li t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
- LOADBU(t0, N(src), .Ll_exc_copy\@); \
+ LOADBU(t0, N(src)); \
SUB len, len, 1; \
- STOREB(t0, N(dst), .Ls_exc\@); \
+ STOREB(t0, N(dst)); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
beqz len, .Lcopy_bytes_done\@; \
@@ -714,9 +685,9 @@ EXPORT_SYMBOL(csum_partial)
COPY_BYTE(4)
COPY_BYTE(5)
#endif
- LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+ LOADBU(t0, NBYTES-2(src))
SUB len, len, 1
- STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+ STOREB(t0, NBYTES-2(dst))
SLLV t0, t0, t3
or t2, t0
.Lcopy_bytes_done\@:
@@ -753,97 +724,31 @@ EXPORT_SYMBOL(csum_partial)
#endif
.set pop
.set reorder
- ADDC32(sum, psum)
jr ra
.set noreorder
+ .endm
-.Ll_exc_copy\@:
- /*
- * Copy bytes from src until faulting load address (or until a
- * lb faults)
- *
- * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
- * may be more than a byte beyond the last address.
- * Hence, the lb below may get an exception.
- *
- * Assumes src < THREAD_BUADDR($28)
- */
- LOADK t0, TI_TASK($28)
- li t2, SHIFT_START
- LOADK t0, THREAD_BUADDR(t0)
-1:
- LOADBU(t1, 0(src), .Ll_exc\@)
- ADD src, src, 1
- sb t1, 0(dst) # can't fault -- we're copy_from_user
- SLLV t1, t1, t2
- addu t2, SHIFT_INC
- ADDC(sum, t1)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 1
- bne src, t0, 1b
- .set noreorder
-.Ll_exc\@:
- LOADK t0, TI_TASK($28)
- nop
- LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
- nop
- SUB len, AT, t0 # len number of uncopied bytes
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- .set push
- .set noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
-#endif
- li v1, -EFAULT
- b .Ldone\@
- sw v1, (errptr)
-
-.Ls_exc\@:
- li v0, -1 /* invalid checksum */
- li v1, -EFAULT
+ .set noreorder
+.L_exc:
jr ra
- sw v1, (errptr)
- .set pop
- .endm
+ li v0, 0
-LEAF(__csum_partial_copy_kernel)
-EXPORT_SYMBOL(__csum_partial_copy_kernel)
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
#ifndef CONFIG_EVA
FEXPORT(__csum_partial_copy_to_user)
EXPORT_SYMBOL(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
EXPORT_SYMBOL(__csum_partial_copy_from_user)
#endif
-__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
-END(__csum_partial_copy_kernel)
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
#ifdef CONFIG_EVA
LEAF(__csum_partial_copy_to_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
END(__csum_partial_copy_from_user)
#endif
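
The assembly changes above move MIPS to the newer copy-and-checksum contract: no error pointer, the sum is seeded internally with ~0 so a successful result can never be 0, and a fault is reported simply by returning 0. Roughly what a generic C fallback for that contract looks like (a sketch of the convention, not the MIPS code):

static __wsum csum_and_copy_from_user_sketch(const void __user *src,
					     void *dst, int len)
{
	if (copy_from_user(dst, src, len))
		return 0;		/* any fault collapses to 0 */
	/*
	 * Seeding with ~0 keeps a successful checksum from ever being 0,
	 * so callers can use 0 unambiguously as the fault indicator.
	 */
	return csum_partial(dst, len, (__force __wsum)~0U);
}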
diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
index 4ab55f1123a0..ae023b9a1c51 100644
--- a/arch/mips/loongson2ef/Platform
+++ b/arch/mips/loongson2ef/Platform
@@ -44,6 +44,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
endif
endif
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
#
# Loongson Machines' Support
#
diff --git a/arch/mips/loongson64/cop2-ex.c b/arch/mips/loongson64/cop2-ex.c
index f130f62129b8..00055d4b6042 100644
--- a/arch/mips/loongson64/cop2-ex.c
+++ b/arch/mips/loongson64/cop2-ex.c
@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rt, value);
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rq, value_next);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
compute_return_epc(regs);
own_fpu(1);
}
@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value_next = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rq);
+ value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
StoreDW(addr + 8, value_next, res);
if (res)
goto fault;
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
break;
@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreW(addr, value, res);
if (res)
@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fc5a6d25f74f..0ef717093262 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1712,7 +1712,11 @@ static void setup_scache(void)
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 1a0fc5b62ba4..6e3102bcd2f1 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -70,7 +70,7 @@ static void nlm_fixup_mem(void)
const int pref_backup = 512;
struct memblock_region *mem;
- for_each_memblock(memory, mem) {
+ for_each_mem_region(mem) {
memblock_remove(mem->base + mem->size - pref_backup,
pref_backup);
}
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1493c49ca47a..55d7b7fd18b6 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -245,7 +245,6 @@ static int mipsxx_perfcount_handler(void)
switch (counters) {
#define HANDLE_COUNTER(n) \
- fallthrough; \
case n + 1: \
control = r_c0_perfctrl ## n(); \
counter = r_c0_perfcntr ## n(); \
@@ -256,8 +255,11 @@ static int mipsxx_perfcount_handler(void)
handled = IRQ_HANDLED; \
}
HANDLE_COUNTER(3)
+ fallthrough;
HANDLE_COUNTER(2)
+ fallthrough;
HANDLE_COUNTER(1)
+ fallthrough;
HANDLE_COUNTER(0)
}
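
The reason the annotation moves out of HANDLE_COUNTER(): the fallthrough attribute has to be the last statement of a reachable case body, immediately before the next label, so expanding it at the top of the macro would have placed it before the first case label of the switch. A small sketch of the accepted placement (handle() is a placeholder, not a symbol from this file):

	switch (counters) {
	case 4:
		handle(3);
		fallthrough;	/* OK: last statement, directly before a label */
	case 3:
		handle(2);
		fallthrough;
	case 2:
		handle(1);
		fallthrough;
	case 1:
		handle(0);
	}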
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index 0ecffb65fd6d..eeeec18c420a 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
},
};
-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
{
u32 status = read_c0_status();
@@ -205,12 +208,14 @@ static void a20r_hwint(void)
int irq;
clear_c0_status(IE_IRQ0);
- status = a20r_ack_hwint();
+ status = a20r_update_cause_ip();
cause = read_c0_cause();
irq = ffs(((cause & status) >> 8) & 0xf8);
if (likely(irq > 0))
do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
+
+ a20r_update_cause_ip();
set_c0_status(IE_IRQ0);
}
@@ -222,8 +227,8 @@ void __init sni_a20r_irq_init(void)
irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
sni_hwint = a20r_hwint;
change_c0_status(ST0_IM, IE_IRQ0);
- if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA",
- NULL))
+ if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler,
+ IRQF_SHARED, "ISA", sni_isa_irq_handler))
pr_err("Failed to register ISA interrupt\n");
}
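
Beyond the renamed acknowledge helper, the a20r change also illustrates the rule for shared interrupt lines: IRQF_SHARED needs a unique, non-NULL dev_id cookie (the patch simply reuses the handler's address), since the same pointer is later used to identify the sharer. A hedged sketch with placeholder names (MY_IRQ and my_isa_handler are not from this patch):

static irqreturn_t my_isa_handler(int irq, void *dev_id)
{
	/* Check whether our device raised the interrupt before claiming it */
	return IRQ_NONE;	/* not ours: let the other sharers run */
}

static int __init my_irq_init(void)
{
	/* dev_id must be non-NULL with IRQF_SHARED; any unique pointer works */
	return request_irq(MY_IRQ, my_isa_handler, IRQF_SHARED,
			   "my-isa", my_isa_handler);
}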
diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h
index fb45ec46bb1b..51ef800bb301 100644
--- a/arch/nds32/include/asm/irqflags.h
+++ b/arch/nds32/include/asm/irqflags.h
@@ -34,3 +34,8 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return !flags;
}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index 62bdafbc53f4..9edd7ed7d7bf 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -45,7 +45,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fsdi $fd15, [%0+0x78]\n\t"
"fsdi $fd14, [%0+0x70]\n\t"
@@ -58,7 +58,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fsdi $fd7, [%0+0x38]\n\t"
"fsdi $fd6, [%0+0x30]\n\t"
@@ -67,7 +67,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fsdi $fd3, [%1+0x18]\n\t"
"fsdi $fd2, [%1+0x10]\n\t"
@@ -108,7 +108,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd16, [%0+0x80]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fldi $fd15, [%0+0x78]\n\t"
"fldi $fd14, [%0+0x70]\n\t"
@@ -120,7 +120,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd8, [%0+0x40]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fldi $fd7, [%0+0x38]\n\t"
"fldi $fd6, [%0+0x30]\n\t"
@@ -128,7 +128,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd4, [%0+0x20]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fldi $fd3, [%1+0x18]\n\t"
"fldi $fd2, [%1+0x10]\n\t"
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index a066efbe53c0..c356e484dcab 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -249,12 +249,8 @@ static void __init setup_memory(void)
memory_end = memory_start = 0;
/* Find main memory where is the kernel */
- for_each_memblock(memory, region) {
- memory_start = region->base;
- memory_end = region->base + region->size;
- pr_info("%s: Memory: 0x%x-0x%x\n", __func__,
- memory_start, memory_end);
- }
+ memory_start = memblock_start_of_DRAM();
+ memory_end = memblock_end_of_DRAM();
if (!memory_end) {
panic("No memory!");
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index 330b19fcd990..36e25a410bb0 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -316,7 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->uregs[0] = -EINTR;
break;
}
- /* Else, fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->uregs[0] = regs->orig_r0;
regs->ipc -= 4;
@@ -361,7 +361,7 @@ static void do_signal(struct pt_regs *regs)
switch (regs->uregs[0]) {
case -ERESTART_RESTARTBLOCK:
regs->uregs[15] = __NR_restart_syscall;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S
index 7a6c1cefe3fe..6a91b965fb1e 100644
--- a/arch/nds32/kernel/vmlinux.lds.S
+++ b/arch/nds32/kernel/vmlinux.lds.S
@@ -64,6 +64,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h
index b4316c361729..69004e07a1ba 100644
--- a/arch/nios2/include/asm/checksum.h
+++ b/arch/nios2/include/asm/checksum.h
@@ -12,10 +12,6 @@
/* Take these from lib/checksum.c */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
- __wsum sum);
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
-
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
extern __sum16 ip_compute_csum(const void *buff, int len);
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index 88a4ec03edab..4ffe857e6ada 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -266,5 +266,5 @@ asmlinkage int nios2_clone(unsigned long clone_flags, unsigned long newsp,
.tls = tls,
};
- return _do_fork(&args);
+ return kernel_clone(&args);
}
diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S
index c55a7cfa1075..126e114744cb 100644
--- a/arch/nios2/kernel/vmlinux.lds.S
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -58,6 +58,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f0390211236b..120f5005461b 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -165,19 +165,19 @@ struct __large_struct {
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(__gu_addr, size)) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = (__typeof__(*(ptr))) 0; \
__gu_err; \
})
@@ -191,11 +191,13 @@ do { \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
case 8: __get_user_asm2(x, ptr, retval); break; \
- default: (x) = __get_user_bad(); \
+ default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, op) \
+{ \
+ unsigned long __gu_tmp; \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
"2:\n" \
@@ -209,10 +211,14 @@ do { \
" .align 2\n" \
" .long 1b,3b\n" \
".previous" \
- : "=r"(err), "=r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))__gu_tmp; \
+}
#define __get_user_asm2(x, addr, err) \
+{ \
+ unsigned long long __gu_tmp; \
__asm__ __volatile__( \
"1: l.lwz %1,0(%2)\n" \
"2: l.lwz %H1,4(%2)\n" \
@@ -229,8 +235,11 @@ do { \
" .long 1b,4b\n" \
" .long 2b,4b\n" \
".previous" \
- : "=r"(err), "=&r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=&r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))( \
+ (__typeof__((x)-(x)))__gu_tmp); \
+}
/* more complex routines */
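
The visible effect of the rework above: the asm now loads into a full-width temporary, the conversion to the caller's type happens as an ordinary C cast afterwards, and a failed access_ok() check explicitly clears the destination. Callers are unchanged; a hypothetical one, assuming the usual get_user() wrapper over __get_user_check():

	u16 port;	/* narrower than a register: temporary plus cast handle it */

	if (get_user(port, uptr))	/* uptr: a __user pointer from the caller */
		return -EFAULT;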
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index b18e775f8be3..2416a9f91533 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -48,17 +48,12 @@ static void __init setup_memory(void)
unsigned long ram_start_pfn;
unsigned long ram_end_pfn;
phys_addr_t memory_start, memory_end;
- struct memblock_region *region;
memory_end = memory_start = 0;
/* Find main memory where is the kernel, we assume its the only one */
- for_each_memblock(memory, region) {
- memory_start = region->base;
- memory_end = region->base + region->size;
- printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
- memory_start, memory_end);
- }
+ memory_start = memblock_start_of_DRAM();
+ memory_end = memblock_end_of_DRAM();
if (!memory_end) {
panic("No memory!");
@@ -80,6 +75,16 @@ static void __init setup_memory(void)
*/
memblock_reserve(__pa(_stext), _end - _stext);
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Then reserve the initrd, if any */
+ if (initrd_start && (initrd_end > initrd_start)) {
+ unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
+ unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE);
+
+ memblock_reserve(__pa(aligned_start), aligned_end - aligned_start);
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 97804f21a40c..c779364f0cd0 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -244,7 +244,7 @@ int do_signal(struct pt_regs *regs, int syscall)
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart = -2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index 22fbc5fb24b3..d5c7bb0fae57 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -103,6 +103,7 @@ SECTIONS
/* Throw in the debugging sections */
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
/* Sections to be discarded -- must be last */
DISCARDS
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 08f56af387ac..534a52ec5e66 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -16,7 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-static void cache_loop(struct page *page, const unsigned int reg)
+static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 3d7c79c7745d..8348feaaf46e 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -64,6 +64,7 @@ extern const char _s_kernel_ro[], _e_kernel_ro[];
*/
static void __init map_ram(void)
{
+ phys_addr_t start, end;
unsigned long v, p, e;
pgprot_t prot;
pgd_t *pge;
@@ -71,6 +72,7 @@ static void __init map_ram(void)
pud_t *pue;
pmd_t *pme;
pte_t *pte;
+ u64 i;
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
@@ -78,9 +80,9 @@ static void __init map_ram(void)
v = PAGE_OFFSET;
- for_each_memblock(memory, region) {
- p = (u32) region->base & PAGE_MASK;
- e = p + (u32) region->size;
+ for_each_mem_range(i, &start, &end) {
+ p = (u32) start & PAGE_MASK;
+ e = (u32) end;
v = (u32) __va(p);
pge = pgd_offset_k(v);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 3b0f53dd70bc..cd4afe1e7a6c 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -378,19 +378,3 @@ endmenu
source "drivers/parisc/Kconfig"
-
-config SECCOMP
- def_bool y
- prompt "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
index 2ac3a643f2eb..ab7b43990857 100644
--- a/arch/parisc/boot/compressed/vmlinux.lds.S
+++ b/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -84,6 +84,7 @@ SECTIONS
}
STABS_DEBUG
+ ELF_DETAILS
.note 0 : { *(.note) }
/* Sections to be discarded */
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index fe8c63b2d2c3..3c43baca7b39 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -19,14 +19,6 @@
extern __wsum csum_partial(const void *, int, __wsum);
/*
- * The same as csum_partial, but copies from src while it checksums.
- *
- * Here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
-
-/*
* Optimized for IP headers, which always checksum on 4 octet boundaries.
*
* Written by Randolph Chung <tausq@debian.org>, and then mucked with by
@@ -181,25 +173,5 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold(sum);
}
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
-{
- /* code stolen from include/asm-mips64 */
- sum = csum_partial(src, len, sum);
-
- if (copy_to_user(dst, src, len)) {
- *err_ptr = -EFAULT;
- return (__force __wsum)-1;
- }
-
- return sum;
-}
-
#endif
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 2f4f66a3bac0..8f33085ff1bd 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -22,8 +22,6 @@ typedef u32 compat_dev_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef u32 compat_caddr_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
struct compat_stat {
compat_dev_t st_dev; /* dev_t is 32 bits on parisc */
diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c
index 77ec51818916..6d21a515eea5 100644
--- a/arch/parisc/kernel/kprobes.c
+++ b/arch/parisc/kernel/kprobes.c
@@ -191,80 +191,11 @@ static struct kprobe trampoline_p = {
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)trampoline_p.addr;
- kprobe_opcode_t *correct_ret_addr = NULL;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * a return probe installed on them, and/or more than one return
- * probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (ri->rp && ri->rp->handler) {
- __this_cpu_write(current_kprobe, &ri->rp->kp);
- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
- ri->ret_addr = correct_ret_addr;
- ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, NULL);
- }
-
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
+ unsigned long orig_ret_address;
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ orig_ret_address = __kretprobe_trampoline_handler(regs, trampoline_p.addr, NULL);
instruction_pointer_set(regs, orig_ret_address);
+
return 1;
}
@@ -272,6 +203,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->gr[2];
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr. */
regs->gr[2] = (unsigned long)trampoline_p.addr;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 5df5d4cd5d4c..3c037fc96038 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -502,7 +502,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
regs->gr[28] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
check_syscallno_in_delay_branch(regs);
break;
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index def64d221cd4..ae3dab371f6f 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -29,7 +29,7 @@
18 common stat sys_newstat compat_sys_newstat
19 common lseek sys_lseek compat_sys_lseek
20 common getpid sys_getpid
-21 common mount sys_mount compat_sys_mount
+21 common mount sys_mount
22 common bind sys_bind
23 common setuid sys_setuid
24 common getuid sys_getuid
@@ -159,8 +159,8 @@
142 common _newselect sys_select compat_sys_select
143 common flock sys_flock
144 common msync sys_msync
-145 common readv sys_readv compat_sys_readv
-146 common writev sys_writev compat_sys_writev
+145 common readv sys_readv
+146 common writev sys_writev
147 common getsid sys_getsid
148 common fdatasync sys_fdatasync
149 common _sysctl sys_ni_syscall
@@ -330,7 +330,7 @@
292 32 sync_file_range parisc_sync_file_range
292 64 sync_file_range sys_sync_file_range
293 common tee sys_tee
-294 common vmsplice sys_vmsplice compat_sys_vmsplice
+294 common vmsplice sys_vmsplice
295 common move_pages sys_move_pages compat_sys_move_pages
296 common getcpu sys_getcpu
297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
@@ -372,8 +372,8 @@
327 common syncfs sys_syncfs
328 common setns sys_setns
329 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
-330 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-331 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+330 common process_vm_readv sys_process_vm_readv
+331 common process_vm_writev sys_process_vm_writev
332 common kcmp sys_kcmp
333 common finit_module sys_finit_module
334 common sched_setattr sys_sched_setattr
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 43875c289723..a52c7abf2ca4 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -437,7 +437,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
break;
default:
- /* Fall through */
break;
}
@@ -644,12 +643,12 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 15:
/* Data TLB miss fault/Data page fault */
- /* Fall through */
+ fallthrough;
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
is absent, and hardware can't find it, so we get to cleanup */
- /* Fall through */
+ fallthrough;
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
@@ -673,7 +672,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
handle_unaligned(regs);
return;
}
- /* Fall Through */
+ fallthrough;
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
@@ -683,7 +682,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
- /* fall thru */
+ fallthrough;
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
@@ -730,7 +729,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
mmap_read_unlock(current->mm);
}
- /* Fall Through */
+ fallthrough;
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 53e29d88f99c..2769eb991f58 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -164,6 +164,7 @@ SECTIONS
_end = . ;
STABS_DEBUG
+ ELF_DETAILS
.note 0 : { *(.note) }
/* Sections to be discarded */
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index c6f161583549..4818f3db84a5 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -106,20 +106,3 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
}
EXPORT_SYMBOL(csum_partial);
-
-/*
- * copy while checksumming, otherwise like csum_partial
- */
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
-{
- /*
- * It's 2:30 am and I don't feel like doing it real ...
- * This is lots slower than the real thing (tm)
- */
- sum = csum_partial(src, len, sum);
- memcpy(dst, src, len);
-
- return sum;
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 4bfe2da9fbe3..716960f5d92e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -67,7 +67,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
- /* fall through */
+ fallthrough;
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
@@ -370,7 +370,7 @@ bad_area:
}
/* probably address is outside of mapped file */
- /* fall through */
+ fallthrough;
case 17: /* NA data TLB miss / page fault */
case 18: /* Unaligned access - PCXS only */
signo = SIGBUS;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1f48bbfb3ce9..1f0bd7e223f5 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -116,7 +116,6 @@ config PPC
#
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
- select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
@@ -136,7 +135,7 @@ config PPC
select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
- select ARCH_HAS_UACCESS_MCSAFE if PPC64
+ select ARCH_HAS_COPY_MC if PPC64
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
@@ -246,6 +245,7 @@ config PPC
select OLD_SIGACTION if PPC32
select OLD_SIGSUSPEND
select PCI_DOMAINS if PCI
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select PCI_SYSCALL if PCI
select PPC_DAWR if PPC64
select RTC_LIB
@@ -860,6 +860,18 @@ config PPC_SUBPAGE_PROT
If unsure, say N here.
+config PPC_PROT_SAO_LPAR
+ bool "Support PROT_SAO mappings in LPARs"
+ depends on PPC_BOOK3S_64
+ help
+ This option adds support for PROT_SAO mappings from userspace
+ inside LPARs on supported CPUs.
+
+ This may cause issues when performing guest migration from
+ a CPU that supports SAO to one that does not.
+
+ If unsure, say N here.
+
config PPC_COPRO_BASE
bool
@@ -934,23 +946,6 @@ config ARCH_WANTS_FREEZER_CONTROL
source "kernel/power/Kconfig"
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- default y
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
config PPC_MEM_KEYS
prompt "PowerPC Memory Protection Keys"
def_bool y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index af9af03059e4..15ed8d0aa014 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y
CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 5e6f92ba3210..66e9a0fd64ff 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
index dce86e75f1a8..c1c1ef9457fb 100644
--- a/arch/powerpc/crypto/crc-vpmsum_test.c
+++ b/arch/powerpc/crypto/crc-vpmsum_test.c
@@ -9,6 +9,7 @@
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/random.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
@@ -22,10 +23,11 @@ static unsigned long iterations = 10000;
static int __init crc_test_init(void)
{
u16 crc16 = 0, verify16 = 0;
- u32 crc32 = 0, verify32 = 0;
__le32 verify32le = 0;
unsigned char *data;
+ u32 verify32 = 0;
unsigned long i;
+ __le32 crc32;
int ret;
struct crypto_shash *crct10dif_tfm;
@@ -98,7 +100,7 @@ static int __init crc_test_init(void)
crypto_shash_final(crc32c_shash, (u8 *)(&crc32));
verify32 = le32_to_cpu(verify32le);
verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len));
- if (crc32 != (u32)verify32le) {
+ if (crc32 != verify32le) {
pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n",
crc32, verify32, len);
break;
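
The point of retyping crc32 as __le32 above: the crc32c shash writes its digest in little-endian byte order, so keeping both sides of the comparison as __le32 compares in one fixed byte order (and keeps sparse quiet) instead of laundering one side through a host-endian u32. A short sketch of the pattern, where desc is assumed to be an already-initialized shash_desc and expected a host-endian reference value:

	__le32 got;				/* raw little-endian digest */
	__le32 want = cpu_to_le32(expected);	/* convert the reference once */

	crypto_shash_final(desc, (u8 *)&got);
	if (got != want)			/* compare in one byte order */
		pr_err("crc mismatch: got 0x%08x want 0x%08x\n",
		       le32_to_cpu(got), le32_to_cpu(want));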
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 55442d45c597..b392384a3b15 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)
extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- if (early_radix_enabled())
- return radix__setup_initial_memory_limit(first_memblock_base,
- first_memblock_size);
+ /*
+ * Hash has stricter restrictions. At this point we don't
+ * know which translation mode we will pick, so go with the
+ * hash restrictions.
+ */
return hash__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6de56c3b33c4..495fc0ccb453 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -20,13 +20,9 @@
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
-
-#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */
- /* No bits set is normal cacheable memory */
- /* 0x00010 unused, is SAO bit on radix POWER9 */
+#define _PAGE_SAO 0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */
-
#define _PAGE_DIRTY 0x00080 /* C: page changed */
#define _PAGE_ACCESSED 0x00100 /* R: page referenced */
/*
@@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}
+#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
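With this hunk _PAGE_SAO takes back the 0x00010 encoding and _PAGE_CACHE_CTL is expressed as the OR of the three cache-control encodings instead of a bare constant. The resulting mask still covers the same two PTE bits, which a quick compile-time check confirms (illustrative only, not part of the patch):

    /* Sanity check of the cache-control encodings defined above. */
    _Static_assert((0x00010 /* _PAGE_SAO */ |
                    0x00020 /* _PAGE_NON_IDEMPOTENT */ |
                    0x00030 /* _PAGE_TOLERANT */) == 0x00030,
                   "_PAGE_CACHE_CTL spans the same bits as the old 0x00030 constant");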
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 9cce06194dcc..82f099ba2411 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -18,19 +18,18 @@
* Like csum_partial, this must be called with even lengths,
* except for the last fragment.
*/
-extern __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err, int *dst_err);
+extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr);
+ int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum sum, int *err_ptr);
+ int len);
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+#define _HAVE_ARCH_CSUM_AND_COPY
+#define csum_partial_copy_nocheck(src, dst, len) \
+ csum_partial_copy_generic((src), (dst), (len))
/*
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 3e3cdfaa76c6..9191fc29e6ed 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -27,8 +27,6 @@ typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
struct compat_stat {
compat_dev_t st_dev;
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index fdddb822d564..32a15dc49e8c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -9,6 +9,11 @@
#ifndef __ASSEMBLY__
+/*
+ * Added to include __machine_check_early_realmode_* functions
+ */
+#include <asm/mce.h>
+
/* This structure can grow, it's real size is used by head.S code
* via the mkdefs mechanism.
*/
@@ -191,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000)
#define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000)
-// Free LONG_ASM_CONST(0x0000000008000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000)
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000)
@@ -436,7 +441,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | \
CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )
@@ -445,7 +450,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
@@ -456,7 +461,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
@@ -474,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 925cf89cbf4b..6bfc87915d5d 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -52,7 +52,7 @@ enum fixed_addresses {
FIX_HOLE,
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
- FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1,
+ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
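The SZ_128 to SZ_128K correction matters because the early-debug window is sized in pages, and ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE degenerates to a single page on every supported page size. A short worked example (illustrative, assuming 4K pages):

    /* ALIGN(x, PAGE_SIZE) / PAGE_SIZE with PAGE_SIZE = 4096:               */
    /*   old: ALIGN(128,    4096) / 4096 =   4096 / 4096 =  1 fixmap slot   */
    /*   new: ALIGN(131072, 4096) / 4096 = 131072 / 4096 = 32 fixmap slots  */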
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 3a0db7b0b46e..35060be09073 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void)
#define powerpc_local_irq_pmu_save(flags) \
do { \
raw_local_irq_pmu_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while(0)
#define powerpc_local_irq_pmu_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_pmu_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_pmu_restore(flags); \
- } \
+ raw_local_irq_pmu_restore(flags); \
} while(0)
#else
#define powerpc_local_irq_pmu_save(flags) \
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index d635b96c7ea6..7355ed05e65e 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -15,11 +15,18 @@
#ifndef __ASSEMBLY__
#include <asm/page.h>
+#include <linux/sizes.h>
#define KASAN_SHADOW_SCALE_SHIFT 3
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX)
+#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
+#else
+#define KASAN_KERN_START PAGE_OFFSET
+#endif
+
#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
- (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT))
+ (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
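When book3s/32 modules get their own segment below the kernel, the KASAN shadow has to start low enough to cover that region as well, which is what KASAN_KERN_START provides. A worked example with typical values (illustrative only):

    /* PAGE_OFFSET = 0xc0000000, module area in the 256M segment below it:  */
    /*   KASAN_KERN_START = ALIGN_DOWN(0xc0000000 - SZ_256M, SZ_256M)       */
    /*                    = 0xb0000000                                      */
    /* so KASAN_SHADOW_START now maps shadow for the module range as well.  */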
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e020d269416d..10ded83414de 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -58,7 +58,8 @@
#define KVM_ARCH_WANT_MMU_NOTIFIER
extern int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end,
+ unsigned flags);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index adf2cda67f9a..89aa8248a57d 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -210,6 +210,9 @@ struct mce_error_info {
#define MCE_EVENT_RELEASE true
#define MCE_EVENT_DONTRELEASE false
+struct pt_regs;
+struct notifier_block;
+
extern void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err, uint64_t nip,
uint64_t addr, uint64_t phys_addr);
@@ -225,5 +228,9 @@ int mce_register_notifier(struct notifier_block *nb);
int mce_unregister_notifier(struct notifier_block *nb);
#ifdef CONFIG_PPC_BOOK3S_64
void flush_and_reload_slb(void);
+long __machine_check_early_realmode_p7(struct pt_regs *regs);
+long __machine_check_early_realmode_p8(struct pt_regs *regs);
+long __machine_check_early_realmode_p9(struct pt_regs *regs);
+long __machine_check_early_realmode_p10(struct pt_regs *regs);
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* __ASM_PPC64_MCE_H__ */
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7c07728af300..7cb6d18f5cd6 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -13,20 +13,43 @@
#include <linux/pkeys.h>
#include <asm/cpu_has_feature.h>
-#ifdef CONFIG_PPC_MEM_KEYS
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
- return pkey_to_vmflag_bits(pkey);
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
+#else
+ return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
- return __pgprot(vmflag_to_pte_pkey_bits(vm_flags));
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (vm_flags & VM_SAO) ?
+ __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+ __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-#endif
+
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+ return false;
+ if (prot & PROT_SAO) {
+ if (!cpu_has_feature(CPU_FTR_SAO))
+ return false;
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
+ return false;
+ }
+ return true;
+}
+#define arch_validate_prot arch_validate_prot
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_MMAN_H */
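arch_validate_prot() is what makes PROT_SAO usable again from user space: the flag is accepted only when the CPU advertises CPU_FTR_SAO, and under an LPAR only if CONFIG_PPC_PROT_SAO_LPAR is enabled. A user-space program can probe for it simply by trying it; a minimal sketch (illustrative, not part of this patch):

    #include <sys/mman.h>
    #include <stdio.h>

    #ifndef PROT_SAO
    #define PROT_SAO 0x10   /* powerpc Strong Access Ordering */
    #endif

    int main(void)
    {
            size_t len = 1 << 16;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            /* arch_validate_prot() makes this fail with EINVAL when SAO is unusable. */
            if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO))
                    perror("mprotect(PROT_SAO)");
            else
                    puts("SAO mapping enabled");
            munmap(p, len);
            return 0;
    }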
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 59ee9fa4ae09..6cb8aa357191 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -82,6 +82,8 @@
*/
#include <asm/nohash/pte-book3e.h>
+#define _PAGE_SAO 0
+
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
/*
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 1e8b2e1ec1db..daec64d41b44 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -40,4 +40,7 @@ static inline bool is_sier_available(void) { return false; }
/* To support perf_regs sier update */
extern bool is_sier_available(void);
+/* To define perf extended regs mask value */
+extern u64 PERF_REG_EXTENDED_MASK;
+#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
#endif
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 86c9eb064b22..f6acabb6c9be 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -62,6 +62,11 @@ struct power_pmu {
int *blacklist_ev;
/* BHRB entries in the PMU */
int bhrb_nr;
+ /*
+ * Set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if
+ * the PMU supports the extended perf regs capability.
+ */
+ int capabilities;
};
/*
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index 283552cd0e58..2aa0e31e6884 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -53,9 +53,7 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
#ifndef CONFIG_KASAN
#define __HAVE_ARCH_MEMSET32
#define __HAVE_ARCH_MEMSET64
-#define __HAVE_ARCH_MEMCPY_MCSAFE
-extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz);
extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 00699903f1ef..20a35373cafc 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -435,6 +435,32 @@ do { \
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+ return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (likely(check_copy_size(from, n, true))) {
+ if (access_ok(to, n)) {
+ allow_write_to_user(to, n);
+ n = copy_mc_generic((void *)to, from, n);
+ prevent_write_to_user(to, n);
+ }
+ }
+
+ return n;
+}
+#endif
+
#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -523,20 +549,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return ret;
}
-static __always_inline unsigned long __must_check
-copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
-{
- if (likely(check_copy_size(from, n, true))) {
- if (access_ok(to, n)) {
- allow_write_to_user(to, n);
- n = memcpy_mcsafe((void *)to, from, n);
- prevent_write_to_user(to, n);
- }
- }
-
- return n;
-}
-
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
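copy_mc_to_kernel()/copy_mc_to_user() keep the convention of the old memcpy_mcsafe(): the return value is the number of bytes left uncopied, so a non-zero result means a machine check (or fault) truncated the copy. A minimal caller sketch (illustrative; the function and source names are made up for the example):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Copy from possibly-poisoned persistent memory into a kernel buffer. */
    static int read_from_pmem(void *dst, const void *pmem_src, size_t len)
    {
            unsigned long rem = copy_mc_to_kernel(dst, pmem_src, len);

            return rem ? -EIO : 0;   /* rem != 0: the copy did not complete */
    }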
diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h
index 3a700351feca..c0c737215b00 100644
--- a/arch/powerpc/include/uapi/asm/mman.h
+++ b/arch/powerpc/include/uapi/asm/mman.h
@@ -11,7 +11,7 @@
#include <asm-generic/mman-common.h>
-#define PROT_SAO 0x10 /* Unsupported since v5.9 */
+#define PROT_SAO 0x10 /* Strong Access Ordering */
#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index f599064dd8dc..bdf5f10f8b9f 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -48,6 +48,24 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
- PERF_REG_POWERPC_MAX,
+ /* Extended registers */
+ PERF_REG_POWERPC_MMCR0,
+ PERF_REG_POWERPC_MMCR1,
+ PERF_REG_POWERPC_MMCR2,
+ PERF_REG_POWERPC_MMCR3,
+ PERF_REG_POWERPC_SIER2,
+ PERF_REG_POWERPC_SIER3,
+ /* Max regs without the extended regs */
+ PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
};
+
+#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
+
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */
+#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */
+#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)
+
+#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1)
+#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1)
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
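The extended registers are appended after PERF_REG_POWERPC_MMCRA while PERF_REG_POWERPC_MAX is pinned to its old value, so existing users of the base set keep working; PERF_REG_PMU_MASK_300 then selects only the MMCR0-MMCR2 bits and PERF_REG_PMU_MASK_31 all six extended bits. A rough user-space sketch of requesting them (illustrative, not from this series):

    #include <string.h>
    #include <linux/perf_event.h>
    #include <asm/perf_regs.h>

    /* Sample the base register set plus the ISA 3.0 extended regs (MMCR0-MMCR2). */
    static void setup_attr(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size             = sizeof(*attr);
            attr->type             = PERF_TYPE_HARDWARE;
            attr->config           = PERF_COUNT_HW_CPU_CYCLES;
            attr->sample_type      = PERF_SAMPLE_REGS_INTR;
            attr->sample_regs_intr = PERF_REG_PMU_MASK | PERF_REG_PMU_MASK_300;
    }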
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3d406a9626e8..2aa89c6b2896 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -72,9 +72,6 @@ extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power9(void);
extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power10(void);
-extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -542,6 +539,25 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
+ { /* Power10 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00800000,
+ .cpu_name = "POWER10 (raw)",
+ .cpu_features = CPU_FTRS_POWER10,
+ .cpu_user_features = COMMON_USER_POWER10,
+ .cpu_user_features2 = COMMON_USER2_POWER10,
+ .mmu_features = MMU_FTRS_POWER10,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power10",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .machine_check_early = __machine_check_early_realmode_p10,
+ .platform = "power10",
+ },
{ /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 569fecd7b5b2..9053fc9d20c7 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
if (!tbl)
return 0;
- mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+ mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+ tbl->it_page_shift - 1);
mask += mask - 1;
return mask;
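The original expression used '<' where '<<' was intended and also ignored the IOMMU page size, so the computed DMA mask was far too small. With the corrected formula, a table of 0x80000 entries (it_offset + it_size) and a 4K IOMMU page size works out as follows (illustrative numbers):

    /*   fls_long(0x80000)            = 20                      */
    /*   mask = 1ULL << (20 + 12 - 1) = 0x80000000               */
    /*   mask += mask - 1             -> 0xffffffff (32-bit)     */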
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 6f8c0c6b937a..f204ad79b6b5 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -64,10 +64,6 @@ struct dt_cpu_feature {
* Set up the base CPU
*/
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p10(struct pt_regs *regs);
-
static int hv_mode;
static struct {
@@ -657,7 +653,7 @@ static struct dt_cpu_feature_match __initdata
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
{"no-execute", feat_enable, 0},
- /* strong-access-ordering is unused */
+ {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
{"coprocessor-icswx", feat_enable, 0},
{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 33a42e42c56f..733e40eba4eb 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -113,6 +113,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
/*
* RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
* would clobber syscall parameters. Also we always enter with IRQs
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 10ebb4bf71ad..5cdf4168a61a 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -191,13 +191,13 @@ int is_fadump_active(void)
*/
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
- struct memblock_region *reg;
+ phys_addr_t reg_start, reg_end;
bool ret = false;
- u64 start, end;
+ u64 i, start, end;
- for_each_memblock(memory, reg) {
- start = max_t(u64, d_start, reg->base);
- end = min_t(u64, d_end, (reg->base + reg->size));
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ start = max_t(u64, d_start, reg_start);
+ end = min_t(u64, d_end, reg_end);
if (d_start < end) {
/* Memory hole from d_start to start */
if (start > d_start)
@@ -422,34 +422,34 @@ static int __init add_boot_mem_regions(unsigned long mstart,
static int __init fadump_get_boot_mem_regions(void)
{
- unsigned long base, size, cur_size, hole_size, last_end;
+ unsigned long size, cur_size, hole_size, last_end;
unsigned long mem_size = fw_dump.boot_memory_size;
- struct memblock_region *reg;
+ phys_addr_t reg_start, reg_end;
int ret = 1;
+ u64 i;
fw_dump.boot_mem_regs_cnt = 0;
last_end = 0;
hole_size = 0;
cur_size = 0;
- for_each_memblock(memory, reg) {
- base = reg->base;
- size = reg->size;
- hole_size += (base - last_end);
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ size = reg_end - reg_start;
+ hole_size += (reg_start - last_end);
if ((cur_size + size) >= mem_size) {
size = (mem_size - cur_size);
- ret = add_boot_mem_regions(base, size);
+ ret = add_boot_mem_regions(reg_start, size);
break;
}
mem_size -= size;
cur_size += size;
- ret = add_boot_mem_regions(base, size);
+ ret = add_boot_mem_regions(reg_start, size);
if (!ret)
break;
- last_end = base + size;
+ last_end = reg_end;
}
fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);
@@ -985,9 +985,8 @@ static int fadump_init_elfcore_header(char *bufp)
*/
static int fadump_setup_crash_memory_ranges(void)
{
- struct memblock_region *reg;
- u64 start, end;
- int i, ret;
+ u64 i, start, end;
+ int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mrange_info.mem_range_cnt = 0;
@@ -1005,10 +1004,7 @@ static int fadump_setup_crash_memory_ranges(void)
return ret;
}
- for_each_memblock(memory, reg) {
- start = (u64)reg->base;
- end = start + (u64)reg->size;
-
+ for_each_mem_range(i, &start, &end) {
/*
* skip the memory chunk that is already added
* (0 through boot_memory_top).
@@ -1242,14 +1238,17 @@ static void fadump_free_reserved_memory(unsigned long start_pfn,
*/
static void fadump_release_reserved_area(u64 start, u64 end)
{
+ unsigned long reg_spfn, reg_epfn;
u64 tstart, tend, spfn, epfn;
- struct memblock_region *reg;
+ int i;
spfn = PHYS_PFN(start);
epfn = PHYS_PFN(end);
- for_each_memblock(memory, reg) {
- tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg));
- tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg));
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
+ tstart = max_t(u64, spfn, reg_spfn);
+ tend = min_t(u64, epfn, reg_epfn);
+
if (tstart < tend) {
fadump_free_reserved_memory(tstart, tend);
@@ -1684,12 +1683,10 @@ int __init fadump_reserve_mem(void)
/* Preserve everything above the base address */
static void __init fadump_reserve_crash_area(u64 base)
{
- struct memblock_region *reg;
- u64 mstart, msize;
+ u64 i, mstart, mend, msize;
- for_each_memblock(memory, reg) {
- mstart = reg->base;
- msize = reg->size;
+ for_each_mem_range(i, &mstart, &mend) {
+ msize = mend - mstart;
if ((mstart + msize) < base)
continue;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 6ab9b4d037c3..01ab2163659e 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -218,6 +218,7 @@ bool arch_kprobe_on_func_entry(unsigned long offset)
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->link;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->link = (unsigned long)kretprobe_trampoline;
@@ -396,50 +397,9 @@ asm(".global kretprobe_trampoline\n"
*/
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ unsigned long orig_ret_address;
+ orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
/*
* We get here through one of two paths:
* 1. by taking a trap -> kprobe_handler() -> here
@@ -458,13 +418,6 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->nip = orig_ret_address - 4;
regs->link = orig_ret_address;
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
-
return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 016bd831908e..73a57043ee66 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -548,7 +548,7 @@ void notrace restore_math(struct pt_regs *regs)
* are live for the user thread).
*/
if ((!(msr & MSR_FP)) && should_restore_fp())
- new_msr |= MSR_FP | current->thread.fpexc_mode;
+ new_msr |= MSR_FP;
if ((!(msr & MSR_VEC)) && should_restore_altivec())
new_msr |= MSR_VEC;
@@ -559,11 +559,17 @@ void notrace restore_math(struct pt_regs *regs)
}
if (new_msr) {
+ unsigned long fpexc_mode = 0;
+
msr_check_and_set(new_msr);
- if (new_msr & MSR_FP)
+ if (new_msr & MSR_FP) {
do_restore_fp();
+ // This also covers VSX, because VSX implies FP
+ fpexc_mode = current->thread.fpexc_mode;
+ }
+
if (new_msr & MSR_VEC)
do_restore_altivec();
@@ -572,7 +578,7 @@ void notrace restore_math(struct pt_regs *regs)
msr_check_and_clear(new_msr);
- regs->msr |= new_msr;
+ regs->msr |= new_msr | fpexc_mode;
}
}
#endif
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index b198b0ff25bc..808ec9fab605 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -311,6 +311,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
min = pvr & 0xFF;
break;
case 0x004e: /* POWER9 bits 12-15 give chip type */
+ case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
maj = (pvr >> 8) & 0x0F;
min = pvr & 0xFF;
break;
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index c2d737ff2e7b..9d7fb4ced290 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -34,7 +34,7 @@
18 spu oldstat sys_ni_syscall
19 common lseek sys_lseek compat_sys_lseek
20 common getpid sys_getpid
-21 nospu mount sys_mount compat_sys_mount
+21 nospu mount sys_mount
22 32 umount sys_oldumount
22 64 umount sys_ni_syscall
22 spu umount sys_ni_syscall
@@ -193,8 +193,8 @@
142 common _newselect sys_select compat_sys_select
143 common flock sys_flock
144 common msync sys_msync
-145 common readv sys_readv compat_sys_readv
-146 common writev sys_writev compat_sys_writev
+145 common readv sys_readv
+146 common writev sys_writev
147 common getsid sys_getsid
148 common fdatasync sys_fdatasync
149 nospu _sysctl sys_ni_syscall
@@ -369,7 +369,7 @@
282 common unshare sys_unshare
283 common splice sys_splice
284 common tee sys_tee
-285 common vmsplice sys_vmsplice compat_sys_vmsplice
+285 common vmsplice sys_vmsplice
286 common openat sys_openat compat_sys_openat
287 common mkdirat sys_mkdirat
288 common mknodat sys_mknodat
@@ -449,8 +449,8 @@
348 common syncfs sys_syncfs
349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
350 common setns sys_setns
-351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+351 nospu process_vm_readv sys_process_vm_readv
+352 nospu process_vm_writev sys_process_vm_writev
353 nospu finit_module sys_finit_module
354 nospu kcmp sys_kcmp
355 common sched_setattr sys_sched_setattr
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 87ab1152d5ce..e147bbdc12cd 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+ cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 4c985467a668..5206c2eb2a1d 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -111,7 +111,6 @@ SECTIONS
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.glink .iplt .plt .rela*)
}
}
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 38c317f25141..32ebb3522ea1 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
+ cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4e3a8d4ee614..256fb9720298 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -30,7 +30,7 @@ SECTIONS
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
- *(.sfpr)
+ *(.sfpr .glink)
} :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);
@@ -111,7 +111,6 @@ SECTIONS
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.glink .iplt .plt .rela*)
}
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 326e113d2e45..e0548b4950de 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -360,8 +360,8 @@ SECTIONS
PROVIDE32 (end = .);
STABS_DEBUG
-
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
/DISCARD/ : {
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 53bb71e3a2e1..c69bcf9b547a 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -138,15 +138,13 @@ out:
*/
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
- struct memblock_region *reg;
+ phys_addr_t base, end;
struct crash_mem *tmem;
+ u64 i;
int ret;
- for_each_memblock(memory, reg) {
- u64 base, size;
-
- base = (u64)reg->base;
- size = (u64)reg->size;
+ for_each_mem_range(i, &base, &end) {
+ u64 size = end - base;
/* Skip backup memory region, which needs a separate entry */
if (base == BACKUP_SRC_START) {
@@ -250,8 +248,7 @@ static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
phys_addr_t start, end;
u64 i;
- for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE,
- MEMBLOCK_NONE, &start, &end, NULL) {
+ for_each_mem_range_rev(i, &start, &end) {
/*
* memblock uses [start, end) convention while it is
* [start, end] here. Fix the off-by-one to have the
@@ -350,8 +347,7 @@ static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
phys_addr_t start, end;
u64 i;
- for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
- MEMBLOCK_NONE, &start, &end, NULL) {
+ for_each_mem_range(i, &start, &end) {
/*
* memblock uses [start, end) convention while it is
* [start, end] here. Fix the off-by-one to have the
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 41fedec69ac3..49db50d1db04 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 073617ce83e0..8f58dd20b362 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -95,23 +95,15 @@ EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
void __init kvm_cma_reserve(void)
{
unsigned long align_size;
- struct memblock_region *reg;
- phys_addr_t selected_size = 0;
+ phys_addr_t selected_size;
/*
* We need CMA reservation only when we are in HV mode
*/
if (!cpu_has_feature(CPU_FTR_HVMODE))
return;
- /*
- * We cannot use memblock_phys_mem_size() here, because
- * memblock_analyze() has not been called yet.
- */
- for_each_memblock(memory, reg)
- selected_size += memblock_region_memory_end_pfn(reg) -
- memblock_region_memory_base_pfn(reg);
- selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
+ selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
if (selected_size) {
pr_info("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 7705d5557239..84e5a2dc8be5 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -687,9 +687,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
struct kvmppc_uvmem_page_pvt *pvt;
unsigned long pfn_last, pfn_first;
- pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
+ pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
pfn_last = pfn_first +
- (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);
+ (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
spin_lock(&kvmppc_uvmem_bitmap_lock);
bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
@@ -1007,7 +1007,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
static void kvmppc_uvmem_page_free(struct page *page)
{
unsigned long pfn = page_to_pfn(page) -
- (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
+ (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
struct kvmppc_uvmem_page_pvt *pvt;
spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -1170,7 +1170,9 @@ int kvmppc_uvmem_init(void)
}
kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
- kvmppc_uvmem_pgmap.res = *res;
+ kvmppc_uvmem_pgmap.range.start = res->start;
+ kvmppc_uvmem_pgmap.range.end = res->end;
+ kvmppc_uvmem_pgmap.nr_range = 1;
kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
/* just one global instance: */
kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
@@ -1205,7 +1207,7 @@ void kvmppc_uvmem_free(void)
return;
memunmap_pages(&kvmppc_uvmem_pgmap);
- release_mem_region(kvmppc_uvmem_pgmap.res.start,
- resource_size(&kvmppc_uvmem_pgmap.res));
+ release_mem_region(kvmppc_uvmem_pgmap.range.start,
+ range_len(&kvmppc_uvmem_pgmap.range));
kfree(kvmppc_uvmem_bitmap);
}
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index d6c1069e9954..ed0c9c43d0cf 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
/* kvm_unmap_hva flushes everything anyways */
kvm_unmap_hva(kvm, start);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index d66a645503eb..69a91b571845 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
memcpy_power7.o
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
- memcpy_64.o memcpy_mcsafe_64.o
+ memcpy_64.o copy_mc_64.o
ifndef CONFIG_PPC_QUEUED_SPINLOCKS
obj64-$(CONFIG_SMP) += locks.o
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index ecd150dc3ed9..ec5cd2dede35 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -78,12 +78,10 @@ EXPORT_SYMBOL(__csum_partial)
/*
* Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively, and (for an error on
- * src) zeroes the rest of dst.
+ * and adds in 0xffffffff, while copying the block to dst.
+ * If an access exception occurs, it returns zero.
*
- * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
+ * csum_partial_copy_generic(src, dst, len)
*/
#define CSUM_COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
@@ -108,14 +106,14 @@ EXPORT_SYMBOL(__csum_partial)
adde r12,r12,r10
#define CSUM_COPY_16_BYTES_EXCODE(n) \
- EX_TABLE(8 ## n ## 0b, src_error); \
- EX_TABLE(8 ## n ## 1b, src_error); \
- EX_TABLE(8 ## n ## 2b, src_error); \
- EX_TABLE(8 ## n ## 3b, src_error); \
- EX_TABLE(8 ## n ## 4b, dst_error); \
- EX_TABLE(8 ## n ## 5b, dst_error); \
- EX_TABLE(8 ## n ## 6b, dst_error); \
- EX_TABLE(8 ## n ## 7b, dst_error);
+ EX_TABLE(8 ## n ## 0b, fault); \
+ EX_TABLE(8 ## n ## 1b, fault); \
+ EX_TABLE(8 ## n ## 2b, fault); \
+ EX_TABLE(8 ## n ## 3b, fault); \
+ EX_TABLE(8 ## n ## 4b, fault); \
+ EX_TABLE(8 ## n ## 5b, fault); \
+ EX_TABLE(8 ## n ## 6b, fault); \
+ EX_TABLE(8 ## n ## 7b, fault);
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
@@ -127,11 +125,8 @@ LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
_GLOBAL(csum_partial_copy_generic)
- stwu r1,-16(r1)
- stw r7,12(r1)
- stw r8,8(r1)
-
- addic r12,r6,0
+ li r12,-1
+ addic r0,r0,0 /* clear carry */
addi r6,r4,-4
neg r0,r4
addi r4,r3,-4
@@ -246,34 +241,19 @@ _GLOBAL(csum_partial_copy_generic)
rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
blr
-/* read fault */
-src_error:
- lwz r7,12(r1)
- addi r1,r1,16
- cmpwi cr0,r7,0
- beqlr
- li r0,-EFAULT
- stw r0,0(r7)
- blr
-/* write fault */
-dst_error:
- lwz r8,8(r1)
- addi r1,r1,16
- cmpwi cr0,r8,0
- beqlr
- li r0,-EFAULT
- stw r0,0(r8)
+fault:
+ li r3,0
blr
- EX_TABLE(70b, src_error);
- EX_TABLE(71b, dst_error);
- EX_TABLE(72b, src_error);
- EX_TABLE(73b, dst_error);
- EX_TABLE(54b, dst_error);
+ EX_TABLE(70b, fault);
+ EX_TABLE(71b, fault);
+ EX_TABLE(72b, fault);
+ EX_TABLE(73b, fault);
+ EX_TABLE(54b, fault);
/*
* this stuff handles faults in the cacheline loop and branches to either
- * src_error (if in read part) or dst_error (if in write part)
+ * fault (for both the read part and the write part)
*/
CSUM_COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
@@ -290,12 +270,12 @@ dst_error:
#endif
#endif
- EX_TABLE(30b, src_error);
- EX_TABLE(31b, dst_error);
- EX_TABLE(40b, src_error);
- EX_TABLE(41b, dst_error);
- EX_TABLE(50b, src_error);
- EX_TABLE(51b, dst_error);
+ EX_TABLE(30b, fault);
+ EX_TABLE(31b, fault);
+ EX_TABLE(40b, fault);
+ EX_TABLE(41b, fault);
+ EX_TABLE(50b, fault);
+ EX_TABLE(51b, fault);
EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 514978f908d4..98ff51bd2f7d 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -182,34 +182,33 @@ EXPORT_SYMBOL(__csum_partial)
.macro srcnr
100:
- EX_TABLE(100b,.Lsrc_error_nr)
+ EX_TABLE(100b,.Lerror_nr)
.endm
.macro source
150:
- EX_TABLE(150b,.Lsrc_error)
+ EX_TABLE(150b,.Lerror)
.endm
.macro dstnr
200:
- EX_TABLE(200b,.Ldest_error_nr)
+ EX_TABLE(200b,.Lerror_nr)
.endm
.macro dest
250:
- EX_TABLE(250b,.Ldest_error)
+ EX_TABLE(250b,.Lerror)
.endm
/*
* Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively. The caller must take any action
- * required in this case (zeroing memory, recalculating partial checksum etc).
+ * and adds in 0xffffffff (32-bit), while copying the block to dst.
+ * If an access exception occurs, it returns 0.
*
- * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
+ * csum_partial_copy_generic(r3=src, r4=dst, r5=len)
*/
_GLOBAL(csum_partial_copy_generic)
+ li r6,-1
addic r0,r6,0 /* clear carry */
srdi. r6,r5,3 /* less than 8 bytes? */
@@ -401,29 +400,15 @@ dstnr; stb r6,0(r4)
srdi r3,r3,32
blr
-.Lsrc_error:
+.Lerror:
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
-.Lsrc_error_nr:
- cmpdi 0,r7,0
- beqlr
- li r6,-EFAULT
- stw r6,0(r7)
+.Lerror_nr:
+ li r3,0
blr
-.Ldest_error:
- ld r14,STK_REG(R14)(r1)
- ld r15,STK_REG(R15)(r1)
- ld r16,STK_REG(R16)(r1)
- addi r1,r1,STACKFRAMESIZE
-.Ldest_error_nr:
- cmpdi 0,r8,0
- beqlr
- li r6,-EFAULT
- stw r6,0(r8)
- blr
EXPORT_SYMBOL(csum_partial_copy_generic)
/*
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index fabe4db28726..b895166afc82 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -12,83 +12,37 @@
#include <linux/uaccess.h>
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
- unsigned int csum;
+ __wsum csum;
might_sleep();
- allow_read_from_user(src, len);
-
- *err_ptr = 0;
-
- if (!len) {
- csum = 0;
- goto out;
- }
- if (unlikely((len < 0) || !access_ok(src, len))) {
- *err_ptr = -EFAULT;
- csum = (__force unsigned int)sum;
- goto out;
- }
+ if (unlikely(!access_ok(src, len)))
+ return 0;
- csum = csum_partial_copy_generic((void __force *)src, dst,
- len, sum, err_ptr, NULL);
-
- if (unlikely(*err_ptr)) {
- int missing = __copy_from_user(dst, src, len);
-
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *err_ptr = -EFAULT;
- } else {
- *err_ptr = 0;
- }
+ allow_read_from_user(src, len);
- csum = csum_partial(dst, len, sum);
- }
+ csum = csum_partial_copy_generic((void __force *)src, dst, len);
-out:
prevent_read_from_user(src, len);
- return (__force __wsum)csum;
+ return csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
- unsigned int csum;
+ __wsum csum;
might_sleep();
- allow_write_to_user(dst, len);
-
- *err_ptr = 0;
-
- if (!len) {
- csum = 0;
- goto out;
- }
+ if (unlikely(!access_ok(dst, len)))
+ return 0;
- if (unlikely((len < 0) || !access_ok(dst, len))) {
- *err_ptr = -EFAULT;
- csum = -1; /* invalid checksum */
- goto out;
- }
-
- csum = csum_partial_copy_generic(src, (void __force *)dst,
- len, sum, NULL, err_ptr);
-
- if (unlikely(*err_ptr)) {
- csum = csum_partial(src, len, sum);
+ allow_write_to_user(dst, len);
- if (copy_to_user(dst, src, len)) {
- *err_ptr = -EFAULT;
- csum = -1; /* invalid checksum */
- }
- }
+ csum = csum_partial_copy_generic(src, (void __force *)dst, len);
-out:
prevent_write_to_user(dst, len);
- return (__force __wsum)csum;
+ return csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
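Both wrappers now follow the simplified csum_and_copy_*() calling convention used throughout this series: no sum or error-pointer arguments, the checksum is seeded internally with ~0, and a return value of 0 means the access faulted. A rough caller-side sketch (illustrative, not part of the patch):

    #include <linux/errno.h>
    #include <net/checksum.h>

    /* Fold the new convention back into an errno for an imaginary caller. */
    static int copy_datagram_csum(void *dst, const void __user *src, int len,
                                  __wsum *csum_out)
    {
            __wsum csum = csum_and_copy_from_user(src, dst, len);

            if (!csum)              /* 0 now means "the copy faulted" */
                    return -EFAULT;

            *csum_out = csum;       /* seeded with ~0, so never 0 on success */
            return 0;
    }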
diff --git a/arch/powerpc/lib/memcpy_mcsafe_64.S b/arch/powerpc/lib/copy_mc_64.S
index cb882d9a6d8a..88d46c471493 100644
--- a/arch/powerpc/lib/memcpy_mcsafe_64.S
+++ b/arch/powerpc/lib/copy_mc_64.S
@@ -50,7 +50,7 @@ err3; stb r0,0(r3)
blr
-_GLOBAL(memcpy_mcsafe)
+_GLOBAL(copy_mc_generic)
mr r7,r5
cmpldi r5,16
blt .Lshort_copy
@@ -239,4 +239,4 @@ err1; stb r0,0(r3)
15: li r3,0
blr
-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+EXPORT_SYMBOL_GPL(copy_mc_generic);
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index c0162911f6cb..d426eaf76bb0 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -191,10 +191,17 @@ static bool is_module_segment(unsigned long addr)
{
if (!IS_ENABLED(CONFIG_MODULES))
return false;
+#ifdef MODULES_VADDR
+ if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
+ return false;
+ if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
+ return false;
+#else
if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
return false;
- if (addr >= ALIGN(VMALLOC_END, SZ_256M))
+ if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
return false;
+#endif
return true;
}
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 1da9dbba9217..b830adee51f5 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -7,7 +7,7 @@
*
* SMP scalability work:
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
+ *
* Module name: htab.c
*
* Description:
@@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
rflags |= HPTE_R_I;
else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
rflags |= (HPTE_R_I | HPTE_R_G);
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+ rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
else
/*
* Add memory coherence if cache inhibited is not set
@@ -865,8 +867,8 @@ static void __init htab_initialize(void)
unsigned long table;
unsigned long pteg_count;
unsigned long prot;
- unsigned long base = 0, size = 0;
- struct memblock_region *reg;
+ phys_addr_t base = 0, size = 0, end;
+ u64 i;
DBG(" -> htab_initialize()\n");
@@ -882,7 +884,7 @@ static void __init htab_initialize(void)
/*
* Calculate the required size of the htab. We want the number of
* PTEGs to equal one half the number of real pages.
- */
+ */
htab_size_bytes = htab_get_table_size();
pteg_count = htab_size_bytes >> 7;
@@ -892,7 +894,7 @@ static void __init htab_initialize(void)
firmware_has_feature(FW_FEATURE_PS3_LV1)) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
- _SDR1 = 0;
+ _SDR1 = 0;
#ifdef CONFIG_FA_DUMP
/*
* If firmware assisted dump is active firmware preserves
@@ -958,9 +960,9 @@ static void __init htab_initialize(void)
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* create bolted the linear mapping in the hash table */
- for_each_memblock(memory, reg) {
- base = (unsigned long)__va(reg->base);
- size = reg->size;
+ for_each_mem_range(i, &base, &end) {
+ size = end - base;
+ base = (unsigned long)__va(base);
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
base, size, prot);
@@ -1115,8 +1117,10 @@ void hash__early_init_mmu_secondary(void)
&& cpu_has_feature(CPU_FTR_HVMODE))
tlbiel_all();
- if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY))
+#ifdef CONFIG_PPC_MEM_KEYS
+ if (mmu_has_feature(MMU_FTR_PKEY))
mtspr(SPRN_UAMOR, default_uamor);
+#endif
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 28c784976bed..cc72666e891a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -329,7 +329,8 @@ static int __meminit create_physical_mapping(unsigned long start,
static void __init radix_init_pgtable(void)
{
unsigned long rts_field;
- struct memblock_region *reg;
+ phys_addr_t start, end;
+ u64 i;
/* We don't support slb for radix */
mmu_slb_size = 0;
@@ -337,20 +338,19 @@ static void __init radix_init_pgtable(void)
/*
* Create the linear mapping
*/
- for_each_memblock(memory, reg) {
+ for_each_mem_range(i, &start, &end) {
/*
* The memblock allocator is up at this point, so the
* page tables will be allocated within the range. No
* need or a node (which we don't have yet).
*/
- if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
+ if (end >= RADIX_VMALLOC_START) {
pr_warn("Outside the supported range\n");
continue;
}
- WARN_ON(create_physical_mapping(reg->base,
- reg->base + reg->size,
+ WARN_ON(create_physical_mapping(start, end,
radix_mem_block_size,
-1, PAGE_KERNEL));
}
@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
}
}
-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size)
-{
- /*
- * We don't currently support the first MEMBLOCK not mapping 0
- * physical on those processors
- */
- BUG_ON(first_memblock_base != 0);
-
- /*
- * Radix mode is not limited by RMA / VRMA addressing.
- */
- ppc64_rma_size = ULONG_MAX;
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 02e127fa5777..8459056cce67 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void)
if (!(mfmsr() & MSR_HV))
early_check_vec5();
- if (early_radix_enabled())
+ if (early_radix_enabled()) {
radix__early_init_devtree();
- else
+ /*
+ * By now we have finalized the translation mode we are going to use.
+ * Radix mode is not limited by RMA / VRMA addressing.
+ * Hence don't limit memblock allocations.
+ */
+ ppc64_rma_size = ULONG_MAX;
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+ } else
hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index fb294046e00e..26fda3203320 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -138,11 +138,11 @@ void __init kasan_mmu_init(void)
void __init kasan_init(void)
{
- struct memblock_region *reg;
+ phys_addr_t base, end;
+ u64 i;
- for_each_memblock(memory, reg) {
- phys_addr_t base = reg->base;
- phys_addr_t top = min(base + reg->size, total_lowmem);
+ for_each_mem_range(i, &base, &end) {
+ phys_addr_t top = min(end, total_lowmem);
int ret;
if (base >= top)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 42e25874f5a8..5e2e7c0a8f1a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -184,15 +184,16 @@ void __init initmem_init(void)
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
- struct memblock_region *reg, *prev = NULL;
-
- for_each_memblock(memory, reg) {
- if (prev &&
- memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
- register_nosave_region(memblock_region_memory_end_pfn(prev),
- memblock_region_memory_base_pfn(reg));
- prev = reg;
+ unsigned long spfn, epfn, prev = 0;
+ int i;
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
+ if (prev && prev < spfn)
+ register_nosave_region(prev, spfn);
+
+ prev = epfn;
}
+
return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
@@ -584,20 +585,24 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
*/
static int __init add_system_ram_resources(void)
{
- struct memblock_region *reg;
+ phys_addr_t start, end;
+ u64 i;
- for_each_memblock(memory, reg) {
+ for_each_mem_range(i, &start, &end) {
struct resource *res;
- unsigned long base = reg->base;
- unsigned long size = reg->size;
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
WARN_ON(!res);
if (res) {
res->name = "System RAM";
- res->start = base;
- res->end = base + size - 1;
+ res->start = start;
+ /*
+ * In memblock, end points to the first byte after
+ * the range, while in resources, end points to the
+ * last byte in the range.
+ */
+ res->end = end - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
WARN_ON(request_resource(&iomem_resource, res) < 0);
}
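One detail worth keeping in mind when reading the loop above: for_each_mem_range() hands back half-open [start, end) intervals, whereas struct resource uses inclusive ends, hence the "end - 1". For example (illustrative values):

    /* memblock range:  [0x00000000, 0x80000000)   half-open            */
    /* resource entry:   0x00000000 - 0x7fffffff   inclusive end        */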
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 1f61fa2148b5..f4e20d8e6c02 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -804,17 +804,14 @@ static void __init setup_nonnuma(void)
unsigned long total_ram = memblock_phys_mem_size();
unsigned long start_pfn, end_pfn;
unsigned int nid = 0;
- struct memblock_region *reg;
+ int i;
printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
- for_each_memblock(memory, reg) {
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
-
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
fake_numa_create_new_node(end_pfn, &nid);
memblock_set_node(PFN_PHYS(start_pfn),
PFN_PHYS(end_pfn - start_pfn),
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6eb4eab79385..079159e97bca 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -123,11 +123,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
void __init mapin_ram(void)
{
- struct memblock_region *reg;
+ phys_addr_t base, end;
+ u64 i;
- for_each_memblock(memory, reg) {
- phys_addr_t base = reg->base;
- phys_addr_t top = min(base + reg->size, total_lowmem);
+ for_each_mem_range(i, &base, &end) {
+ phys_addr_t top = min(end, total_lowmem);
if (base >= top)
continue;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 16d09b36fe06..e809cb5a1631 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -475,7 +475,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
true_cond = COND_NE;
- /* Fall through */
cond_branch:
/* same targets, can avoid doing the test :) */
if (filter[i].jt == filter[i].jf) {
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 78fe34986594..08643cba1494 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1557,9 +1557,16 @@ nocheck:
ret = 0;
out:
if (has_branch_stack(event)) {
- power_pmu_bhrb_enable(event);
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
- event->attr.branch_sample_type);
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter != -1) {
+ cpuhw->bhrb_filter = bhrb_filter;
+ power_pmu_bhrb_enable(event);
+ }
}
perf_pmu_enable(event->pmu);
@@ -1881,7 +1888,6 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
- u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1987,7 +1993,10 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- bhrb_filter = ppmu->bhrb_filter_map(
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (bhrb_filter == -1) {
@@ -2141,6 +2150,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
+ } else if (period) {
+ /* Account for interrupt in case of invalid SIAR */
+ if (perf_event_account_interrupt(event))
+ power_pmu_stop(event, 0);
}
}
@@ -2323,6 +2336,7 @@ int register_power_pmu(struct power_pmu *pmu)
pmu->name);
power_pmu.attr_groups = ppmu->attr_groups;
+ power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);
#ifdef MSR_HV
/*
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index cdb7bfbd157e..6e7e820508df 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1128,6 +1128,15 @@ static struct bin_attribute *if_bin_attrs[] = {
NULL,
};
+static struct attribute *cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group cpumask_attr_group = {
+ .attrs = cpumask_attrs,
+};
+
static struct attribute *if_attrs[] = {
&dev_attr_catalog_len.attr,
&dev_attr_catalog_version.attr,
@@ -1135,7 +1144,6 @@ static struct attribute *if_attrs[] = {
&dev_attr_sockets.attr,
&dev_attr_chipspersocket.attr,
&dev_attr_coresperchip.attr,
- &dev_attr_cpumask.attr,
NULL,
};
@@ -1151,6 +1159,7 @@ static const struct attribute_group *attr_groups[] = {
&event_desc_group,
&event_long_desc_group,
&if_group,
+ &cpumask_attr_group,
NULL,
};
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index a45d694a5d5d..62d0b54086f8 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -1289,7 +1289,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
header->misc = 0;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) {
+ switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
case 0:/* when MSR HV and PR not set in the trace-record */
header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
break;
@@ -1297,7 +1297,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
header->misc |= PERF_RECORD_MISC_GUEST_USER;
break;
case 2: /* MSR HV is 1 and PR is 0 */
- header->misc |= PERF_RECORD_MISC_HYPERVISOR;
+ header->misc |= PERF_RECORD_MISC_KERNEL;
break;
case 3: /* MSR HV is 1 and PR is 1 */
header->misc |= PERF_RECORD_MISC_USER;
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index a213a0aa5d25..8e53f2fc3fe0 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -13,9 +13,11 @@
#include <asm/ptrace.h>
#include <asm/perf_regs.h>
+u64 PERF_REG_EXTENDED_MASK;
+
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
-#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1))
+#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
@@ -69,10 +71,36 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
+/* Function to return the extended register values */
+static u64 get_ext_regs_value(int idx)
+{
+ switch (idx) {
+ case PERF_REG_POWERPC_MMCR0:
+ return mfspr(SPRN_MMCR0);
+ case PERF_REG_POWERPC_MMCR1:
+ return mfspr(SPRN_MMCR1);
+ case PERF_REG_POWERPC_MMCR2:
+ return mfspr(SPRN_MMCR2);
+#ifdef CONFIG_PPC64
+ case PERF_REG_POWERPC_MMCR3:
+ return mfspr(SPRN_MMCR3);
+ case PERF_REG_POWERPC_SIER2:
+ return mfspr(SPRN_SIER2);
+ case PERF_REG_POWERPC_SIER3:
+ return mfspr(SPRN_SIER3);
+#endif
+ default: return 0;
+ }
+}
+
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
- if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX))
- return 0;
+ u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ perf_reg_extended_max = PERF_REG_MAX_ISA_31;
+ else if (cpu_has_feature(CPU_FTR_ARCH_300))
+ perf_reg_extended_max = PERF_REG_MAX_ISA_300;
if (idx == PERF_REG_POWERPC_SIER &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
@@ -85,6 +113,16 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
IS_ENABLED(CONFIG_PPC32)))
return 0;
+ if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
+ return get_ext_regs_value(idx);
+
+ /*
+ * If the idx is referring to a value beyond the
+ * supported registers, return 0 with a warning
+ */
+ if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index f7cff7f36a1c..83148656b524 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -87,6 +87,8 @@
#define POWER10_MMCRA_IFM3 0x00000000C0000000UL
#define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL
+extern u64 PERF_REG_EXTENDED_MASK;
+
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
@@ -397,6 +399,7 @@ static struct power_pmu power10_pmu = {
.cache_events = &power10_cache_events,
.attr_groups = power10_pmu_attr_groups,
.bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power10_pmu(void)
@@ -408,6 +411,9 @@ int init_power10_pmu(void)
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10"))
return -ENODEV;
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
+
rc = register_power_pmu(&power10_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 05dae38b969a..2a57e93a79dc 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -90,6 +90,8 @@ enum {
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
+extern u64 PERF_REG_EXTENDED_MASK;
+
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
@@ -434,6 +436,7 @@ static struct power_pmu power9_pmu = {
.cache_events = &power9_cache_events,
.attr_groups = power9_pmu_attr_groups,
.bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power9_pmu(void)
@@ -457,6 +460,9 @@ int init_power9_pmu(void)
}
}
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;
+
rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 87737ec86d39..1dc9d3c81872 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx
select PPC_HAVE_PMU_SUPPORT
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
- select HAVE_ARCH_VMAP_STACK
+ select HAVE_ARCH_VMAP_STACK if !ADB_PMU
config PPC_BOOK3S_601
bool "PowerPC 601"
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 77513a80cef9..345ab062b21a 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void)
return;
}
- if (pvr_version_is(PVR_POWER9))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
pnv_power9_idle_init();
for (i = 0; i < nr_pnv_idle_states; i++)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index c9c25fb0783c..023a4f987bb2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2705,7 +2705,7 @@ void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
struct iommu_table *tbl = pe->table_group.tables[0];
int64_t rc;
- if (pe->dma_setup_done)
+ if (!pe->dma_setup_done)
return;
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index c6e0d8abf75e..7a974ed6b240 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -107,22 +107,28 @@ static int pseries_cpu_disable(void)
*/
static void pseries_cpu_die(unsigned int cpu)
{
- int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
+ unsigned long timeout = jiffies + msecs_to_jiffies(120000);
- for (tries = 0; tries < 25; tries++) {
+ while (true) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
- cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
+ cpu, pcpu);
+ timeout = jiffies + msecs_to_jiffies(120000);
+ }
+
+ cond_resched();
}
- if (cpu_status != 0) {
- printk("Querying DEAD? cpu %i (%i) shows %i\n",
- cpu, pcpu, cpu_status);
+ if (cpu_status == QCSS_HARDWARE_ERROR) {
+ pr_warn("CPU %i (hwid %i) reported error while dying\n",
+ cpu, pcpu);
}
/* Isolation and deallocation are definitely done by
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f439f0dfea7d..a88a707a608a 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -822,7 +822,7 @@ free_stats:
kfree(stats);
return rc ? rc : seq_buf_used(&s);
}
-DEVICE_ATTR_RO(perf_stats);
+DEVICE_ATTR_ADMIN_RO(perf_stats);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index f3736fcd98fc..13c86a292c6d 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
- orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 7b5905529146..b7821ac36d28 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -32,6 +32,7 @@ config RISCV
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
+ select CLINT_TIMER if !MMU
select COMMON_CLK
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
@@ -81,7 +82,7 @@ config RISCV
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
select RISCV_INTC
- select RISCV_TIMER
+ select RISCV_TIMER if RISCV_SBI
select SPARSEMEM_STATIC if 32BIT
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
@@ -333,19 +334,6 @@ menu "Kernel features"
source "kernel/Kconfig.hz"
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
config RISCV_SBI_V01
bool "SBI v0.1 support"
default y
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 6c88148f1b9b..8a55f6156661 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -12,6 +12,7 @@ config SOC_SIFIVE
config SOC_VIRT
bool "QEMU Virt Machine"
+ select CLINT_TIMER if RISCV_M_MODE
select POWER_RESET
select POWER_RESET_SYSCON
select POWER_RESET_SYSCON_POWEROFF
@@ -24,6 +25,7 @@ config SOC_VIRT
config SOC_KENDRYTE
bool "Kendryte K210 SoC"
depends on !MMU
+ select CLINT_TIMER if RISCV_M_MODE
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
select SIFIVE_PLIC
diff --git a/arch/riscv/boot/dts/kendryte/k210.dtsi b/arch/riscv/boot/dts/kendryte/k210.dtsi
index c1df56ccb8d5..d2d0ff645632 100644
--- a/arch/riscv/boot/dts/kendryte/k210.dtsi
+++ b/arch/riscv/boot/dts/kendryte/k210.dtsi
@@ -95,10 +95,12 @@
#clock-cells = <1>;
};
- clint0: interrupt-controller@2000000 {
+ clint0: clint@2000000 {
+ #interrupt-cells = <1>;
compatible = "riscv,clint0";
reg = <0x2000000 0xC000>;
- interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
+ interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+ &cpu1_intc 3 &cpu1_intc 7>;
clocks = <&sysctl K210_CLK_ACLK>;
};
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index f27596e9663e..e046a0babde4 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -26,6 +26,7 @@ CONFIG_EXPERT=y
CONFIG_SLOB=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
+CONFIG_SOC_VIRT=y
CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
@@ -49,7 +50,6 @@ CONFIG_VIRTIO_BLK=y
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
-# CONFIG_DEVMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
@@ -57,16 +57,13 @@ CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-CONFIG_SIFIVE_PLIC=y
-# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 3a55f0e00d6c..2c2cda6cc1c5 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
@@ -62,6 +63,8 @@ CONFIG_HVC_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
CONFIG_DRM=y
@@ -77,6 +80,8 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h
index a279b17a6aad..0789fd37b40a 100644
--- a/arch/riscv/include/asm/clint.h
+++ b/arch/riscv/include/asm/clint.h
@@ -1,39 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
#ifndef _ASM_RISCV_CLINT_H
-#define _ASM_RISCV_CLINT_H 1
+#define _ASM_RISCV_CLINT_H
-#include <linux/io.h>
-#include <linux/smp.h>
+#include <linux/types.h>
+#include <asm/mmio.h>
#ifdef CONFIG_RISCV_M_MODE
-extern u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void);
-
-static inline void clint_send_ipi_single(unsigned long hartid)
-{
- writel(1, clint_ipi_base + hartid);
-}
-
-static inline void clint_send_ipi_mask(const struct cpumask *mask)
-{
- int cpu;
-
- for_each_cpu(cpu, mask)
- clint_send_ipi_single(cpuid_to_hartid_map(cpu));
-}
-
-static inline void clint_clear_ipi(unsigned long hartid)
-{
- writel(0, clint_ipi_base + hartid);
-}
-#else /* CONFIG_RISCV_M_MODE */
-#define clint_init_boot_cpu() do { } while (0)
-
-/* stubs to for code is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
-void clint_send_ipi_single(unsigned long hartid);
-void clint_send_ipi_mask(const struct cpumask *hartid_mask);
-void clint_clear_ipi(unsigned long hartid);
-#endif /* CONFIG_RISCV_M_MODE */
-
-#endif /* _ASM_RISCV_CLINT_H */
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index ace8a6e2d11d..845002cc2e57 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -66,6 +66,13 @@ do { \
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
#endif
#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 6dfd2a1446d5..df1f7c4cd433 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -15,6 +15,11 @@
struct seq_file;
extern unsigned long boot_cpu_hartid;
+struct riscv_ipi_ops {
+ void (*ipi_inject)(const struct cpumask *target);
+ void (*ipi_clear)(void);
+};
+
#ifdef CONFIG_SMP
/*
* Mapping between linux logical cpu index and hartid.
@@ -40,6 +45,12 @@ void arch_send_call_function_single_ipi(int cpu);
int riscv_hartid_to_cpuid(int hartid);
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
+/* Set custom IPI operations */
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+
+/* Clear IPI for current CPU */
+void riscv_clear_ipi(void);
+
/* Secondary hart entry */
asmlinkage void smp_callin(void);
@@ -81,6 +92,14 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
cpumask_set_cpu(boot_cpu_hartid, out);
}
+static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+}
+
+static inline void riscv_clear_ipi(void)
+{
+}
+
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
index d95f7b2a7f37..5962f8891f06 100644
--- a/arch/riscv/include/asm/stackprotector.h
+++ b/arch/riscv/include/asm/stackprotector.h
@@ -5,7 +5,6 @@
#include <linux/random.h>
#include <linux/version.h>
-#include <asm/timex.h>
extern unsigned long __stack_chk_guard;
@@ -18,12 +17,9 @@ extern unsigned long __stack_chk_guard;
static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary;
- unsigned long tsc;
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
- tsc = get_cycles();
- canary += tsc + (tsc << BITS_PER_LONG/2);
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
index bad2a7c2cda5..ab104905d4db 100644
--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h
@@ -7,41 +7,65 @@
#define _ASM_RISCV_TIMEX_H
#include <asm/csr.h>
-#include <asm/mmio.h>
typedef unsigned long cycles_t;
-extern u64 __iomem *riscv_time_val;
-extern u64 __iomem *riscv_time_cmp;
+#ifdef CONFIG_RISCV_M_MODE
-#ifdef CONFIG_64BIT
-#define mmio_get_cycles() readq_relaxed(riscv_time_val)
-#else
-#define mmio_get_cycles() readl_relaxed(riscv_time_val)
-#define mmio_get_cycles_hi() readl_relaxed(((u32 *)riscv_time_val) + 1)
-#endif
+#include <asm/clint.h>
+#ifdef CONFIG_64BIT
static inline cycles_t get_cycles(void)
{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- return csr_read(CSR_TIME);
- return mmio_get_cycles();
+ return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val));
}
#define get_cycles get_cycles
-#ifdef CONFIG_64BIT
-static inline u64 get_cycles64(void)
+static inline u32 get_cycles_hi(void)
{
+ return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+/*
+ * Much like MIPS, we may not have a viable counter to use at an early point
+ * in the boot process. Unfortunately we don't have a fallback, so instead
+ * we just return 0.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+ if (unlikely(clint_time_val == NULL))
+ return 0;
return get_cycles();
}
-#else /* CONFIG_64BIT */
+#define random_get_entropy() random_get_entropy()
+
+#else /* CONFIG_RISCV_M_MODE */
+
+static inline cycles_t get_cycles(void)
+{
+ return csr_read(CSR_TIME);
+}
+#define get_cycles get_cycles
+
static inline u32 get_cycles_hi(void)
{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- return csr_read(CSR_TIMEH);
- return mmio_get_cycles_hi();
+ return csr_read(CSR_TIMEH);
}
+#define get_cycles_hi get_cycles_hi
+#ifdef CONFIG_64BIT
+static inline u64 get_cycles64(void)
+{
+ return get_cycles();
+}
+#else /* CONFIG_64BIT */
static inline u64 get_cycles64(void)
{
u32 hi, lo;
@@ -55,6 +79,8 @@ static inline u64 get_cycles64(void)
}
#endif /* CONFIG_64BIT */
+#endif /* !CONFIG_RISCV_M_MODE */
+
#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index a5287ab9f7f2..dc93710f0b2f 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -31,7 +31,7 @@ obj-y += cacheinfo.o
obj-y += patch.o
obj-$(CONFIG_MMU) += vdso.o vdso/
-obj-$(CONFIG_RISCV_M_MODE) += clint.o traps_misaligned.o
+obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/riscv/kernel/clint.c b/arch/riscv/kernel/clint.c
deleted file mode 100644
index 3647980d14c3..000000000000
--- a/arch/riscv/kernel/clint.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 Christoph Hellwig.
- */
-
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/types.h>
-#include <asm/clint.h>
-#include <asm/csr.h>
-#include <asm/timex.h>
-#include <asm/smp.h>
-
-/*
- * This is the layout used by the SiFive clint, which is also shared by the qemu
- * virt platform, and the Kendryte KD210 at least.
- */
-#define CLINT_IPI_OFF 0
-#define CLINT_TIME_CMP_OFF 0x4000
-#define CLINT_TIME_VAL_OFF 0xbff8
-
-u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void)
-{
- struct device_node *np;
- void __iomem *base;
-
- np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
- if (!np) {
- panic("clint not found");
- return;
- }
-
- base = of_iomap(np, 0);
- if (!base)
- panic("could not map CLINT");
-
- clint_ipi_base = base + CLINT_IPI_OFF;
- riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
- riscv_time_val = base + CLINT_TIME_VAL_OFF;
-
- clint_clear_ipi(boot_cpu_hartid);
-}
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 2ff63d0cbb50..99e12faa5498 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return __ftrace_modify_call(rec->ip, addr, false);
}
+
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ int out;
+
+ ftrace_arch_code_modify_prepare();
+ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ ftrace_arch_code_modify_post_process();
+
+ return out;
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index f383ef5672b2..226ccce0f9e0 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+ struct cpumask hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static struct riscv_ipi_ops sbi_ipi_ops = {
+ .ipi_inject = sbi_send_cpumask_ipi
+};
int __init sbi_init(void)
{
@@ -587,5 +599,7 @@ int __init sbi_init(void)
__sbi_rfence = __sbi_rfence_v01;
}
+ riscv_set_ipi_ops(&sbi_ipi_ops);
+
return 0;
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index f04373be54a6..2c6dd329312b 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -18,7 +18,6 @@
#include <linux/swiotlb.h>
#include <linux/smp.h>
-#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
@@ -79,7 +78,6 @@ void __init setup_arch(char **cmdline_p)
#else
unflatten_device_tree();
#endif
- clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 17ba190e84a5..e996e08f1061 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -250,7 +250,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->epc -= 0x4;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 554b0fb47060..ea028d9e0d24 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/irq_work.h>
-#include <asm/clint.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -86,9 +85,25 @@ static void ipi_stop(void)
wait_for_interrupt();
}
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+ ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+ if (ipi_ops && ipi_ops->ipi_clear)
+ ipi_ops->ipi_clear();
+
+ csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
- struct cpumask hartid_mask;
int cpu;
smp_mb__before_atomic();
@@ -96,33 +111,22 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(&hartid_mask));
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(mask);
else
- clint_send_ipi_mask(mask);
+ pr_warn("SMP: IPI inject method not available\n");
}
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
- int hartid = cpuid_to_hartid_map(cpu);
-
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
- else
- clint_send_ipi_single(hartid);
-}
-
-static inline void clear_ipi(void)
-{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- csr_clear(CSR_IP, IE_SIE);
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(cpumask_of(cpu));
else
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ pr_warn("SMP: IPI inject method not available\n");
}
#ifdef CONFIG_IRQ_WORK
@@ -140,7 +144,7 @@ void handle_IPI(struct pt_regs *regs)
irq_enter();
- clear_ipi();
+ riscv_clear_ipi();
while (true) {
unsigned long ops;
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 356825a57551..96167d55ed98 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
-#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -147,8 +146,7 @@ asmlinkage __visible void smp_callin(void)
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
- if (!IS_ENABLED(CONFIG_RISCV_SBI))
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ riscv_clear_ipi();
/* All kernel threads share the same mm context. */
mmgrab(mm);
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index f3586e31ed1e..67db80e12d1f 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -22,13 +22,11 @@ SECTIONS
/* Beginning of code and text segment */
. = LOAD_OFFSET;
_start = .;
- _stext = .;
HEAD_TEXT_SECTION
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
- INIT_DATA_SECTION(16)
. = ALIGN(8);
__soc_early_init_table : {
__soc_early_init_table_start = .;
@@ -55,6 +53,7 @@ SECTIONS
. = ALIGN(SECTION_ALIGN);
.text : {
_text = .;
+ _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
@@ -67,6 +66,8 @@ SECTIONS
_etext = .;
}
+ INIT_DATA_SECTION(16)
+
/* Start of data section */
_sdata = .;
RO_DATA(SECTION_ALIGN)
@@ -97,6 +98,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 787c75f751a5..1dc89303b679 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -145,21 +145,21 @@ static phys_addr_t dtb_early_pa __initdata;
void __init setup_bootmem(void)
{
- struct memblock_region *reg;
phys_addr_t mem_size = 0;
phys_addr_t total_mem = 0;
- phys_addr_t mem_start, end = 0;
+ phys_addr_t mem_start, start, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
+ u64 i;
/* Find the memory region containing the kernel */
- for_each_memblock(memory, reg) {
- end = reg->base + reg->size;
+ for_each_mem_range(i, &start, &end) {
+ phys_addr_t size = end - start;
if (!total_mem)
- mem_start = reg->base;
- if (reg->base <= vmlinux_start && vmlinux_end <= end)
- BUG_ON(reg->size == 0);
- total_mem = total_mem + reg->size;
+ mem_start = start;
+ if (start <= vmlinux_start && vmlinux_end <= end)
+ BUG_ON(size == 0);
+ total_mem = total_mem + size;
}
/*
@@ -191,15 +191,6 @@ void __init setup_bootmem(void)
early_init_fdt_scan_reserved_mem();
memblock_allow_resize();
memblock_dump_all();
-
- for_each_memblock(memory, reg) {
- unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
- unsigned long end_pfn = memblock_region_memory_end_pfn(reg);
-
- memblock_set_node(PFN_PHYS(start_pfn),
- PFN_PHYS(end_pfn - start_pfn),
- &memblock.memory, 0);
- }
}
#ifdef CONFIG_MMU
@@ -226,12 +217,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
ptep = &fixmap_pte[pte_index(addr)];
- if (pgprot_val(prot)) {
+ if (pgprot_val(prot))
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
- } else {
+ else
pte_clear(&init_mm, addr, ptep);
- local_flush_tlb_page(addr);
- }
+ local_flush_tlb_page(addr);
}
static pte_t *__init get_pte_virt(phys_addr_t pa)
@@ -465,7 +455,7 @@ static void __init setup_vm_final(void)
{
uintptr_t va, map_size;
phys_addr_t pa, start, end;
- struct memblock_region *reg;
+ u64 i;
/* Set mmu_enabled flag */
mmu_enabled = true;
@@ -476,14 +466,9 @@ static void __init setup_vm_final(void)
PGDIR_SIZE, PAGE_TABLE);
/* Map all memory banks */
- for_each_memblock(memory, reg) {
- start = reg->base;
- end = start + reg->size;
-
+ for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
- if (memblock_is_nomap(reg))
- continue;
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
@@ -516,6 +501,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#else
dtb_early_va = (void *)dtb_pa;
#endif
+ dtb_early_pa = dtb_pa;
}
static inline void setup_vm_final(void)
@@ -545,7 +531,7 @@ static void __init resource_init(void)
{
struct memblock_region *region;
- for_each_memblock(memory, region) {
+ for_each_mem_region(region) {
struct resource *res;
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 87b4ab3d3c77..12ddd1f6bf70 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -85,16 +85,16 @@ static void __init populate(void *start, void *end)
void __init kasan_init(void)
{
- struct memblock_region *reg;
- unsigned long i;
+ phys_addr_t _start, _end;
+ u64 i;
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
(void *)kasan_mem_to_shadow((void *)
VMALLOC_END));
- for_each_memblock(memory, reg) {
- void *start = (void *)__va(reg->base);
- void *end = (void *)__va(reg->base + reg->size);
+ for_each_mem_range(i, &_start, &_end) {
+ void *start = (void *)_start;
+ void *end = (void *)_end;
if (start >= end)
break;
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index bc5f2204693f..579575f9cdae 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1020,7 +1020,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_zext64(dst, ctx);
break;
}
- /* Fallthrough. */
+ fallthrough;
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_X:
@@ -1079,7 +1079,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case 16:
emit(rv_slli(lo(rd), lo(rd), 16), ctx);
emit(rv_srli(lo(rd), lo(rd), 16), ctx);
- /* Fallthrough. */
+ fallthrough;
case 32:
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3d86e12e8e3c..d509bf23ef78 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
config GENERIC_LOCKBREAK
- def_bool y if PREEMPTTION
+ def_bool y if PREEMPTION
config PGSTE
def_bool y if KVM
@@ -185,6 +185,7 @@ config S390
select OLD_SIGSUSPEND3
select PCI_DOMAINS if PCI
select PCI_MSI if PCI
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
@@ -791,23 +792,6 @@ config CRASH_DUMP
endmenu
-config SECCOMP
- def_bool y
- prompt "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y.
-
config CCW
def_bool y
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 0cf9a82326a8..7228aabe9da6 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -626,6 +626,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -807,6 +808,7 @@ CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
@@ -818,6 +820,7 @@ CONFIG_PREEMPT_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
@@ -829,6 +832,7 @@ CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 5df9759e8ff6..fab03b7a6932 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -617,6 +617,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -763,6 +764,7 @@ CONFIG_PANIC_ON_OOPS=y
CONFIG_TEST_LOCKUP=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
@@ -771,6 +773,7 @@ CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_LKDTM=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 4091c50449cd..8f67c55625f9 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -74,5 +74,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 6d01c96aeb5c..6813bfa1eeb7 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -39,13 +39,6 @@ csum_partial(const void *buff, int len, __wsum sum)
return sum;
}
-static inline __wsum
-csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst,src,len);
- return csum_partial(dst, len, sum);
-}
-
/*
* Fold a partial checksum without adding pseudo headers
*/
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 9547cd5d6cdc..ea5b9c34b7be 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -63,8 +63,6 @@ typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
typedef struct {
u32 mask;
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 50b4ce8cddfd..918f0ba4f4d2 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -29,7 +29,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
@@ -37,7 +37,7 @@
new__ = old__ op (val); \
prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
new__; \
})
@@ -68,7 +68,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
@@ -84,7 +84,7 @@
: [val__] "d" (val__) \
: "cc"); \
} \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
@@ -95,14 +95,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
old__ + val__; \
})
@@ -114,14 +114,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
@@ -136,10 +136,10 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg(ptr__, oval, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -152,10 +152,10 @@
({ \
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -171,11 +171,11 @@
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7eb01a5459cd..b55561cc8786 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
- return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
- return (p4d_t *) pgd;
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+ return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+ return (p4d_t *) pgdp;
}
+#define p4d_offset_lockless p4d_offset_lockless
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
- if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
- return (pud_t *) p4d_deref(*p4d) + pud_index(address);
- return (pud_t *) p4d;
+ return p4d_offset_lockless(pgdp, *pgdp, address);
+}
+
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
+{
+ if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+ return (pud_t *) p4d_deref(p4d) + pud_index(address);
+ return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+ return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
+{
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+ return (pmd_t *) pud_deref(pud) + pmd_index(address);
+ return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
- return (pmd_t *) pud_deref(*pud) + pmd_index(address);
- return (pmd_t *) pud;
+ return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index faca269d5f27..a44ddc2f2dec 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);
+void do_secure_storage_violation(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 88bb42ca5008..f7f1e64e0d98 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -33,21 +33,19 @@ void enabled_wait(void)
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
- trace_cpu_idle_rcuidle(1, smp_processor_id());
local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
local_irq_restore(flags);
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
/* Account time spent with enabled wait psw loaded as idle time. */
- write_seqcount_begin(&idle->seqcount);
+ raw_write_seqcount_begin(&idle->seqcount);
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(cputime_to_nsecs(idle_time));
- write_seqcount_end(&idle->seqcount);
+ raw_write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d2a71d872638..fc30e799bd84 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -228,6 +228,7 @@ NOKPROBE_SYMBOL(pop_kprobe);
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
@@ -331,83 +332,7 @@ static void __used kretprobe_trampoline_holder(void)
*/
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kretprobe_instance *ri;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address;
- unsigned long trampoline_address;
- kprobe_opcode_t *correct_ret_addr;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- ri = NULL;
- orig_ret_address = 0;
- correct_ret_addr = NULL;
- trampoline_address = (unsigned long) &kretprobe_trampoline;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long) ri->ret_addr;
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long) ri->ret_addr;
-
- if (ri->rp && ri->rp->handler) {
- ri->ret_addr = correct_ret_addr;
- ri->rp->handler(ri, regs);
- }
-
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- regs->psw.addr = orig_ret_address;
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 2c27907a5ffc..9a92638360ee 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK(do_secure_storage_access) /* 3d */
PGM_CHECK(do_non_secure_storage_access) /* 3e */
-PGM_CHECK_DEFAULT /* 3f */
+PGM_CHECK(do_secure_storage_violation) /* 3f */
PGM_CHECK(monitor_event_exception) /* 40 */
PGM_CHECK_DEFAULT /* 41 */
PGM_CHECK_DEFAULT /* 42 */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 11d2f7d05f91..a76dd27fb2e8 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1268,7 +1268,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
cb->pc == 1 &&
cb->qc == 0 &&
cb->reserved2 == 0 &&
- cb->key == PAGE_DEFAULT_KEY &&
cb->reserved3 == 0 &&
cb->reserved4 == 0 &&
cb->reserved5 == 0 &&
@@ -1330,7 +1329,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
kfree(data);
return -EINVAL;
}
-
+ /*
+ * Override access key in any case, since user space should
+ * not be able to set it, nor should it care about it.
+ */
+ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable();
if (!target->thread.ri_cb)
target->thread.ri_cb = data;
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 125c7f6e8715..1788a5454b6f 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->k = 1;
cb->ps = 1;
cb->pc = 1;
- cb->key = PAGE_DEFAULT_KEY;
+ cb->key = PAGE_DEFAULT_KEY >> 4;
cb->v = 1;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e600f6953d7c..d44e522c569b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -484,8 +484,9 @@ static struct resource __initdata *standard_resources[] = {
static void __init setup_resources(void)
{
struct resource *res, *std_res, *sub_res;
- struct memblock_region *reg;
+ phys_addr_t start, end;
int j;
+ u64 i;
code_resource.start = (unsigned long) _text;
code_resource.end = (unsigned long) _etext - 1;
@@ -494,7 +495,7 @@ static void __init setup_resources(void)
bss_resource.start = (unsigned long) __bss_start;
bss_resource.end = (unsigned long) __bss_stop - 1;
- for_each_memblock(memory, reg) {
+ for_each_mem_range(i, &start, &end) {
res = memblock_alloc(sizeof(*res), 8);
if (!res)
panic("%s: Failed to allocate %zu bytes align=0x%x\n",
@@ -502,8 +503,13 @@ static void __init setup_resources(void)
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
- res->start = reg->base;
- res->end = reg->base + reg->size - 1;
+ res->start = start;
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in resourses, end points to the last byte in
+ * the range.
+ */
+ res->end = end - 1;
request_resource(&iomem_resource, res);
for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
@@ -619,7 +625,7 @@ static struct notifier_block kdump_mem_nb = {
/*
* Make sure that the area behind memory_end is protected
*/
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
{
if (memory_end_set)
memblock_reserve(memory_end, ULONG_MAX);
@@ -628,7 +634,7 @@ static void reserve_memory_end(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
@@ -640,7 +646,7 @@ static void reserve_oldmem(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
@@ -776,8 +782,8 @@ static void __init memblock_add_mem_detect_info(void)
unsigned long start, end;
int i;
- memblock_dbg("physmem info source: %s (%hhd)\n",
- get_mem_info_source(), mem_detect.info_source);
+ pr_debug("physmem info source: %s (%hhd)\n",
+ get_mem_info_source(), mem_detect.info_source);
/* keep memblock lists close to the kernel */
memblock_set_bottom_up(true);
for_each_mem_detect_block(i, &start, &end) {
@@ -819,14 +825,15 @@ static void __init reserve_kernel(void)
static void __init setup_memory(void)
{
- struct memblock_region *reg;
+ phys_addr_t start, end;
+ u64 i;
/*
* Init storage key for present memory
*/
- for_each_memblock(memory, reg) {
- storage_key_init_range(reg->base, reg->base + reg->size);
- }
+ for_each_mem_range(i, &start, &end)
+ storage_key_init_range(start, end);
+
psw_set_key(PAGE_DEFAULT_KEY);
/* Only cosmetics */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index fc5419ac64c8..7f1266c24f6b 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -19,7 +19,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
unwind_for_each_frame(&state, task, regs, 0) {
addr = unwind_get_return_address(&state);
- if (!addr || !consume_entry(cookie, addr, false))
+ if (!addr || !consume_entry(cookie, addr))
break;
}
}
@@ -56,7 +56,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
return -EINVAL;
#endif
- if (!consume_entry(cookie, addr, false))
+ if (!consume_entry(cookie, addr))
return -EINVAL;
}
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 10456bc936fb..1c3b48165e86 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -26,7 +26,7 @@
16 32 lchown - sys_lchown16
19 common lseek sys_lseek compat_sys_lseek
20 common getpid sys_getpid sys_getpid
-21 common mount sys_mount compat_sys_mount
+21 common mount sys_mount sys_mount
22 common umount sys_oldumount sys_oldumount
23 32 setuid - sys_setuid16
24 32 getuid - sys_getuid16
@@ -134,8 +134,8 @@
142 64 select sys_select -
143 common flock sys_flock sys_flock
144 common msync sys_msync sys_msync
-145 common readv sys_readv compat_sys_readv
-146 common writev sys_writev compat_sys_writev
+145 common readv sys_readv sys_readv
+146 common writev sys_writev sys_writev
147 common getsid sys_getsid sys_getsid
148 common fdatasync sys_fdatasync sys_fdatasync
149 common _sysctl - -
@@ -316,7 +316,7 @@
306 common splice sys_splice sys_splice
307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
308 common tee sys_tee sys_tee
-309 common vmsplice sys_vmsplice compat_sys_vmsplice
+309 common vmsplice sys_vmsplice sys_vmsplice
310 common move_pages sys_move_pages compat_sys_move_pages
311 common getcpu sys_getcpu sys_getcpu
312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
@@ -347,8 +347,8 @@
337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32
338 common syncfs sys_syncfs sys_syncfs
339 common setns sys_setns sys_setns
-340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv
+341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev
342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
343 common kcmp sys_kcmp sys_kcmp
344 common finit_module sys_finit_module sys_finit_module
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 37695499717d..177ccfbda40a 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -181,6 +181,7 @@ SECTIONS
/* Debugging sections. */
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
/* Sections to be discarded */
DISCARDS
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 4c8c063bce5b..996884dcc9fd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -859,6 +859,21 @@ void do_non_secure_storage_access(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+ /*
+ * Either KVM messed up the secure guest mapping or the same
+ * page is mapped into multiple secure guests.
+ *
+ * This exception is only triggered when a guest 2 is running
+ * and can therefore never occur in kernel context.
+ */
+ printk_ratelimited(KERN_WARNING
+ "Secure storage violation in task: %s, pid %d\n",
+ current->comm, current->pid);
+ send_sig(SIGSEGV, current, 0);
+}
+
#else
void do_secure_storage_access(struct pt_regs *regs)
{
@@ -869,4 +884,9 @@ void do_non_secure_storage_access(struct pt_regs *regs)
{
default_trap_handler(regs);
}
+
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+ default_trap_handler(regs);
+}
#endif
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index fc141893d028..567c69f3069e 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -183,9 +183,9 @@ static void mark_kernel_pgd(void)
void __init cmma_init_nodat(void)
{
- struct memblock_region *reg;
struct page *page;
unsigned long start, end, ix;
+ int i;
if (cmma_flag < 2)
return;
@@ -193,9 +193,7 @@ void __init cmma_init_nodat(void)
mark_kernel_pgd();
/* Set all kernel pages not used for page tables to stable/no-dat */
- for_each_memblock(memory, reg) {
- start = memblock_region_memory_base_pfn(reg);
- end = memblock_region_memory_end_pfn(reg);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
page = pfn_to_page(start);
for (ix = start; ix < end; ix++, page++) {
if (__test_and_clear_bit(PG_arch_1, &page->flags))
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1aed1a4dfc2d..b239f2ba93b0 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -402,6 +402,7 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pud)
goto out;
+ p4d_populate(&init_mm, p4d, pud);
}
ret = modify_pud_table(p4d, addr, next, add, direct);
if (ret)
@@ -554,10 +555,11 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
*/
void __init vmem_map_init(void)
{
- struct memblock_region *reg;
+ phys_addr_t base, end;
+ u64 i;
- for_each_memblock(memory, reg)
- vmem_add_range(reg->base, reg->size);
+ for_each_mem_range(i, &base, &end)
+ vmem_add_range(base, end - base);
__set_memory((unsigned long)_stext,
(unsigned long)(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3902c9f6f2d6..1804230dd8d8 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -668,10 +668,27 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_disable_device(struct zpci_dev *zdev)
{
zpci_dma_exit_device(zdev);
+ /*
+ * The zPCI function may already be disabled by the platform, this is
+ * detected in clp_disable_fh() which becomes a no-op.
+ */
return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
+void zpci_remove_device(struct zpci_dev *zdev)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (pdev) {
+ if (pdev->is_virtfn)
+ return zpci_remove_virtfn(pdev, zdev->vfn);
+ pci_stop_and_remove_bus_device_locked(pdev);
+ }
+}
+
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@@ -716,13 +733,8 @@ void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
- if (zdev->zbus->bus) {
- struct pci_dev *pdev;
-
- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
- if (pdev)
- pci_stop_and_remove_bus_device_locked(pdev);
- }
+ if (zdev->zbus->bus)
+ zpci_remove_device(zdev);
switch (zdev->state) {
case ZPCI_FN_STATE_ONLINE:
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 642a99384688..5967f3014156 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -132,13 +132,14 @@ static int zpci_bus_link_virtfn(struct pci_dev *pdev,
{
int rc;
- virtfn->physfn = pci_dev_get(pdev);
rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
- if (rc) {
- pci_dev_put(pdev);
- virtfn->physfn = NULL;
+ if (rc)
return rc;
- }
+
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ virtfn->physfn = pci_dev_get(pdev);
+
return 0;
}
@@ -151,9 +152,9 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
int rc = 0;
- virtfn->is_virtfn = 1;
- virtfn->multifunction = 0;
- WARN_ON(vfid < 0);
+ if (!zbus->multifunction)
+ return 0;
+
/* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
@@ -165,11 +166,17 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
zdev = zbus->function[i];
if (zdev && zdev->is_physfn) {
pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (!pdev)
+ continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
break;
}
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
}
}
return rc;
@@ -178,12 +185,23 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
struct pci_dev *virtfn, int vfn)
{
- virtfn->is_virtfn = 1;
- virtfn->multifunction = 0;
return 0;
}
#endif
+void pcibios_bus_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ /*
+ * With pdev->no_vf_scan the common PCI probing code does not
+ * perform PF/VF linking.
+ */
+ if (zdev->vfn)
+ zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
+
+}
+
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
struct pci_bus *bus;
@@ -214,20 +232,10 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
}
pdev = pci_scan_single_device(bus, zdev->devfn);
- if (pdev) {
- if (!zdev->is_physfn) {
- rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
- if (rc)
- goto failed_with_pdev;
- }
+ if (pdev)
pci_bus_add_device(pdev);
- }
- return 0;
-failed_with_pdev:
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- return rc;
+ return 0;
}
static void zpci_bus_add_devices(struct zpci_bus *zbus)
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
index 89be3c354b7b..4972433df458 100644
--- a/arch/s390/pci/pci_bus.h
+++ b/arch/s390/pci/pci_bus.h
@@ -29,3 +29,16 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
}
+
+#ifdef CONFIG_PCI_IOV
+static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn)
+{
+
+ pci_lock_rescan_remove();
+ /* Linux' vfid's start at 0 vfn at 1 */
+ pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
+ pci_unlock_rescan_remove();
+}
+#else /* CONFIG_PCI_IOV */
+static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {}
+#endif /* CONFIG_PCI_IOV */
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index fdebd286f402..d9ae7456dd4c 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -92,6 +92,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
break;
}
+ /* the configuration request may be stale */
+ if (zdev->state != ZPCI_FN_STATE_STANDBY)
+ break;
zdev->fh = ccdf->fh;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
ret = zpci_enable_device(zdev);
@@ -118,7 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
if (!zdev)
break;
if (pdev)
- pci_stop_and_remove_bus_device_locked(pdev);
+ zpci_remove_device(zdev);
ret = zpci_disable_device(zdev);
if (ret)
@@ -137,9 +140,11 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
/* Give the driver a hint that the function is
* already unusable. */
pdev->error_state = pci_channel_io_perm_failure;
- pci_stop_and_remove_bus_device_locked(pdev);
+ zpci_remove_device(zdev);
}
+ zdev->fh = ccdf->fh;
+ zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
if (!clp_get_state(ccdf->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index d20927128fce..18278152c91c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -600,22 +600,6 @@ config PHYSICAL_START
where the fail safe kernel needs to run at a different address
than the panic-ed kernel.
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl, it cannot be disabled and the task is only
- allowed to execute a few safe syscalls defined by each seccomp
- mode.
-
- If unsure, say N.
-
config SMP
bool "Symmetric multi-processing support"
depends on SYS_SUPPORTS_SMP
diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c
index f3dc3f25b3ff..143747c45206 100644
--- a/arch/sh/drivers/platform_early.c
+++ b/arch/sh/drivers/platform_early.c
@@ -246,7 +246,7 @@ static int __init sh_early_platform_driver_probe_id(char *class_str,
case EARLY_PLATFORM_ID_ERROR:
pr_warn("%s: unable to parse %s parameter\n",
class_str, epdrv->pdrv->driver.name);
- /* fall-through */
+ fallthrough;
case EARLY_PLATFORM_ID_UNSET:
match = NULL;
break;
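This and the other comment-to-keyword conversions in this series switch to the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which lets -Wimplicit-fallthrough verify that the fall-through is intentional. A minimal sketch of the idiom (function and values are illustrative):

	#include <linux/compiler_attributes.h>

	static int classify(int c)
	{
		switch (c) {
		case 0:
			c++;
			fallthrough;	/* intentional: continue into case 1 */
		case 1:
			return c + 1;
		default:
			return 0;
		}
	}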
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h
index 91571a42e44e..1a391e3a7659 100644
--- a/arch/sh/include/asm/checksum_32.h
+++ b/arch/sh/include/asm/checksum_32.h
@@ -30,10 +30,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+#define _HAVE_ARCH_CSUM_AND_COPY
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
@@ -42,23 +41,18 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* access_ok().
*/
static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_generic((__force const void *)src, dst,
- len, sum, err_ptr, NULL);
- if (len)
- *err_ptr = -EFAULT;
- return sum;
+ if (!access_ok(src, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
/*
@@ -199,16 +193,10 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+ int len)
{
- if (access_ok(dst, len))
- return csum_partial_copy_generic((__force const void *)src,
- dst, len, sum, NULL, err_ptr);
-
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
#endif /* __ASM_SH_CHECKSUM_H */
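Under the reworked interface the checksum-and-copy helpers take only src, dst and len; the initial sum is fixed at ~0U inside the assembly, so a return value of 0 is reserved for a faulting access (previously reported through *err_ptr). A hypothetical caller might look like this (sketch only, names are illustrative):

	#include <linux/uaccess.h>
	#include <net/checksum.h>

	static int copy_and_sum_from_user(const void __user *usrc, void *kdst,
					  int len, __wsum *out)
	{
		__wsum sum = csum_and_copy_from_user(usrc, kdst, len);

		if (!sum)		/* 0 now means the copy faulted */
			return -EFAULT;
		*out = sum;
		return 0;
	}

The sparc conversions further below follow the same contract.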
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1a0d7cf71c10..100bf241340b 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -8,7 +8,6 @@
#ifdef CONFIG_SMP
-#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/percpu.h>
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index 08e1af63edd9..34e25a439c81 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -486,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rn & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_N:
pr_cont("dr%d", rn);
break;
@@ -495,7 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rm & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_M:
pr_cont("dr%d", rm);
break;
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index ad963104d22d..91ab2607a1ff 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -370,7 +370,6 @@ syscall_trace_entry:
nop
cmp/eq #-1, r0
bt syscall_exit
- mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 0d5f3c9d52f3..e4147efa9ec6 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -266,7 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->pc = addr;
- /* fallthrough */
+ fallthrough;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 318296f48f1a..756100b01e84 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -204,6 +204,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->pr;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->pr = (unsigned long)kretprobe_trampoline;
@@ -302,62 +303,9 @@ static void __used kretprobe_trampoline_holder(void)
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ regs->pc = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more then one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler) {
- __this_cpu_write(current_kprobe, &ri->rp->kp);
- ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, NULL);
- }
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- regs->pc = orig_ret_address;
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
-
- return orig_ret_address;
+ return 1;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
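The open-coded kretprobe bookkeeping is gone; architectures now defer to the generic trampoline handler and only apply the architecture-specific fixup. A sketch of the resulting pattern, mirroring the hunk above (not a drop-in implementation):

	/*
	 * __kretprobe_trampoline_handler() walks the kretprobe instances for
	 * the current task, runs their handlers and returns the original
	 * return address; the arch handler only has to resume there.
	 */
	static int trampoline_handler(struct kprobe *p, struct pt_regs *regs)
	{
		regs->pc = __kretprobe_trampoline_handler(regs,
							  &kretprobe_trampoline,
							  NULL);
		return 1;	/* tell kprobe_handler() to skip post-handling */
	}

The sparc conversion further below applies the same helper, additionally updating both %tpc and %tnpc.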
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index b05bf92f9c32..5281685f6ad1 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -455,16 +455,11 @@ long arch_ptrace(struct task_struct *child, long request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
- * Tracing decided this syscall should not happen.
- * We'll return a bogus call number to get an ENOSYS
- * error, but leave the original number in regs->regs[0].
- */
- ret = -1L;
+ tracehook_report_syscall_entry(regs)) {
+ regs->regs[0] = -ENOSYS;
+ return -1;
+ }
if (secure_computing() == -1)
return -1;
@@ -475,7 +470,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
- return ret ?: regs->regs[0];
+ return 0;
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index a0fbb8427b39..4fe3f00137bc 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -418,7 +418,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->regs[0] = save_r0;
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index bde7a6c01aaf..3161b9ccd2a5 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -76,6 +76,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 97b5c2d9fec4..3e07074e0098 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -173,47 +173,27 @@ ENTRY(csum_partial)
mov r6, r0
/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
- int sum, int *src_err_ptr, int *dst_err_ptr)
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
*/
/*
- * Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- * DST definitions? It's damn hard to trigger all cases. I hope I got
- * them all but there's no guarantee.
+ * Copy from ds while checksumming, otherwise like csum_partial with initial
+ * sum being ~0U
*/
-#define SRC(...) \
+#define EXC(...) \
9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
-#define DST(...) \
- 9999: __VA_ARGS__ ; \
- .section __ex_table, "a"; \
- .long 9999b, 6002f ; \
- .previous
-
!
! r4: const char *SRC
! r5: char *DST
! r6: int LEN
-! r7: int SUM
-!
-! on stack:
-! int *SRC_ERR_PTR
-! int *DST_ERR_PTR
!
ENTRY(csum_partial_copy_generic)
- mov.l r5,@-r15
- mov.l r6,@-r15
-
+ mov #-1,r7
mov #3,r0 ! Check src and dest are equally aligned
mov r4,r1
and r0,r1
@@ -243,11 +223,11 @@ ENTRY(csum_partial_copy_generic)
clrt
.align 2
5:
-SRC( mov.b @r4+,r1 )
-SRC( mov.b @r4+,r0 )
+EXC( mov.b @r4+,r1 )
+EXC( mov.b @r4+,r0 )
extu.b r1,r1
-DST( mov.b r1,@r5 )
-DST( mov.b r0,@(1,r5) )
+EXC( mov.b r1,@r5 )
+EXC( mov.b r0,@(1,r5) )
extu.b r0,r0
add #2,r5
@@ -276,8 +256,8 @@ DST( mov.b r0,@(1,r5) )
! Handle first two bytes as a special case
.align 2
1:
-SRC( mov.w @r4+,r0 )
-DST( mov.w r0,@r5 )
+EXC( mov.w @r4+,r0 )
+EXC( mov.w r0,@r5 )
add #2,r5
extu.w r0,r0
addc r0,r7
@@ -292,32 +272,32 @@ DST( mov.w r0,@r5 )
clrt
.align 2
1:
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@r5 )
-DST( mov.l r1,@(4,r5) )
+EXC( mov.l r0,@r5 )
+EXC( mov.l r1,@(4,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(8,r5) )
-DST( mov.l r1,@(12,r5) )
+EXC( mov.l r0,@(8,r5) )
+EXC( mov.l r1,@(12,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(16,r5) )
-DST( mov.l r1,@(20,r5) )
+EXC( mov.l r0,@(16,r5) )
+EXC( mov.l r1,@(20,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(24,r5) )
-DST( mov.l r1,@(28,r5) )
+EXC( mov.l r0,@(24,r5) )
+EXC( mov.l r1,@(28,r5) )
addc r1,r7
add #32,r5
movt r0
@@ -335,9 +315,9 @@ DST( mov.l r1,@(28,r5) )
clrt
shlr2 r6
3:
-SRC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r0 )
addc r0,r7
-DST( mov.l r0,@r5 )
+EXC( mov.l r0,@r5 )
add #4,r5
movt r0
dt r6
@@ -353,8 +333,8 @@ DST( mov.l r0,@r5 )
mov #2,r1
cmp/hs r1,r6
bf 5f
-SRC( mov.w @r4+,r0 )
-DST( mov.w r0,@r5 )
+EXC( mov.w @r4+,r0 )
+EXC( mov.w r0,@r5 )
extu.w r0,r0
add #2,r5
cmp/eq r1,r6
@@ -363,8 +343,8 @@ DST( mov.w r0,@r5 )
shll16 r0
addc r0,r7
5:
-SRC( mov.b @r4+,r0 )
-DST( mov.b r0,@r5 )
+EXC( mov.b @r4+,r0 )
+EXC( mov.b r0,@r5 )
extu.b r0,r0
#ifndef __LITTLE_ENDIAN__
shll8 r0
@@ -373,42 +353,13 @@ DST( mov.b r0,@r5 )
mov #0,r0
addc r0,r7
7:
-5000:
# Exception handler:
.section .fixup, "ax"
6001:
- mov.l @(8,r15),r0 ! src_err_ptr
- mov #-EFAULT,r1
- mov.l r1,@r0
-
- ! zero the complete destination - computing the rest
- ! is too much work
- mov.l @(4,r15),r5 ! dst
- mov.l @r15,r6 ! len
- mov #0,r7
-1: mov.b r7,@r5
- dt r6
- bf/s 1b
- add #1,r5
- mov.l 8000f,r0
- jmp @r0
- nop
- .align 2
-8000: .long 5000b
-
-6002:
- mov.l @(12,r15),r0 ! dst_err_ptr
- mov #-EFAULT,r1
- mov.l r1,@r0
- mov.l 8001f,r0
- jmp @r0
- nop
- .align 2
-8001: .long 5000b
-
+ rts
+ mov #0,r0
.previous
- add #8,r15
rts
mov r7,r0
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4735176ab811..3348e0c4d769 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -226,15 +226,12 @@ void __init allocate_pgdat(unsigned int nid)
static void __init do_init_bootmem(void)
{
- struct memblock_region *reg;
+ unsigned long start_pfn, end_pfn;
+ int i;
/* Add active regions with valid PFNs. */
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
__add_active_range(0, start_pfn, end_pfn);
- }
/* All of system RAM sits in node 0 for the non-NUMA case */
allocate_pgdat(0);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index efeff2c896a5..096530eac8e1 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -23,6 +23,7 @@ config SPARC
select HAVE_OPROFILE
select HAVE_ARCH_KGDB if !SMP || SPARC64
select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_SECCOMP if SPARC64
select HAVE_EXIT_THREAD
select HAVE_PCI
select SYSCTL_EXCEPTION_TRACE
@@ -43,6 +44,7 @@ config SPARC
select GENERIC_STRNLEN_USER
select MODULES_USE_ELF_RELA
select PCI_SYSCALL if PCI
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
select CPU_NO_EFFICIENT_FFS
@@ -226,23 +228,6 @@ config EARLYFB
help
Say Y here to enable a faster early framebuffer boot console.
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on SPARC64 && PROC_FS
- default y
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SPARC64 && SMP
diff --git a/arch/sparc/include/asm/checksum.h b/arch/sparc/include/asm/checksum.h
index a6256cb6fc5c..f2ac13323b6d 100644
--- a/arch/sparc/include/asm/checksum.h
+++ b/arch/sparc/include/asm/checksum.h
@@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ___ASM_SPARC_CHECKSUM_H
#define ___ASM_SPARC_CHECKSUM_H
+#define _HAVE_ARCH_CSUM_AND_COPY
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define HAVE_CSUM_COPY_USER
#if defined(__sparc__) && defined(__arch64__)
#include <asm/checksum_64.h>
#else
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index 479a0b812af5..ce11e0ad80c7 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -42,7 +42,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
@@ -50,9 +50,9 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
- " mov %6, %%g7\n"
+ " mov -1, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
- : "0" (ret), "1" (d), "2" (l), "r" (sum)
+ : "0" (ret), "1" (d), "2" (l)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
@@ -60,65 +60,19 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
}
static inline __wsum
-csum_and_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *err)
- {
- register unsigned long ret asm("o0") = (unsigned long)src;
- register char *d asm("o1") = dst;
- register int l asm("g1") = len;
- register __wsum s asm("g7") = sum;
-
- if (unlikely(!access_ok(src, len))) {
- if (len)
- *err = -EFAULT;
- return sum;
- }
-
- __asm__ __volatile__ (
- ".section __ex_table,#alloc\n\t"
- ".align 4\n\t"
- ".word 1f,2\n\t"
- ".previous\n"
- "1:\n\t"
- "call __csum_partial_copy_sparc_generic\n\t"
- " st %8, [%%sp + 64]\n"
- : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
- : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
- : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
- "cc", "memory");
- return (__force __wsum)ret;
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (unlikely(!access_ok(src, len)))
+ return 0;
+ return csum_partial_copy_nocheck((__force void *)src, dst, len);
}
-#define HAVE_CSUM_COPY_USER
-
static inline __wsum
-csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err)
+csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
- if (!access_ok(dst, len)) {
- *err = -EFAULT;
- return sum;
- } else {
- register unsigned long ret asm("o0") = (unsigned long)src;
- register char __user *d asm("o1") = dst;
- register int l asm("g1") = len;
- register __wsum s asm("g7") = sum;
-
- __asm__ __volatile__ (
- ".section __ex_table,#alloc\n\t"
- ".align 4\n\t"
- ".word 1f,1\n\t"
- ".previous\n"
- "1:\n\t"
- "call __csum_partial_copy_sparc_generic\n\t"
- " st %8, [%%sp + 64]\n"
- : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
- : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
- : "o2", "o3", "o4", "o5", "o7",
- "g2", "g3", "g4", "g5",
- "cc", "memory");
- return (__force __wsum)ret;
- }
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_nocheck(src, (__force void *)dst, len);
}
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 0fa4433f5662..d6b59461e064 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -38,42 +38,9 @@ __wsum csum_partial(const void * buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
-long __csum_partial_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum);
-
-static inline __wsum
-csum_and_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum, int *err)
-{
- long ret = __csum_partial_copy_from_user(src, dst, len, sum);
- if (ret < 0)
- *err = -EFAULT;
- return (__force __wsum) ret;
-}
-
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-long __csum_partial_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum);
-
-static inline __wsum
-csum_and_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum, int *err)
-{
- long ret = __csum_partial_copy_to_user(src, dst, len, sum);
- if (ret < 0)
- *err = -EFAULT;
- return (__force __wsum) ret;
-}
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 40a267b3bd52..b85842cda99f 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -21,8 +21,7 @@ typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
+
struct compat_stat {
compat_dev_t st_dev;
compat_ino_t st_ino;
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 4843f48bfe85..774a82b0c649 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -87,7 +87,6 @@ void auxio_set_lte(int on)
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
- /* FALL-THROUGH */
default:
break;
}
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index bfae98ab8638..23f8838dd96e 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -55,7 +55,7 @@ static int clock_board_calc_nslots(struct clock_board *p)
else
return 5;
}
- /* Fallthrough */
+ fallthrough;
default:
return 4;
}
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 7580775a14b9..58ad3f7de1fb 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 5d6c2d287e85..177746ae2c81 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index dfbca2470536..217c21a6986a 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -453,6 +453,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->u_regs[UREG_RETPC] =
@@ -465,58 +466,12 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+ unsigned long orig_ret_address = 0;
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c0886b400dad..2a12c86af956 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -359,7 +359,7 @@ int __init pcr_arch_init(void)
* counter overflow interrupt so we can't make use of
* their hardware currently.
*/
- /* fallthrough */
+ fallthrough;
default:
err = -ENODEV;
goto out_unregister;
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index 5234b5ccc0b9..0442ab00518d 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -25,7 +25,7 @@ asmlinkage long sparc_fork(struct pt_regs *regs)
.stack = regs->u_regs[UREG_FP],
};
- ret = _do_fork(&args);
+ ret = kernel_clone(&args);
/* If we get an error and potentially restart the system
* call, we're screwed because copy_thread() clobbered
@@ -50,7 +50,7 @@ asmlinkage long sparc_vfork(struct pt_regs *regs)
.stack = regs->u_regs[UREG_FP],
};
- ret = _do_fork(&args);
+ ret = kernel_clone(&args);
/* If we get an error and potentially restart the system
* call, we're screwed because copy_thread() clobbered
@@ -96,7 +96,7 @@ asmlinkage long sparc_clone(struct pt_regs *regs)
else
args.stack = regs->u_regs[UREG_FP];
- ret = _do_fork(&args);
+ ret = kernel_clone(&args);
/* If we get an error and potentially restart the system
* call, we're screwed because copy_thread() clobbered
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index da8902295c8c..3df960c137f7 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -224,7 +224,7 @@ void __init of_console_init(void)
case PROMDEV_TTYB:
skip = 1;
- /* FALLTHRU */
+ fallthrough;
case PROMDEV_TTYA:
type = "serial";
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index e2c6f0abda00..e9695a06492f 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -646,7 +646,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -686,7 +686,7 @@ void do_signal32(struct pt_regs * regs)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index f1f8c8ebe641..d0e0025ee3ba 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -440,7 +440,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
@@ -506,7 +506,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 6937339a272c..255264bcb46a 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -461,7 +461,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -532,7 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 4af114e84f20..37ec52b34c73 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -38,7 +38,7 @@
23 64 setuid sys_setuid
24 32 getuid sys_getuid16
24 64 getuid sys_getuid
-25 common vmsplice sys_vmsplice compat_sys_vmsplice
+25 common vmsplice sys_vmsplice
26 common ptrace sys_ptrace compat_sys_ptrace
27 common alarm sys_alarm
28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
@@ -149,8 +149,8 @@
117 common getrusage sys_getrusage compat_sys_getrusage
118 common getsockopt sys_getsockopt sys_getsockopt
119 common getcwd sys_getcwd
-120 common readv sys_readv compat_sys_readv
-121 common writev sys_writev compat_sys_writev
+120 common readv sys_readv
+121 common writev sys_writev
122 common settimeofday sys_settimeofday compat_sys_settimeofday
123 32 fchown sys_fchown16
123 64 fchown sys_fchown
@@ -201,7 +201,7 @@
164 64 utrap_install sys_utrap_install
165 common quotactl sys_quotactl
166 common set_tid_address sys_set_tid_address
-167 common mount sys_mount compat_sys_mount
+167 common mount sys_mount
168 common ustat sys_ustat compat_sys_ustat
169 common setxattr sys_setxattr
170 common lsetxattr sys_lsetxattr
@@ -406,8 +406,8 @@
335 common syncfs sys_syncfs
336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
337 common setns sys_setns
-338 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-339 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+338 common process_vm_readv sys_process_vm_readv
+339 common process_vm_writev sys_process_vm_writev
340 32 kern_features sys_ni_syscall sys_kern_features
340 64 kern_features sys_kern_features
341 common kcmp sys_kcmp
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index f99e99e58075..d55ae65a07ad 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -187,6 +187,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 6a5469c97246..7488d130faf7 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -144,44 +144,21 @@ cpte: bne csum_partial_end_cruft ! yep, handle it
cpout: retl ! get outta here
mov %o2, %o0 ! return computed csum
- .globl __csum_partial_copy_start, __csum_partial_copy_end
-__csum_partial_copy_start:
-
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
-#define EX(x,y,a,b) \
-98: x,y; \
- .section .fixup,ALLOC,EXECINSTR; \
- .align 4; \
-99: ba 30f; \
- a, b, %o3; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4
-
-#define EX2(x,y) \
-98: x,y; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 30f; \
- .text; \
- .align 4
-
-#define EX3(x,y) \
+#define EX(x,y) \
98: x,y; \
.section __ex_table,ALLOC; \
.align 4; \
- .word 98b, 96f; \
+ .word 98b, cc_fault; \
.text; \
.align 4
-#define EXT(start,end,handler) \
+#define EXT(start,end) \
.section __ex_table,ALLOC; \
.align 4; \
- .word start, 0, end, handler; \
+ .word start, 0, end, cc_fault; \
.text; \
.align 4
@@ -252,21 +229,21 @@ __csum_partial_copy_start:
cc_end_cruft:
be 1f
andcc %o3, 4, %g0
- EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf)
+ EX(ldd [%o0 + 0x00], %g2)
add %o1, 8, %o1
addcc %g2, %g7, %g7
add %o0, 8, %o0
addxcc %g3, %g7, %g7
- EX2(st %g2, [%o1 - 0x08])
+ EX(st %g2, [%o1 - 0x08])
addx %g0, %g7, %g7
andcc %o3, 4, %g0
- EX2(st %g3, [%o1 - 0x04])
+ EX(st %g3, [%o1 - 0x04])
1: be 1f
andcc %o3, 3, %o3
- EX(ld [%o0 + 0x00], %g2, add %o3, 4)
+ EX(ld [%o0 + 0x00], %g2)
add %o1, 4, %o1
addcc %g2, %g7, %g7
- EX2(st %g2, [%o1 - 0x04])
+ EX(st %g2, [%o1 - 0x04])
addx %g0, %g7, %g7
andcc %o3, 3, %g0
add %o0, 4, %o0
@@ -276,14 +253,14 @@ cc_end_cruft:
subcc %o3, 2, %o3
b 4f
or %g0, %g0, %o4
-2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2)
+2: EX(lduh [%o0 + 0x00], %o4)
add %o0, 2, %o0
- EX2(sth %o4, [%o1 + 0x00])
+ EX(sth %o4, [%o1 + 0x00])
be 6f
add %o1, 2, %o1
sll %o4, 16, %o4
-4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1)
- EX2(stb %o5, [%o1 + 0x00])
+4: EX(ldub [%o0 + 0x00], %o5)
+ EX(stb %o5, [%o1 + 0x00])
sll %o5, 8, %o5
or %o5, %o4, %o4
6: addcc %o4, %g7, %g7
@@ -306,9 +283,9 @@ cc_dword_align:
andcc %o0, 0x2, %g0
be 1f
andcc %o0, 0x4, %g0
- EX(lduh [%o0 + 0x00], %g4, add %g1, 0)
+ EX(lduh [%o0 + 0x00], %g4)
sub %g1, 2, %g1
- EX2(sth %g4, [%o1 + 0x00])
+ EX(sth %g4, [%o1 + 0x00])
add %o0, 2, %o0
sll %g4, 16, %g4
addcc %g4, %g7, %g7
@@ -322,9 +299,9 @@ cc_dword_align:
or %g3, %g7, %g7
1: be 3f
andcc %g1, 0xffffff80, %g0
- EX(ld [%o0 + 0x00], %g4, add %g1, 0)
+ EX(ld [%o0 + 0x00], %g4)
sub %g1, 4, %g1
- EX2(st %g4, [%o1 + 0x00])
+ EX(st %g4, [%o1 + 0x00])
add %o0, 4, %o0
addcc %g4, %g7, %g7
add %o1, 4, %o1
@@ -354,7 +331,7 @@ __csum_partial_copy_sparc_generic:
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10: EXT(5b, 10b, 20f) ! note for exception handling
+10: EXT(5b, 10b) ! note for exception handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -379,7 +356,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12: EXT(cctbl, 12b, 22f) ! note for exception table handling
+12: EXT(cctbl, 12b) ! note for exception table handling
addx %g0, %g7, %g7
andcc %o3, 0xf, %g0 ! check for low bits set
ccte: bne cc_end_cruft ! something left, handle it out of band
@@ -390,7 +367,7 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11: EXT(ccdbl, 11b, 21f) ! note for exception table handling
+11: EXT(ccdbl, 11b) ! note for exception table handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -407,9 +384,9 @@ ccslow: cmp %g1, 0
be,a 1f
srl %g1, 1, %g4
sub %g1, 1, %g1
- EX(ldub [%o0], %g5, add %g1, 1)
+ EX(ldub [%o0], %g5)
add %o0, 1, %o0
- EX2(stb %g5, [%o1])
+ EX(stb %g5, [%o1])
srl %g1, 1, %g4
add %o1, 1, %o1
1: cmp %g4, 0
@@ -418,34 +395,34 @@ ccslow: cmp %g1, 0
andcc %o0, 2, %g0
be,a 1f
srl %g4, 1, %g4
- EX(lduh [%o0], %o4, add %g1, 0)
+ EX(lduh [%o0], %o4)
sub %g1, 2, %g1
srl %o4, 8, %g2
sub %g4, 1, %g4
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 1])
+ EX(stb %o4, [%o1 + 1])
add %o0, 2, %o0
srl %g4, 1, %g4
add %o1, 2, %o1
1: cmp %g4, 0
be,a 2f
andcc %g1, 2, %g0
- EX3(ld [%o0], %o4)
+ EX(ld [%o0], %o4)
5: srl %o4, 24, %g2
srl %o4, 16, %g3
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
srl %o4, 8, %g2
- EX2(stb %g3, [%o1 + 1])
+ EX(stb %g3, [%o1 + 1])
add %o0, 4, %o0
- EX2(stb %g2, [%o1 + 2])
+ EX(stb %g2, [%o1 + 2])
addcc %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 3])
+ EX(stb %o4, [%o1 + 3])
 addx %g5, %g0, %g5 ! I am now too lazy to optimize this (question it
add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl
subcc %g4, 1, %g4 ! tricks
bne,a 5b
- EX3(ld [%o0], %o4)
+ EX(ld [%o0], %o4)
sll %g5, 16, %g2
srl %g5, 16, %g5
srl %g2, 16, %g2
@@ -453,19 +430,19 @@ ccslow: cmp %g1, 0
add %g2, %g5, %g5
2: be,a 3f
andcc %g1, 1, %g0
- EX(lduh [%o0], %o4, and %g1, 3)
+ EX(lduh [%o0], %o4)
andcc %g1, 1, %g0
srl %o4, 8, %g2
add %o0, 2, %o0
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %g5, %o4, %g5
- EX2(stb %o4, [%o1 + 1])
+ EX(stb %o4, [%o1 + 1])
add %o1, 2, %o1
3: be,a 1f
sll %g5, 16, %o4
- EX(ldub [%o0], %g2, add %g0, 1)
+ EX(ldub [%o0], %g2)
sll %g2, 8, %o4
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %g5, %o4, %g5
sll %g5, 16, %o4
1: addcc %o4, %g5, %g5
@@ -481,113 +458,10 @@ ccslow: cmp %g1, 0
4: addcc %g7, %g5, %g7
retl
addx %g0, %g7, %o0
-__csum_partial_copy_end:
/* We do these strange calculations for the csum_*_from_user case only, ie.
* we only bother with faults on loads... */
-/* o2 = ((g2%20)&3)*8
- * o3 = g1 - (g2/20)*32 - o2 */
-20:
- cmp %g2, 20
- blu,a 1f
- and %g2, 3, %o2
- sub %g1, 32, %g1
- b 20b
- sub %g2, 20, %g2
-1:
- sll %o2, 3, %o2
- b 31f
- sub %g1, %o2, %o3
-
-/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
- * o3 = g1 - (g2/16)*32 - o2 */
-21:
- andcc %g2, 15, %o3
- srl %g2, 4, %g2
- be,a 1f
- clr %o2
- add %o3, 1, %o3
- and %o3, 14, %o3
- sll %o3, 3, %o2
-1:
- sll %g2, 5, %g2
- sub %g1, %g2, %o3
- b 31f
- sub %o3, %o2, %o3
-
-/* o0 += (g2/10)*16 - 0x70
- * 01 += (g2/10)*16 - 0x70
- * o2 = (g2 % 10) ? 8 : 0
- * o3 += 0x70 - (g2/10)*16 - o2 */
-22:
- cmp %g2, 10
- blu,a 1f
- sub %o0, 0x70, %o0
- add %o0, 16, %o0
- add %o1, 16, %o1
- sub %o3, 16, %o3
- b 22b
- sub %g2, 10, %g2
-1:
- sub %o1, 0x70, %o1
- add %o3, 0x70, %o3
- clr %o2
- tst %g2
- bne,a 1f
- mov 8, %o2
-1:
- b 31f
- sub %o3, %o2, %o3
-96:
- and %g1, 3, %g1
- sll %g4, 2, %g4
- add %g1, %g4, %o3
-30:
-/* %o1 is dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occurred */
- clr %o2
-31:
-/* %o0 is src
- * %o1 is dst
- * %o2 is # of bytes to copy from src to dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occurred */
- save %sp, -104, %sp
- mov %i5, %o0
- mov %i7, %o1
- mov %i4, %o2
- call lookup_fault
- mov %g7, %i4
- cmp %o0, 2
- bne 1f
- add %g0, -EFAULT, %i5
- tst %i2
- be 2f
- mov %i0, %o1
- mov %i1, %o0
-5:
- call memcpy
- mov %i2, %o2
- tst %o0
- bne,a 2f
- add %i3, %i2, %i3
- add %i1, %i2, %i1
-2:
- mov %i1, %o0
-6:
- call __bzero
- mov %i3, %o1
-1:
- ld [%sp + 168], %o2 ! struct_ptr of parent
- st %i5, [%o2]
+cc_fault:
ret
- restore
-
- .section __ex_table,#alloc
- .align 4
- .word 5b,2
- .word 6b,2
+ clr %o0
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index 26c644ba3ecb..0c0268e77155 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -68,9 +68,10 @@
.globl FUNC_NAME
.type FUNC_NAME,#function
EXPORT_SYMBOL(FUNC_NAME)
-FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
+FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
+ mov 1, %o3
clr %o4
andcc %g1, 0x3, %g0
bne,pn %icc, 95f
diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S
index d20b9594f0c7..b0ba8d4dd439 100644
--- a/arch/sparc/lib/csum_copy_from_user.S
+++ b/arch/sparc/lib/csum_copy_from_user.S
@@ -9,14 +9,14 @@
.section .fixup, "ax"; \
.align 4; \
99: retl; \
- mov -1, %o0; \
+ mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define FUNC_NAME __csum_partial_copy_from_user
+#define FUNC_NAME csum_and_copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#include "csum_copy.S"
diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S
index d71c0c81e8ab..91ba36dbf7d2 100644
--- a/arch/sparc/lib/csum_copy_to_user.S
+++ b/arch/sparc/lib/csum_copy_to_user.S
@@ -9,14 +9,14 @@
.section .fixup,"ax"; \
.align 4; \
99: retl; \
- mov -1, %o0; \
+ mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define FUNC_NAME __csum_partial_copy_to_user
+#define FUNC_NAME csum_and_copy_to_user
#define STORE(type,src,addr) type##a src, [addr] %asi
#include "csum_copy.S"
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index 72e560ef4a09..d5beec856146 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -359,7 +359,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
@@ -380,7 +380,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
@@ -408,13 +408,13 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
- /* fall through */
+ fallthrough;
case 1:
rd = (void *)&fregs[freg];
break;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 8071bfd72349..40ce087dfecf 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -288,8 +288,6 @@ no_context:
if (fixup > 10) {
extern const unsigned int __memset_start[];
extern const unsigned int __memset_end[];
- extern const unsigned int __csum_partial_copy_start[];
- extern const unsigned int __csum_partial_copy_end[];
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%08lx> faddr<%08lx>\n",
@@ -298,9 +296,7 @@ no_context:
regs->pc, fixup, g2);
#endif
if ((regs->pc >= (unsigned long)__memset_start &&
- regs->pc < (unsigned long)__memset_end) ||
- (regs->pc >= (unsigned long)__csum_partial_copy_start &&
- regs->pc < (unsigned long)__csum_partial_copy_end)) {
+ regs->pc < (unsigned long)__memset_end)) {
regs->u_regs[UREG_I4] = address;
regs->u_regs[UREG_I5] = regs->pc;
}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index fad6d3129904..96edf64d4fb3 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1192,18 +1192,14 @@ int of_node_to_nid(struct device_node *dp)
static void __init add_node_ranges(void)
{
- struct memblock_region *reg;
+ phys_addr_t start, end;
unsigned long prev_max;
+ u64 i;
memblock_resized:
prev_max = memblock.memory.max;
- for_each_memblock(memory, reg) {
- unsigned long size = reg->size;
- unsigned long start, end;
-
- start = reg->base;
- end = start + size;
+ for_each_mem_range(i, &start, &end) {
while (start < end) {
unsigned long this_end;
int nid;
@@ -1211,7 +1207,7 @@ memblock_resized:
this_end = memblock_nid_range(start, end, &nid);
numadbg("Setting memblock NUMA node nid[%d] "
- "start[%lx] end[%lx]\n",
+ "start[%llx] end[%lx]\n",
nid, start, this_end);
memblock_set_node(start, this_end - start,
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index c8eabb973b86..b1dbf2fa8c0a 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -491,7 +491,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
} else {
emit_loadimm(K, r_A);
}
- /* Fallthrough */
+ fallthrough;
case BPF_RET | BPF_A:
if (seen_or_pass0) {
if (i != flen - 1) {
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index eb51fec75948..d49f471b02e3 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -173,22 +173,6 @@ config PGTABLE_LEVELS
default 3 if 3_LEVEL_PGTABLES
default 2
-config SECCOMP
- def_bool y
- prompt "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y.
-
config UML_TIME_TRAVEL_SUPPORT
bool
prompt "Support time-travel mode (e.g. for test execution)"
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index f5001481010c..dacbfabf66d8 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -164,8 +164,8 @@ SECTIONS
PROVIDE (end = .);
STABS_DEBUG
-
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 3d57c71c532e..88cd9b5c1b74 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -70,7 +70,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
PT_REGS_SYSCALL_RET(regs) = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
PT_REGS_RESTART_SYSCALL(regs);
PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 3b6dab3d4501..45d957d7004c 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -108,8 +108,8 @@ SECTIONS
PROVIDE (end = .);
STABS_DEBUG
-
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7101ac64bb20..f6946b81f74a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -75,7 +75,7 @@ config X86
select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
- select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
+ select ARCH_HAS_COPY_MC if X86_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_STRICT_KERNEL_RWX
@@ -215,6 +215,8 @@ config X86
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
+ select HAVE_STATIC_CALL
+ select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
@@ -230,6 +232,7 @@ config X86
select RTC_MC146818_LIB
select SPARSE_IRQ
select SRCU
+ select STACK_VALIDATION if HAVE_STACK_VALIDATION && (HAVE_STATIC_CALL_INLINE || RETPOLINE)
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
@@ -451,7 +454,6 @@ config GOLDFISH
config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
default y
- select STACK_VALIDATION if HAVE_STACK_VALIDATION
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
@@ -1521,6 +1523,7 @@ config AMD_MEM_ENCRYPT
select DYNAMIC_PHYSICAL_MASK
select ARCH_USE_MEMREMAP_PROT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+ select INSTRUCTION_DECODER
help
Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory
@@ -1968,22 +1971,6 @@ config EFI_MIXED
If unsure, say N.
-config SECCOMP
- def_bool y
- prompt "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
source "kernel/Kconfig.hz"
config KEXEC
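HAVE_STATIC_CALL (and HAVE_STATIC_CALL_INLINE when objtool is available) advertises the static_call() interface selected in the hunks above; the Kconfig change does not show the API itself, so the following is only an illustrative sketch of what a user of that interface looks like:

	#include <linux/static_call.h>

	static int default_handler(int x)
	{
		return x + 1;
	}

	DEFINE_STATIC_CALL(my_handler, default_handler);

	static int dispatch(int x)
	{
		/* direct call, patched in place by static_call_update() */
		return static_call(my_handler)(x);
	}

	static void set_handler(int (*fn)(int))
	{
		static_call_update(my_handler, fn);
	}

With HAVE_STATIC_CALL_INLINE the call sites themselves are patched; otherwise an out-of-line trampoline is updated.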
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index ee1d3c5834c6..27b5e2bc6a01 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -62,7 +62,7 @@ config EARLY_PRINTK_USB_XDBC
You should normally say N here, unless you want to debug early
crashes or need a very simple printk logging facility.
-config MCSAFE_TEST
+config COPY_MC_TEST
def_bool n
config EFI_PGT_DUMP
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4346ffb2e39f..154259f18b8b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -209,6 +209,10 @@ ifdef CONFIG_X86_64
LDFLAGS_vmlinux += -z max-page-size=0x200000
endif
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
diff --git a/arch/x86/boot/cmdline.c b/arch/x86/boot/cmdline.c
index 4ff01176c1cc..21d56ae83cdf 100644
--- a/arch/x86/boot/cmdline.c
+++ b/arch/x86/boot/cmdline.c
@@ -54,7 +54,7 @@ int __cmdline_find_option(unsigned long cmdline_ptr, const char *option, char *b
/* else */
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (c == '=' && !*opptr) {
@@ -129,7 +129,7 @@ int __cmdline_find_option_bool(unsigned long cmdline_ptr, const char *option)
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3962f592633d..ee249088cbfe 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -29,10 +29,10 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst
KBUILD_CFLAGS := -m$(BITS) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
+KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
-cflags-$(CONFIG_X86_64) := -mcmodel=small
+cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += -ffreestanding
@@ -43,24 +43,26 @@ KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+# Disable relocation relaxation in case the link is not PIE.
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
+KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
+
+# sev-es.c indirectly includes inat-table.h which is generated during
+# compilation and stored in $(objtree). Add the directory to the includes so
+# that the compiler finds it even with out-of-tree builds (make O=/some/path).
+CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
UBSAN_SANITIZE :=n
KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info)
# Compressed kernel should be built as PIE since it may be loaded at any
# address by the bootloader.
-ifeq ($(CONFIG_X86_32),y)
-KBUILD_LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
-else
-# To build 64-bit compressed kernel as PIE, we disable relocation
-# overflow check to avoid relocation overflow error with a new linker
-# command-line option, -z noreloc-overflow.
-KBUILD_LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
- && echo "-z noreloc-overflow -pie --no-dynamic-linker")
-endif
-LDFLAGS_vmlinux := -T
+LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker)
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
@@ -84,9 +86,11 @@ vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/kernel_info.o $(obj)/head_$(BITS).o
vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
ifdef CONFIG_X86_64
- vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr_64.o
+ vmlinux-objs-y += $(obj)/ident_map_64.o
+ vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-y += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
+ vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
@@ -94,30 +98,8 @@ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
-# The compressed kernel is built with -fPIC/-fPIE so that a boot loader
-# can place it anywhere in memory and it will still run. However, since
-# it is executed as-is without any ELF relocation processing performed
-# (and has already had all relocation sections stripped from the binary),
-# none of the code can use data relocations (e.g. static assignments of
-# pointer values), since they will be meaningless at runtime. This check
-# will refuse to link the vmlinux if any of these relocations are found.
-quiet_cmd_check_data_rel = DATAREL $@
-define cmd_check_data_rel
- for obj in $(filter %.o,$^); do \
- $(READELF) -S $$obj | grep -qF .rel.local && { \
- echo "error: $$obj has data relocations!" >&2; \
- exit 1; \
- } || true; \
- done
-endef
-
-# We need to run two commands under "if_changed", so merge them into a
-# single invocation.
-quiet_cmd_check-and-link-vmlinux = LD $@
- cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
-
$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
- $(call if_changed,check-and-link-vmlinux)
+ $(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c
index 6448a8196d32..0cc1323896d1 100644
--- a/arch/x86/boot/compressed/cpuflags.c
+++ b/arch/x86/boot/compressed/cpuflags.c
@@ -1,6 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#ifdef CONFIG_RANDOMIZE_BASE
-
#include "../cpuflags.c"
bool has_cpuflag(int flag)
@@ -9,5 +7,3 @@ bool has_cpuflag(int flag)
return test_bit(flag, cpu.flags);
}
-
-#endif
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 03557f2174bf..659fad53ca82 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -33,32 +33,13 @@
#include <asm/bootparam.h>
/*
- * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
- * relocation to get the symbol address in PIC. When the compressed x86
- * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
- * relocations to their fixed symbol addresses. However, when the
- * compressed x86 kernel is loaded at a different address, it leads
- * to the following load failure:
- *
- * Failed to allocate space for phdrs
- *
- * during the decompression stage.
- *
- * If the compressed x86 kernel is relocatable at run-time, it should be
- * compiled with -fPIE, instead of -fPIC, if possible and should be built as
- * Position Independent Executable (PIE) so that linker won't optimize
- * R_386_GOT32X relocation to its fixed symbol address. Older
- * linkers generate R_386_32 relocations against locally defined symbols,
- * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
- * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
- * R_386_32 relocations when relocating the kernel. To generate
- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
- * hidden:
+ * These symbols needed to be marked as .hidden to prevent the BFD linker from
+ * generating R_386_32 (rather than R_386_RELATIVE) relocations for them when
+ * the 32-bit compressed kernel is linked as PIE. This is no longer necessary,
+ * but it doesn't hurt to keep them .hidden.
*/
.hidden _bss
.hidden _ebss
- .hidden _got
- .hidden _egot
.hidden _end
__HEAD
@@ -77,10 +58,10 @@ SYM_FUNC_START(startup_32)
leal (BP_scratch+4)(%esi), %esp
call 1f
1: popl %edx
- subl $1b, %edx
+ addl $_GLOBAL_OFFSET_TABLE_+(.-1b), %edx
/* Load new GDT */
- leal gdt(%edx), %eax
+ leal gdt@GOTOFF(%edx), %eax
movl %eax, 2(%eax)
lgdt (%eax)
@@ -93,14 +74,16 @@ SYM_FUNC_START(startup_32)
movl %eax, %ss
/*
- * %edx contains the address we are loaded at by the boot loader and %ebx
- * contains the address where we should move the kernel image temporarily
- * for safe in-place decompression. %ebp contains the address that the kernel
- * will be decompressed to.
+ * %edx contains the address we are loaded at by the boot loader (plus the
+ * offset to the GOT). The below code calculates %ebx to be the address where
+ * we should move the kernel image temporarily for safe in-place decompression
+ * (again, plus the offset to the GOT).
+ *
+ * %ebp is calculated to be the address that the kernel will be decompressed to.
*/
#ifdef CONFIG_RELOCATABLE
- movl %edx, %ebx
+ leal startup_32@GOTOFF(%edx), %ebx
#ifdef CONFIG_EFI_STUB
/*
@@ -111,7 +94,7 @@ SYM_FUNC_START(startup_32)
* image_offset = startup_32 - image_base
* Otherwise image_offset will be zero and has no effect on the calculations.
*/
- subl image_offset(%edx), %ebx
+ subl image_offset@GOTOFF(%edx), %ebx
#endif
movl BP_kernel_alignment(%esi), %eax
@@ -128,10 +111,10 @@ SYM_FUNC_START(startup_32)
movl %ebx, %ebp // Save the output address for later
/* Target address to relocate to for decompression */
addl BP_init_size(%esi), %ebx
- subl $_end, %ebx
+ subl $_end@GOTOFF, %ebx
/* Set up the stack */
- leal boot_stack_end(%ebx), %esp
+ leal boot_stack_end@GOTOFF(%ebx), %esp
/* Zero EFLAGS */
pushl $0
@@ -142,8 +125,8 @@ SYM_FUNC_START(startup_32)
* where decompression in place becomes safe.
*/
pushl %esi
- leal (_bss-4)(%edx), %esi
- leal (_bss-4)(%ebx), %edi
+ leal (_bss@GOTOFF-4)(%edx), %esi
+ leal (_bss@GOTOFF-4)(%ebx), %edi
movl $(_bss - startup_32), %ecx
shrl $2, %ecx
std
@@ -156,14 +139,14 @@ SYM_FUNC_START(startup_32)
* during extract_kernel below. To avoid any issues, repoint the GDTR
* to the new copy of the GDT.
*/
- leal gdt(%ebx), %eax
+ leal gdt@GOTOFF(%ebx), %eax
movl %eax, 2(%eax)
lgdt (%eax)
/*
* Jump to the relocated address.
*/
- leal .Lrelocated(%ebx), %eax
+ leal .Lrelocated@GOTOFF(%ebx), %eax
jmp *%eax
SYM_FUNC_END(startup_32)
@@ -173,7 +156,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry)
add $0x4, %esp
movl 8(%esp), %esi /* save boot_params pointer */
call efi_main
- leal startup_32(%eax), %eax
+ /* efi_main returns the possibly relocated address of startup_32 */
jmp *%eax
SYM_FUNC_END(efi32_stub_entry)
SYM_FUNC_END_ALIAS(efi_stub_entry)
@@ -186,40 +169,26 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
* Clear BSS (stack is currently empty)
*/
xorl %eax, %eax
- leal _bss(%ebx), %edi
- leal _ebss(%ebx), %ecx
+ leal _bss@GOTOFF(%ebx), %edi
+ leal _ebss@GOTOFF(%ebx), %ecx
subl %edi, %ecx
shrl $2, %ecx
rep stosl
/*
- * Adjust our own GOT
- */
- leal _got(%ebx), %edx
- leal _egot(%ebx), %ecx
-1:
- cmpl %ecx, %edx
- jae 2f
- addl %ebx, (%edx)
- addl $4, %edx
- jmp 1b
-2:
-
-/*
* Do the extraction, and jump to the new kernel..
*/
- /* push arguments for extract_kernel: */
- pushl $z_output_len /* decompressed length, end of relocs */
-
- pushl %ebp /* output address */
-
- pushl $z_input_len /* input_len */
- leal input_data(%ebx), %eax
- pushl %eax /* input_data */
- leal boot_heap(%ebx), %eax
- pushl %eax /* heap area */
- pushl %esi /* real mode pointer */
- call extract_kernel /* returns kernel location in %eax */
+ /* push arguments for extract_kernel: */
+
+ pushl output_len@GOTOFF(%ebx) /* decompressed length, end of relocs */
+ pushl %ebp /* output address */
+ pushl input_len@GOTOFF(%ebx) /* input_len */
+ leal input_data@GOTOFF(%ebx), %eax
+ pushl %eax /* input_data */
+ leal boot_heap@GOTOFF(%ebx), %eax
+ pushl %eax /* heap area */
+ pushl %esi /* real mode pointer */
+ call extract_kernel /* returns kernel location in %eax */
addl $24, %esp
/*
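
The @GOTOFF rewrites above all follow the same pattern: keep only the position-independent offset at link time and add the run-time base held in a register. A minimal C sketch of that arithmetic (illustrative only, not part of the patch; the names are hypothetical):

/* Illustrative only: sym@GOTOFF resolves to (sym - GOT base) at link time. */
static unsigned long runtime_address(unsigned long got_runtime,	/* e.g. %edx or %ebx above */
				     unsigned long sym_linktime,
				     unsigned long got_linktime)
{
	unsigned long sym_gotoff = sym_linktime - got_linktime;	/* sym@GOTOFF */

	return got_runtime + sym_gotoff;	/* no run-time relocation entry needed */
}
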
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 97d37f0a34f5..1c80f1738fd9 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -33,6 +33,7 @@
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
+#include <asm/desc_defs.h>
#include "pgtable.h"
/*
@@ -40,11 +41,35 @@
*/
.hidden _bss
.hidden _ebss
- .hidden _got
- .hidden _egot
.hidden _end
__HEAD
+
+/*
+ * This macro gives the relative virtual address of X, i.e. the offset of X
+ * from startup_32. This is the same as the link-time virtual address of X,
+ * since startup_32 is at 0, but defining it this way tells the
+ * assembler/linker that we do not want the actual run-time address of X. This
+ * prevents the linker from trying to create unwanted run-time relocation
+ * entries for the reference when the compressed kernel is linked as PIE.
+ *
+ * A reference X(%reg) will result in the link-time VA of X being stored with
+ * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that
+ * adds the 64-bit base address where the kernel is loaded.
+ *
+ * Replacing it with (X-startup_32)(%reg) results in the offset being stored,
+ * and no run-time relocation.
+ *
+ * The macro should be used as a displacement with a base register containing
+ * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate
+ * [$ rva(X)].
+ *
+ * This macro can only be used from within the .head.text section, since the
+ * expression requires startup_32 to be in the same section as the code being
+ * assembled.
+ */
+#define rva(X) ((X) - startup_32)
+
.code32
SYM_FUNC_START(startup_32)
/*
@@ -67,10 +92,10 @@ SYM_FUNC_START(startup_32)
leal (BP_scratch+4)(%esi), %esp
call 1f
1: popl %ebp
- subl $1b, %ebp
+ subl $ rva(1b), %ebp
/* Load new GDT with the 64bit segments using 32bit descriptor */
- leal gdt(%ebp), %eax
+ leal rva(gdt)(%ebp), %eax
movl %eax, 2(%eax)
lgdt (%eax)
@@ -83,7 +108,7 @@ SYM_FUNC_START(startup_32)
movl %eax, %ss
/* setup a stack and make sure cpu supports long mode. */
- leal boot_stack_end(%ebp), %esp
+ leal rva(boot_stack_end)(%ebp), %esp
call verify_cpu
testl %eax, %eax
@@ -110,7 +135,7 @@ SYM_FUNC_START(startup_32)
* image_offset = startup_32 - image_base
* Otherwise image_offset will be zero and has no effect on the calculations.
*/
- subl image_offset(%ebp), %ebx
+ subl rva(image_offset)(%ebp), %ebx
#endif
movl BP_kernel_alignment(%esi), %eax
@@ -126,7 +151,7 @@ SYM_FUNC_START(startup_32)
/* Target address to relocate to for decompression */
addl BP_init_size(%esi), %ebx
- subl $_end, %ebx
+ subl $ rva(_end), %ebx
/*
* Prepare for entering 64 bit mode
@@ -154,19 +179,19 @@ SYM_FUNC_START(startup_32)
1:
/* Initialize Page tables to 0 */
- leal pgtable(%ebx), %edi
+ leal rva(pgtable)(%ebx), %edi
xorl %eax, %eax
movl $(BOOT_INIT_PGT_SIZE/4), %ecx
rep stosl
/* Build Level 4 */
- leal pgtable + 0(%ebx), %edi
+ leal rva(pgtable + 0)(%ebx), %edi
leal 0x1007 (%edi), %eax
movl %eax, 0(%edi)
addl %edx, 4(%edi)
/* Build Level 3 */
- leal pgtable + 0x1000(%ebx), %edi
+ leal rva(pgtable + 0x1000)(%ebx), %edi
leal 0x1007(%edi), %eax
movl $4, %ecx
1: movl %eax, 0x00(%edi)
@@ -177,7 +202,7 @@ SYM_FUNC_START(startup_32)
jnz 1b
/* Build Level 2 */
- leal pgtable + 0x2000(%ebx), %edi
+ leal rva(pgtable + 0x2000)(%ebx), %edi
movl $0x00000183, %eax
movl $2048, %ecx
1: movl %eax, 0(%edi)
@@ -188,7 +213,7 @@ SYM_FUNC_START(startup_32)
jnz 1b
/* Enable the boot page tables */
- leal pgtable(%ebx), %eax
+ leal rva(pgtable)(%ebx), %eax
movl %eax, %cr3
/* Enable Long mode in EFER (Extended Feature Enable Register) */
@@ -213,14 +238,14 @@ SYM_FUNC_START(startup_32)
* We place all of the values on our mini stack so lret can be
* used to perform that far jump.
*/
- leal startup_64(%ebp), %eax
+ leal rva(startup_64)(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
- movl efi32_boot_args(%ebp), %edi
+ movl rva(efi32_boot_args)(%ebp), %edi
cmp $0, %edi
jz 1f
- leal efi64_stub_entry(%ebp), %eax
- movl efi32_boot_args+4(%ebp), %esi
- movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer
+ leal rva(efi64_stub_entry)(%ebp), %eax
+ movl rva(efi32_boot_args+4)(%ebp), %esi
+ movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer
cmpl $0, %edx
jnz 1f
/*
@@ -231,7 +256,7 @@ SYM_FUNC_START(startup_32)
* the correct stack alignment for entry.
*/
subl $40, %esp
- leal efi_pe_entry(%ebp), %eax
+ leal rva(efi_pe_entry)(%ebp), %eax
movl %edi, %ecx // MS calling convention
movl %esi, %edx
1:
@@ -257,18 +282,18 @@ SYM_FUNC_START(efi32_stub_entry)
call 1f
1: pop %ebp
- subl $1b, %ebp
+ subl $ rva(1b), %ebp
- movl %esi, efi32_boot_args+8(%ebp)
+ movl %esi, rva(efi32_boot_args+8)(%ebp)
SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
- movl %ecx, efi32_boot_args(%ebp)
- movl %edx, efi32_boot_args+4(%ebp)
- movb $0, efi_is64(%ebp)
+ movl %ecx, rva(efi32_boot_args)(%ebp)
+ movl %edx, rva(efi32_boot_args+4)(%ebp)
+ movb $0, rva(efi_is64)(%ebp)
/* Save firmware GDTR and code/data selectors */
- sgdtl efi32_boot_gdt(%ebp)
- movw %cs, efi32_boot_cs(%ebp)
- movw %ds, efi32_boot_ds(%ebp)
+ sgdtl rva(efi32_boot_gdt)(%ebp)
+ movw %cs, rva(efi32_boot_cs)(%ebp)
+ movw %ds, rva(efi32_boot_ds)(%ebp)
/* Disable paging */
movl %cr0, %eax
@@ -347,30 +372,11 @@ SYM_CODE_START(startup_64)
/* Target address to relocate to for decompression */
movl BP_init_size(%rsi), %ebx
- subl $_end, %ebx
+ subl $ rva(_end), %ebx
addq %rbp, %rbx
/* Set up the stack */
- leaq boot_stack_end(%rbx), %rsp
-
- /*
- * paging_prepare() and cleanup_trampoline() below can have GOT
- * references. Adjust the table with address we are running at.
- *
- * Zero RAX for adjust_got: the GOT was not adjusted before;
- * there's no adjustment to undo.
- */
- xorq %rax, %rax
-
- /*
- * Calculate the address the binary is loaded at and use it as
- * a GOT adjustment.
- */
- call 1f
-1: popq %rdi
- subq $1b, %rdi
-
- call .Ladjust_got
+ leaq rva(boot_stack_end)(%rbx), %rsp
/*
* At this point we are in long mode with 4-level paging enabled,
@@ -410,6 +416,10 @@ SYM_CODE_START(startup_64)
.Lon_kernel_cs:
+ pushq %rsi
+ call load_stage1_idt
+ popq %rsi
+
/*
* paging_prepare() sets up the trampoline and checks if we need to
* enable 5-level paging.
@@ -444,7 +454,7 @@ SYM_CODE_START(startup_64)
lretq
trampoline_return:
/* Restore the stack, the 32-bit trampoline uses its own stack */
- leaq boot_stack_end(%rbx), %rsp
+ leaq rva(boot_stack_end)(%rbx), %rsp
/*
* cleanup_trampoline() would restore trampoline memory.
@@ -456,7 +466,7 @@ trampoline_return:
* this function call.
*/
pushq %rsi
- leaq top_pgtable(%rbx), %rdi
+ leaq rva(top_pgtable)(%rbx), %rdi
call cleanup_trampoline
popq %rsi
@@ -464,30 +474,15 @@ trampoline_return:
pushq $0
popfq
- /*
- * Previously we've adjusted the GOT with address the binary was
- * loaded at. Now we need to re-adjust for relocation address.
- *
- * Calculate the address the binary is loaded at, so that we can
- * undo the previous GOT adjustment.
- */
- call 1f
-1: popq %rax
- subq $1b, %rax
-
- /* The new adjustment is the relocation address */
- movq %rbx, %rdi
- call .Ladjust_got
-
/*
* Copy the compressed kernel to the end of our buffer
* where decompression in place becomes safe.
*/
pushq %rsi
leaq (_bss-8)(%rip), %rsi
- leaq (_bss-8)(%rbx), %rdi
- movq $_bss /* - $startup_32 */, %rcx
- shrq $3, %rcx
+ leaq rva(_bss-8)(%rbx), %rdi
+ movl $(_bss - startup_32), %ecx
+ shrl $3, %ecx
std
rep movsq
cld
@@ -498,15 +493,15 @@ trampoline_return:
* during extract_kernel below. To avoid any issues, repoint the GDTR
* to the new copy of the GDT.
*/
- leaq gdt64(%rbx), %rax
- leaq gdt(%rbx), %rdx
+ leaq rva(gdt64)(%rbx), %rax
+ leaq rva(gdt)(%rbx), %rdx
movq %rdx, 2(%rax)
lgdt (%rax)
/*
* Jump to the relocated address.
*/
- leaq .Lrelocated(%rbx), %rax
+ leaq rva(.Lrelocated)(%rbx), %rax
jmp *%rax
SYM_CODE_END(startup_64)
@@ -518,7 +513,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry)
movq %rdx, %rbx /* save boot_params pointer */
call efi_main
movq %rbx,%rsi
- leaq startup_64(%rax), %rax
+ leaq rva(startup_64)(%rax), %rax
jmp *%rax
SYM_FUNC_END(efi64_stub_entry)
SYM_FUNC_END_ALIAS(efi_stub_entry)
@@ -538,15 +533,30 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
rep stosq
/*
+ * If running as an SEV guest, the encryption mask is required in the
+ * page-table setup code below. When the guest also has SEV-ES enabled
+ * set_sev_encryption_mask() will cause #VC exceptions, but the stage2
+ * handler can't map its GHCB because the page-table is not set up yet.
+ * So set up the encryption mask here while still on the stage1 #VC
+ * handler. Then load stage2 IDT and switch to the kernel's own
+ * page-table.
+ */
+ pushq %rsi
+ call set_sev_encryption_mask
+ call load_stage2_idt
+ call initialize_identity_maps
+ popq %rsi
+
+/*
* Do the extraction, and jump to the new kernel..
*/
pushq %rsi /* Save the real mode argument */
movq %rsi, %rdi /* real mode address */
leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
leaq input_data(%rip), %rdx /* input_data */
- movl $z_input_len, %ecx /* input_len */
+ movl input_len(%rip), %ecx /* input_len */
movq %rbp, %r8 /* output target address */
- movl $z_output_len, %r9d /* decompressed length, end of relocs */
+ movl output_len(%rip), %r9d /* decompressed length, end of relocs */
call extract_kernel /* returns kernel location in %rax */
popq %rsi
@@ -556,27 +566,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
jmp *%rax
SYM_FUNC_END(.Lrelocated)
-/*
- * Adjust the global offset table
- *
- * RAX is the previous adjustment of the table to undo (use 0 if it's the
- * first time we touch GOT).
- * RDI is the new adjustment to apply.
- */
-.Ladjust_got:
- /* Walk through the GOT adding the address to the entries */
- leaq _got(%rip), %rdx
- leaq _egot(%rip), %rcx
-1:
- cmpq %rcx, %rdx
- jae 2f
- subq %rax, (%rdx) /* Undo previous adjustment */
- addq %rdi, (%rdx) /* Apply the new adjustment */
- addq $8, %rdx
- jmp 1b
-2:
- ret
-
.code32
/*
* This is the 32-bit trampoline that will be copied over to low memory.
@@ -690,10 +679,21 @@ SYM_DATA_START_LOCAL(gdt)
.quad 0x0000000000000000 /* TS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
+SYM_DATA_START(boot_idt_desc)
+ .word boot_idt_end - boot_idt - 1
+ .quad 0
+SYM_DATA_END(boot_idt_desc)
+ .balign 8
+SYM_DATA_START(boot_idt)
+ .rept BOOT_IDT_ENTRIES
+ .quad 0
+ .quad 0
+ .endr
+SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
+
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
-
#ifdef CONFIG_EFI_MIXED
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
@@ -702,7 +702,7 @@ SYM_DATA(efi_is64, .byte 1)
#define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
- .text
+ __HEAD
.code32
SYM_FUNC_START(efi32_pe_entry)
/*
@@ -724,12 +724,12 @@ SYM_FUNC_START(efi32_pe_entry)
call 1f
1: pop %ebx
- subl $1b, %ebx
+ subl $ rva(1b), %ebx
/* Get the loaded image protocol pointer from the image handle */
leal -4(%ebp), %eax
pushl %eax // &loaded_image
- leal loaded_image_proto(%ebx), %eax
+ leal rva(loaded_image_proto)(%ebx), %eax
pushl %eax // pass the GUID address
pushl 8(%ebp) // pass the image handle
@@ -764,7 +764,7 @@ SYM_FUNC_START(efi32_pe_entry)
* use it before we get to the 64-bit efi_pe_entry() in C code.
*/
subl %esi, %ebx
- movl %ebx, image_offset(%ebp) // save image_offset
+ movl %ebx, rva(image_offset)(%ebp) // save image_offset
jmp efi32_pe_stub_entry
2: popl %edi // restore callee-save registers
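
The rva() macro introduced above replaces absolute symbol references with offsets from startup_32, so no R_X86_64_RELATIVE entries are emitted for them. A hedged C sketch of the equivalent computation (illustrative only, not part of the patch; the names are hypothetical):

/* Illustrative only: rva(X) == X - startup_32 at link time. */
static unsigned long resolve_rva(unsigned long startup_32_runtime,	/* e.g. %rbx or %rbp above */
				 unsigned long x_linktime,
				 unsigned long startup_32_linktime)
{
	return startup_32_runtime + (x_linktime - startup_32_linktime);
}
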
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
new file mode 100644
index 000000000000..063a60edcf99
--- /dev/null
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code is used on x86_64 to create page table identity mappings on
+ * demand by building up a new set of page tables (or appending to the
+ * existing ones), and then switching over to them when ready.
+ *
+ * Copyright (C) 2015-2016 Yinghai Lu
+ * Copyright (C) 2016 Kees Cook
+ */
+
+/*
+ * Since we're dealing with identity mappings, physical and virtual
+ * addresses are the same, so override these defines which are ultimately
+ * used by the headers in misc.h.
+ */
+#define __pa(x) ((unsigned long)(x))
+#define __va(x) ((void *)((unsigned long)(x)))
+
+/* No PAGE_TABLE_ISOLATION support needed either: */
+#undef CONFIG_PAGE_TABLE_ISOLATION
+
+#include "error.h"
+#include "misc.h"
+
+/* These actually do the work of building the kernel identity maps. */
+#include <linux/pgtable.h>
+#include <asm/cmpxchg.h>
+#include <asm/trap_pf.h>
+#include <asm/trapnr.h>
+#include <asm/init.h>
+/* Use the static base for this part of the boot process */
+#undef __PAGE_OFFSET
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
+#include "../../mm/ident_map.c"
+
+#ifdef CONFIG_X86_5LEVEL
+unsigned int __pgtable_l5_enabled;
+unsigned int pgdir_shift = 39;
+unsigned int ptrs_per_p4d = 1;
+#endif
+
+/* Used by PAGE_KERN* macros: */
+pteval_t __default_kernel_pte_mask __read_mostly = ~0;
+
+/* Used to track our page table allocation area. */
+struct alloc_pgt_data {
+ unsigned char *pgt_buf;
+ unsigned long pgt_buf_size;
+ unsigned long pgt_buf_offset;
+};
+
+/*
+ * Allocates space for a page table entry, using struct alloc_pgt_data
+ * above. Besides the local callers, this is used as the allocation
+ * callback in mapping_info below.
+ */
+static void *alloc_pgt_page(void *context)
+{
+ struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
+ unsigned char *entry;
+
+ /* Validate there is space available for a new page. */
+ if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
+ debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
+ debug_putaddr(pages->pgt_buf_offset);
+ debug_putaddr(pages->pgt_buf_size);
+ return NULL;
+ }
+
+ entry = pages->pgt_buf + pages->pgt_buf_offset;
+ pages->pgt_buf_offset += PAGE_SIZE;
+
+ return entry;
+}
+
+/* Used to track our allocated page tables. */
+static struct alloc_pgt_data pgt_data;
+
+/* The top level page table entry pointer. */
+static unsigned long top_level_pgt;
+
+phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+
+/*
+ * Mapping information structure passed to kernel_ident_mapping_init().
+ * Due to relocation, pointers must be assigned at run time not build time.
+ */
+static struct x86_mapping_info mapping_info;
+
+/*
+ * Adds the specified range to the identity mappings.
+ */
+static void add_identity_map(unsigned long start, unsigned long end)
+{
+ int ret;
+
+ /* Align boundary to 2M. */
+ start = round_down(start, PMD_SIZE);
+ end = round_up(end, PMD_SIZE);
+ if (start >= end)
+ return;
+
+ /* Build the mapping. */
+ ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, start, end);
+ if (ret)
+ error("Error: kernel_ident_mapping_init() failed\n");
+}
+
+/* Locates and clears a region for a new top level page table. */
+void initialize_identity_maps(void)
+{
+ /* Exclude the encryption mask from __PHYSICAL_MASK */
+ physical_mask &= ~sme_me_mask;
+
+ /* Init mapping_info with run-time function/buffer pointers. */
+ mapping_info.alloc_pgt_page = alloc_pgt_page;
+ mapping_info.context = &pgt_data;
+ mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
+ mapping_info.kernpg_flag = _KERNPG_TABLE;
+
+ /*
+ * It should be impossible for this not to already be true,
+ * but since calling this a second time would rewind the other
+ * counters, let's just make sure this is reset too.
+ */
+ pgt_data.pgt_buf_offset = 0;
+
+ /*
+ * If we came here via startup_32(), cr3 will be _pgtable already
+ * and we must append to the existing area instead of entirely
+ * overwriting it.
+ *
+ * With 5-level paging, we use '_pgtable' to allocate the p4d page table,
+ * the top-level page table is allocated separately.
+ *
+ * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
+ * cases. On 4-level paging it's equal to 'top_level_pgt'.
+ */
+ top_level_pgt = read_cr3_pa();
+ if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
+ pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
+ pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
+ memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
+ } else {
+ pgt_data.pgt_buf = _pgtable;
+ pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
+ memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
+ top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
+ }
+
+ /*
+ * New page-table is set up - map the kernel image and load it
+ * into cr3.
+ */
+ add_identity_map((unsigned long)_head, (unsigned long)_end);
+ write_cr3(top_level_pgt);
+}
+
+/*
+ * This switches the page tables to the new level4 that has been built
+ * via calls to add_identity_map() above. If booted via startup_32(),
+ * this is effectively a no-op.
+ */
+void finalize_identity_maps(void)
+{
+ write_cr3(top_level_pgt);
+}
+
+static pte_t *split_large_pmd(struct x86_mapping_info *info,
+ pmd_t *pmdp, unsigned long __address)
+{
+ unsigned long page_flags;
+ unsigned long address;
+ pte_t *pte;
+ pmd_t pmd;
+ int i;
+
+ pte = (pte_t *)info->alloc_pgt_page(info->context);
+ if (!pte)
+ return NULL;
+
+ address = __address & PMD_MASK;
+ /* No large page - clear PSE flag */
+ page_flags = info->page_flag & ~_PAGE_PSE;
+
+ /* Populate the PTEs */
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ set_pte(&pte[i], __pte(address | page_flags));
+ address += PAGE_SIZE;
+ }
+
+ /*
+ * Ideally we need to clear the large PMD first and do a TLB
+ * flush before we write the new PMD. But the 2M range of the
+ * PMD might contain the code we execute and/or the stack
+ * we are on, so we can't do that. But that should be safe here
+ * because we are going from large to small mappings and we are
+ * also the only user of the page-table, so there is no chance
+ * of a TLB multihit.
+ */
+ pmd = __pmd((unsigned long)pte | info->kernpg_flag);
+ set_pmd(pmdp, pmd);
+ /* Flush TLB to establish the new PMD */
+ write_cr3(top_level_pgt);
+
+ return pte + pte_index(__address);
+}
+
+static void clflush_page(unsigned long address)
+{
+ unsigned int flush_size;
+ char *cl, *start, *end;
+
+ /*
+ * Hardcode cl-size to 64 - CPUID can't be used here because that might
+ * cause another #VC exception and the GHCB is not ready to use yet.
+ */
+ flush_size = 64;
+ start = (char *)(address & PAGE_MASK);
+ end = start + PAGE_SIZE;
+
+ /*
+ * First make sure there are no pending writes on the cache-lines to
+ * flush.
+ */
+ asm volatile("mfence" : : : "memory");
+
+ for (cl = start; cl != end; cl += flush_size)
+ clflush(cl);
+}
+
+static int set_clr_page_flags(struct x86_mapping_info *info,
+ unsigned long address,
+ pteval_t set, pteval_t clr)
+{
+ pgd_t *pgdp = (pgd_t *)top_level_pgt;
+ p4d_t *p4dp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep, pte;
+
+ /*
+ * First make sure there is a PMD mapping for 'address'.
+ * It should already exist, but keep things generic.
+ *
+ * To map the page just read from it and fault it in if there is no
+ * mapping yet. add_identity_map() can't be called here because that
+ * would unconditionally map the address on PMD level, destroying any
+ * PTE-level mappings that might already exist. Use assembly here so
+ * the access won't be optimized away.
+ */
+ asm volatile("mov %[address], %%r9"
+ :: [address] "g" (*(unsigned long *)address)
+ : "r9", "memory");
+
+ /*
+ * The page is mapped at least with PMD size - so skip checks and walk
+ * directly to the PMD.
+ */
+ p4dp = p4d_offset(pgdp, address);
+ pudp = pud_offset(p4dp, address);
+ pmdp = pmd_offset(pudp, address);
+
+ if (pmd_large(*pmdp))
+ ptep = split_large_pmd(info, pmdp, address);
+ else
+ ptep = pte_offset_kernel(pmdp, address);
+
+ if (!ptep)
+ return -ENOMEM;
+
+ /*
+ * Changing encryption attributes of a page requires to flush it from
+ * the caches.
+ */
+ if ((set | clr) & _PAGE_ENC)
+ clflush_page(address);
+
+ /* Update PTE */
+ pte = *ptep;
+ pte = pte_set_flags(pte, set);
+ pte = pte_clear_flags(pte, clr);
+ set_pte(ptep, pte);
+
+ /* Flush TLB after changing encryption attribute */
+ write_cr3(top_level_pgt);
+
+ return 0;
+}
+
+int set_page_decrypted(unsigned long address)
+{
+ return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
+}
+
+int set_page_encrypted(unsigned long address)
+{
+ return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
+}
+
+int set_page_non_present(unsigned long address)
+{
+ return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
+}
+
+static void do_pf_error(const char *msg, unsigned long error_code,
+ unsigned long address, unsigned long ip)
+{
+ error_putstr(msg);
+
+ error_putstr("\nError Code: ");
+ error_puthex(error_code);
+ error_putstr("\nCR2: 0x");
+ error_puthex(address);
+ error_putstr("\nRIP relative to _head: 0x");
+ error_puthex(ip - (unsigned long)_head);
+ error_putstr("\n");
+
+ error("Stopping.\n");
+}
+
+void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+ unsigned long address = native_read_cr2();
+ unsigned long end;
+ bool ghcb_fault;
+
+ ghcb_fault = sev_es_check_ghcb_fault(address);
+
+ address &= PMD_MASK;
+ end = address + PMD_SIZE;
+
+ /*
+ * Check for unexpected error codes. Unexpected are:
+ * - Faults on present pages
+ * - User faults
+ * - Reserved bits set
+ */
+ if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
+ do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
+ else if (ghcb_fault)
+ do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);
+
+ /*
+ * Error code is sane - now identity map the 2M region around
+ * the faulting address.
+ */
+ add_identity_map(address, end);
+}
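
To make the demand-mapping flow in do_boot_page_fault() concrete, here is a small stand-alone C sketch of the 2M rounding it relies on (illustrative only, not part of the patch; macro names are local to the sketch):

#include <stdint.h>

#define PMD_SIZE_SKETCH	(2UL << 20)			/* 2M, mirrors PMD_SIZE */
#define PMD_MASK_SKETCH	(~(PMD_SIZE_SKETCH - 1))

/* The handler identity-maps the whole 2M region containing the faulting address. */
static void fault_to_region(uint64_t cr2, uint64_t *start, uint64_t *end)
{
	*start = cr2 & PMD_MASK_SKETCH;			/* round down to a 2M boundary */
	*end   = *start + PMD_SIZE_SKETCH;		/* one PMD-sized mapping */
}
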
diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
new file mode 100644
index 000000000000..804a502ee0d2
--- /dev/null
+++ b/arch/x86/boot/compressed/idt_64.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <asm/trap_pf.h>
+#include <asm/segment.h>
+#include <asm/trapnr.h>
+#include "misc.h"
+
+static void set_idt_entry(int vector, void (*handler)(void))
+{
+ unsigned long address = (unsigned long)handler;
+ gate_desc entry;
+
+ memset(&entry, 0, sizeof(entry));
+
+ entry.offset_low = (u16)(address & 0xffff);
+ entry.segment = __KERNEL_CS;
+ entry.bits.type = GATE_TRAP;
+ entry.bits.p = 1;
+ entry.offset_middle = (u16)((address >> 16) & 0xffff);
+ entry.offset_high = (u32)(address >> 32);
+
+ memcpy(&boot_idt[vector], &entry, sizeof(entry));
+}
+
+/* Have this here so we don't need to include <asm/desc.h> */
+static void load_boot_idt(const struct desc_ptr *dtr)
+{
+ asm volatile("lidt %0"::"m" (*dtr));
+}
+
+/* Set up the IDT before the kernel jumps to .Lrelocated */
+void load_stage1_idt(void)
+{
+ boot_idt_desc.address = (unsigned long)boot_idt;
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+ set_idt_entry(X86_TRAP_VC, boot_stage1_vc);
+
+ load_boot_idt(&boot_idt_desc);
+}
+
+/* Set up the IDT after the kernel has jumped to .Lrelocated */
+void load_stage2_idt(void)
+{
+ boot_idt_desc.address = (unsigned long)boot_idt;
+
+ set_idt_entry(X86_TRAP_PF, boot_page_fault);
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+#endif
+
+ load_boot_idt(&boot_idt_desc);
+}
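
set_idt_entry() above splits the 64-bit handler address across three gate fields. A hedged C sketch of that split and how it reassembles (illustrative only; the struct below mirrors the gate layout the patch assumes, it is not the kernel's definition):

#include <stdint.h>

struct gate_offsets {
	uint16_t offset_low;	/* handler address bits  0..15 */
	uint16_t offset_middle;	/* handler address bits 16..31 */
	uint32_t offset_high;	/* handler address bits 32..63 */
};

static uint64_t gate_to_address(const struct gate_offsets *g)
{
	return (uint64_t)g->offset_low |
	       ((uint64_t)g->offset_middle << 16) |
	       ((uint64_t)g->offset_high << 32);
}
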
diff --git a/arch/x86/boot/compressed/idt_handlers_64.S b/arch/x86/boot/compressed/idt_handlers_64.S
new file mode 100644
index 000000000000..22890e199f5b
--- /dev/null
+++ b/arch/x86/boot/compressed/idt_handlers_64.S
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Early IDT handler entry points
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#include <asm/segment.h>
+
+/* For ORIG_RAX */
+#include "../../entry/calling.h"
+
+.macro EXCEPTION_HANDLER name function error_code=0
+SYM_FUNC_START(\name)
+
+ /* Build pt_regs */
+ .if \error_code == 0
+ pushq $0
+ .endif
+
+ pushq %rdi
+ pushq %rsi
+ pushq %rdx
+ pushq %rcx
+ pushq %rax
+ pushq %r8
+ pushq %r9
+ pushq %r10
+ pushq %r11
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ /* Call handler with pt_regs */
+ movq %rsp, %rdi
+ /* Error code is second parameter */
+ movq ORIG_RAX(%rsp), %rsi
+ call \function
+
+ /* Restore regs */
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbp
+ popq %rbx
+ popq %r11
+ popq %r10
+ popq %r9
+ popq %r8
+ popq %rax
+ popq %rcx
+ popq %rdx
+ popq %rsi
+ popq %rdi
+
+ /* Remove error code and return */
+ addq $8, %rsp
+
+ iretq
+SYM_FUNC_END(\name)
+ .endm
+
+ .text
+ .code64
+
+EXCEPTION_HANDLER boot_page_fault do_boot_page_fault error_code=1
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+EXCEPTION_HANDLER boot_stage1_vc do_vc_no_ghcb error_code=1
+EXCEPTION_HANDLER boot_stage2_vc do_boot_stage2_vc error_code=1
+#endif
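
Each EXCEPTION_HANDLER instance builds a pt_regs frame on the stack and hands it, together with the pushed error code, to a C handler. The resulting C-side contract, as a sketch of the prototypes only (do_boot_page_fault is defined in ident_map_64.c above; the #VC handlers are presumably defined elsewhere in this series):

struct pt_regs;	/* kernel register-frame layout */

void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code);
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long error_code);
void do_boot_stage2_vc(struct pt_regs *regs, unsigned long error_code);
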
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 0048269180d5..b59547ce5b19 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -36,17 +36,12 @@
#define STATIC
#include <linux/decompress/mm.h>
-#ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled;
-unsigned int pgdir_shift __ro_after_init = 39;
-unsigned int ptrs_per_p4d __ro_after_init = 1;
-#endif
+#define _SETUP
+#include <asm/setup.h> /* For COMMAND_LINE_SIZE */
+#undef _SETUP
extern unsigned long get_cmd_line_ptr(void);
-/* Used by PAGE_KERN* macros: */
-pteval_t __default_kernel_pte_mask __read_mostly = ~0;
-
/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
@@ -87,8 +82,11 @@ static unsigned long get_boot_seed(void)
static bool memmap_too_large;
-/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
-static unsigned long long mem_limit = ULLONG_MAX;
+/*
+ * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
+ * It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
+ */
+static u64 mem_limit;
/* Number of immovable memory regions */
static int num_immovable_mem;
@@ -131,8 +129,7 @@ enum parse_mode {
};
static int
-parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
- enum parse_mode mode)
+parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode)
{
char *oldp;
@@ -162,7 +159,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
*/
*size = 0;
} else {
- unsigned long long flags;
+ u64 flags;
/*
* efi_fake_mem=nn@ss:attr the attr specifies
@@ -178,7 +175,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
}
*size = 0;
}
- /* Fall through */
+ fallthrough;
default:
/*
* If w/o offset, only size specified, memmap=nn[KMG] has the
@@ -201,7 +198,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str)
while (str && (i < MAX_MEMMAP_REGIONS)) {
int rc;
- unsigned long long start, size;
+ u64 start, size;
char *k = strchr(str, ',');
if (k)
@@ -214,7 +211,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str)
if (start == 0) {
/* Store the specified memory limit if size > 0 */
- if (size > 0)
+ if (size > 0 && size < mem_limit)
mem_limit = size;
continue;
@@ -261,15 +258,15 @@ static void parse_gb_huge_pages(char *param, char *val)
static void handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
- size_t len = strlen((char *)args);
+ size_t len;
char *tmp_cmdline;
char *param, *val;
u64 mem_size;
- if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
- !strstr(args, "hugepages"))
+ if (!args)
return;
+ len = strnlen(args, COMMAND_LINE_SIZE-1);
tmp_cmdline = malloc(len + 1);
if (!tmp_cmdline)
error("Failed to allocate space for tmp_cmdline");
@@ -284,14 +281,12 @@ static void handle_mem_options(void)
while (*args) {
args = next_arg(args, &param, &val);
/* Stop at -- */
- if (!val && strcmp(param, "--") == 0) {
- warn("Only '--' specified in cmdline");
- goto out;
- }
+ if (!val && strcmp(param, "--") == 0)
+ break;
if (!strcmp(param, "memmap")) {
mem_avoid_memmap(PARSE_MEMMAP, val);
- } else if (strstr(param, "hugepages")) {
+ } else if (IS_ENABLED(CONFIG_X86_64) && strstr(param, "hugepages")) {
parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
char *p = val;
@@ -300,21 +295,23 @@ static void handle_mem_options(void)
continue;
mem_size = memparse(p, &p);
if (mem_size == 0)
- goto out;
+ break;
- mem_limit = mem_size;
+ if (mem_size < mem_limit)
+ mem_limit = mem_size;
} else if (!strcmp(param, "efi_fake_mem")) {
mem_avoid_memmap(PARSE_EFI, val);
}
}
-out:
free(tmp_cmdline);
return;
}
/*
- * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
+ * In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
+ * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
+ *
* The mem_avoid array is used to store the ranges that need to be avoided
* when KASLR searches for an appropriate random address. We must avoid any
* regions that are unsafe to overlap with during decompression, and other
@@ -392,8 +389,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
{
unsigned long init_size = boot_params->hdr.init_size;
u64 initrd_start, initrd_size;
- u64 cmd_line, cmd_line_size;
- char *ptr;
+ unsigned long cmd_line, cmd_line_size;
/*
* Avoid the region that is unsafe to overlap during
@@ -401,8 +397,6 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
*/
mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
- add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
- mem_avoid[MEM_AVOID_ZO_RANGE].size);
/* Avoid initrd. */
initrd_start = (u64)boot_params->ext_ramdisk_image << 32;
@@ -414,22 +408,17 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
/* No need to set mapping for initrd, it will be handled in VO. */
/* Avoid kernel command line. */
- cmd_line = (u64)boot_params->ext_cmd_line_ptr << 32;
- cmd_line |= boot_params->hdr.cmd_line_ptr;
+ cmd_line = get_cmd_line_ptr();
/* Calculate size of cmd_line. */
- ptr = (char *)(unsigned long)cmd_line;
- for (cmd_line_size = 0; ptr[cmd_line_size++];)
- ;
- mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
- mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
- add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
- mem_avoid[MEM_AVOID_CMDLINE].size);
+ if (cmd_line) {
+ cmd_line_size = strnlen((char *)cmd_line, COMMAND_LINE_SIZE-1) + 1;
+ mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
+ mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
+ }
/* Avoid boot parameters. */
mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
- add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
- mem_avoid[MEM_AVOID_BOOTPARAMS].size);
/* We don't need to set a mapping for setup_data. */
@@ -438,11 +427,6 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
/* Enumerate the immovable memory regions */
num_immovable_mem = count_immovable_mem_regions();
-
-#ifdef CONFIG_X86_VERBOSE_BOOTUP
- /* Make sure video RAM can be used. */
- add_identity_map(0, PMD_SIZE);
-#endif
}
/*
@@ -454,7 +438,7 @@ static bool mem_avoid_overlap(struct mem_vector *img,
{
int i;
struct setup_data *ptr;
- unsigned long earliest = img->start + img->size;
+ u64 earliest = img->start + img->size;
bool is_overlapping = false;
for (i = 0; i < MEM_AVOID_MAX; i++) {
@@ -499,18 +483,16 @@ static bool mem_avoid_overlap(struct mem_vector *img,
}
struct slot_area {
- unsigned long addr;
- int num;
+ u64 addr;
+ unsigned long num;
};
#define MAX_SLOT_AREA 100
static struct slot_area slot_areas[MAX_SLOT_AREA];
-
+static unsigned int slot_area_index;
static unsigned long slot_max;
-static unsigned long slot_area_index;
-
static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
struct slot_area slot_area;
@@ -519,13 +501,10 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
return;
slot_area.addr = region->start;
- slot_area.num = (region->size - image_size) /
- CONFIG_PHYSICAL_ALIGN + 1;
+ slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN;
- if (slot_area.num > 0) {
- slot_areas[slot_area_index++] = slot_area;
- slot_max += slot_area.num;
- }
+ slot_areas[slot_area_index++] = slot_area;
+ slot_max += slot_area.num;
}
/*
@@ -535,57 +514,53 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
- unsigned long addr, size = 0;
+ u64 pud_start, pud_end;
+ unsigned long gb_huge_pages;
struct mem_vector tmp;
- int i = 0;
- if (!max_gb_huge_pages) {
+ if (!IS_ENABLED(CONFIG_X86_64) || !max_gb_huge_pages) {
store_slot_info(region, image_size);
return;
}
- addr = ALIGN(region->start, PUD_SIZE);
- /* Did we raise the address above the passed in memory entry? */
- if (addr < region->start + region->size)
- size = region->size - (addr - region->start);
-
- /* Check how many 1GB huge pages can be filtered out: */
- while (size > PUD_SIZE && max_gb_huge_pages) {
- size -= PUD_SIZE;
- max_gb_huge_pages--;
- i++;
- }
+ /* Are there any 1GB pages in the region? */
+ pud_start = ALIGN(region->start, PUD_SIZE);
+ pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);
/* No good 1GB huge pages found: */
- if (!i) {
+ if (pud_start >= pud_end) {
store_slot_info(region, image_size);
return;
}
- /*
- * Skip those 'i'*1GB good huge pages, and continue checking and
- * processing the remaining head or tail part of the passed region
- * if available.
- */
-
- if (addr >= region->start + image_size) {
+ /* Check if the head part of the region is usable. */
+ if (pud_start >= region->start + image_size) {
tmp.start = region->start;
- tmp.size = addr - region->start;
+ tmp.size = pud_start - region->start;
store_slot_info(&tmp, image_size);
}
- size = region->size - (addr - region->start) - i * PUD_SIZE;
- if (size >= image_size) {
- tmp.start = addr + i * PUD_SIZE;
- tmp.size = size;
+ /* Skip the good 1GB pages. */
+ gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT;
+ if (gb_huge_pages > max_gb_huge_pages) {
+ pud_end = pud_start + (max_gb_huge_pages << PUD_SHIFT);
+ max_gb_huge_pages = 0;
+ } else {
+ max_gb_huge_pages -= gb_huge_pages;
+ }
+
+ /* Check if the tail part of the region is usable. */
+ if (region->start + region->size >= pud_end + image_size) {
+ tmp.start = pud_end;
+ tmp.size = region->start + region->size - pud_end;
store_slot_info(&tmp, image_size);
}
}
-static unsigned long slots_fetch_random(void)
+static u64 slots_fetch_random(void)
{
unsigned long slot;
- int i;
+ unsigned int i;
/* Handle case of no slots stored. */
if (slot_max == 0)
@@ -598,7 +573,7 @@ static unsigned long slots_fetch_random(void)
slot -= slot_areas[i].num;
continue;
}
- return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
+ return slot_areas[i].addr + ((u64)slot * CONFIG_PHYSICAL_ALIGN);
}
if (i == slot_area_index)
@@ -611,49 +586,23 @@ static void __process_mem_region(struct mem_vector *entry,
unsigned long image_size)
{
struct mem_vector region, overlap;
- unsigned long start_orig, end;
- struct mem_vector cur_entry;
+ u64 region_end;
- /* On 32-bit, ignore entries entirely above our maximum. */
- if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
- return;
-
- /* Ignore entries entirely below our minimum. */
- if (entry->start + entry->size < minimum)
- return;
-
- /* Ignore entries above memory limit */
- end = min(entry->size + entry->start, mem_limit);
- if (entry->start >= end)
- return;
- cur_entry.start = entry->start;
- cur_entry.size = end - entry->start;
-
- region.start = cur_entry.start;
- region.size = cur_entry.size;
+ /* Enforce minimum and memory limit. */
+ region.start = max_t(u64, entry->start, minimum);
+ region_end = min(entry->start + entry->size, mem_limit);
/* Give up if slot area array is full. */
while (slot_area_index < MAX_SLOT_AREA) {
- start_orig = region.start;
-
- /* Potentially raise address to minimum location. */
- if (region.start < minimum)
- region.start = minimum;
-
/* Potentially raise address to meet alignment needs. */
region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
/* Did we raise the address above the passed in memory entry? */
- if (region.start > cur_entry.start + cur_entry.size)
+ if (region.start > region_end)
return;
/* Reduce size by any delta from the original address. */
- region.size -= region.start - start_orig;
-
- /* On 32-bit, reduce region size to fit within max size. */
- if (IS_ENABLED(CONFIG_X86_32) &&
- region.start + region.size > KERNEL_IMAGE_SIZE)
- region.size = KERNEL_IMAGE_SIZE - region.start;
+ region.size = region_end - region.start;
/* Return if region can't contain decompressed kernel */
if (region.size < image_size)
@@ -666,27 +615,19 @@ static void __process_mem_region(struct mem_vector *entry,
}
/* Store beginning of region if holds at least image_size. */
- if (overlap.start > region.start + image_size) {
- struct mem_vector beginning;
-
- beginning.start = region.start;
- beginning.size = overlap.start - region.start;
- process_gb_huge_pages(&beginning, image_size);
+ if (overlap.start >= region.start + image_size) {
+ region.size = overlap.start - region.start;
+ process_gb_huge_pages(&region, image_size);
}
- /* Return if overlap extends to or past end of region. */
- if (overlap.start + overlap.size >= region.start + region.size)
- return;
-
/* Clip off the overlapping region and start over. */
- region.size -= overlap.start - region.start + overlap.size;
region.start = overlap.start + overlap.size;
}
}
static bool process_mem_region(struct mem_vector *region,
- unsigned long long minimum,
- unsigned long long image_size)
+ unsigned long minimum,
+ unsigned long image_size)
{
int i;
/*
@@ -709,7 +650,7 @@ static bool process_mem_region(struct mem_vector *region,
* immovable memory and @region.
*/
for (i = 0; i < num_immovable_mem; i++) {
- unsigned long long start, end, entry_end, region_end;
+ u64 start, end, entry_end, region_end;
struct mem_vector entry;
if (!mem_overlaps(region, &immovable_mem[i]))
@@ -736,8 +677,8 @@ static bool process_mem_region(struct mem_vector *region,
#ifdef CONFIG_EFI
/*
- * Returns true if mirror region found (and must have been processed
- * for slots adding)
+ * Returns true if we processed the EFI memmap, which we prefer over the E820
+ * table if it is available.
*/
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
@@ -839,20 +780,30 @@ static void process_e820_entries(unsigned long minimum,
static unsigned long find_random_phys_addr(unsigned long minimum,
unsigned long image_size)
{
+ u64 phys_addr;
+
+ /* Bail out early if it's impossible to succeed. */
+ if (minimum + image_size > mem_limit)
+ return 0;
+
/* Check if we had too many memmaps. */
if (memmap_too_large) {
debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
return 0;
}
- /* Make sure minimum is aligned. */
- minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+ if (!process_efi_entries(minimum, image_size))
+ process_e820_entries(minimum, image_size);
+
+ phys_addr = slots_fetch_random();
- if (process_efi_entries(minimum, image_size))
- return slots_fetch_random();
+ /* Perform a final check to make sure the address is in range. */
+ if (phys_addr < minimum || phys_addr + image_size > mem_limit) {
+ warn("Invalid physical address chosen!\n");
+ return 0;
+ }
- process_e820_entries(minimum, image_size);
- return slots_fetch_random();
+ return (unsigned long)phys_addr;
}
static unsigned long find_random_virt_addr(unsigned long minimum,
@@ -860,18 +811,12 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
{
unsigned long slots, random_addr;
- /* Make sure minimum is aligned. */
- minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
- /* Align image_size for easy slot calculations. */
- image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);
-
/*
* There are how many CONFIG_PHYSICAL_ALIGN-sized slots
* that can hold image_size within the range of minimum to
* KERNEL_IMAGE_SIZE?
*/
- slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
- CONFIG_PHYSICAL_ALIGN + 1;
+ slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN;
random_addr = kaslr_get_random_long("Virtual") % slots;
@@ -905,8 +850,10 @@ void choose_random_location(unsigned long input,
boot_params->hdr.loadflags |= KASLR_FLAG;
- /* Prepare to add new identity pagetables on demand. */
- initialize_identity_maps();
+ if (IS_ENABLED(CONFIG_X86_32))
+ mem_limit = KERNEL_IMAGE_SIZE;
+ else
+ mem_limit = MAXMEM;
/* Record the various known unsafe memory ranges. */
mem_avoid_init(input, input_size, *output);
@@ -917,6 +864,8 @@ void choose_random_location(unsigned long input,
* location:
*/
min_addr = min(*output, 512UL << 20);
+ /* Make sure minimum is aligned. */
+ min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);
/* Walk available memory entries to find a random address. */
random_addr = find_random_phys_addr(min_addr, output_size);
@@ -924,19 +873,8 @@ void choose_random_location(unsigned long input,
warn("Physical KASLR disabled: no suitable memory region!");
} else {
/* Update the new physical address location. */
- if (*output != random_addr) {
- add_identity_map(random_addr, output_size);
+ if (*output != random_addr)
*output = random_addr;
- }
-
- /*
- * This loads the identity mapping page table.
- * This should only be done if a new physical address
- * is found for the kernel, otherwise we should keep
- * the old page table to make it be like the "nokaslr"
- * case.
- */
- finalize_identity_maps();
}
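
The slot accounting that store_slot_info() and find_random_virt_addr() now share reduces to one aligned start position plus one more per further CONFIG_PHYSICAL_ALIGN step that still fits. A minimal C sketch (illustrative only, not part of the patch; CONFIG_PHYSICAL_ALIGN is assumed to be 2M here):

#include <stdint.h>

#define PHYS_ALIGN_SKETCH	0x200000UL	/* assumed CONFIG_PHYSICAL_ALIGN */

static unsigned long slots_in_region(uint64_t region_size, unsigned long image_size)
{
	if (region_size < image_size)
		return 0;

	/* Mirrors: slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN */
	return 1 + (region_size - image_size) / PHYS_ALIGN_SKETCH;
}
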
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
deleted file mode 100644
index f9c5c13d979b..000000000000
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This code is used on x86_64 to create page table identity mappings on
- * demand by building up a new set of page tables (or appending to the
- * existing ones), and then switching over to them when ready.
- *
- * Copyright (C) 2015-2016 Yinghai Lu
- * Copyright (C) 2016 Kees Cook
- */
-
-/*
- * Since we're dealing with identity mappings, physical and virtual
- * addresses are the same, so override these defines which are ultimately
- * used by the headers in misc.h.
- */
-#define __pa(x) ((unsigned long)(x))
-#define __va(x) ((void *)((unsigned long)(x)))
-
-/* No PAGE_TABLE_ISOLATION support needed either: */
-#undef CONFIG_PAGE_TABLE_ISOLATION
-
-#include "misc.h"
-
-/* These actually do the work of building the kernel identity maps. */
-#include <linux/pgtable.h>
-#include <asm/init.h>
-/* Use the static base for this part of the boot process */
-#undef __PAGE_OFFSET
-#define __PAGE_OFFSET __PAGE_OFFSET_BASE
-#include "../../mm/ident_map.c"
-
-/* Used to track our page table allocation area. */
-struct alloc_pgt_data {
- unsigned char *pgt_buf;
- unsigned long pgt_buf_size;
- unsigned long pgt_buf_offset;
-};
-
-/*
- * Allocates space for a page table entry, using struct alloc_pgt_data
- * above. Besides the local callers, this is used as the allocation
- * callback in mapping_info below.
- */
-static void *alloc_pgt_page(void *context)
-{
- struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
- unsigned char *entry;
-
- /* Validate there is space available for a new page. */
- if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
- debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
- debug_putaddr(pages->pgt_buf_offset);
- debug_putaddr(pages->pgt_buf_size);
- return NULL;
- }
-
- entry = pages->pgt_buf + pages->pgt_buf_offset;
- pages->pgt_buf_offset += PAGE_SIZE;
-
- return entry;
-}
-
-/* Used to track our allocated page tables. */
-static struct alloc_pgt_data pgt_data;
-
-/* The top level page table entry pointer. */
-static unsigned long top_level_pgt;
-
-phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
-
-/*
- * Mapping information structure passed to kernel_ident_mapping_init().
- * Due to relocation, pointers must be assigned at run time not build time.
- */
-static struct x86_mapping_info mapping_info;
-
-/* Locates and clears a region for a new top level page table. */
-void initialize_identity_maps(void)
-{
- /* If running as an SEV guest, the encryption mask is required. */
- set_sev_encryption_mask();
-
- /* Exclude the encryption mask from __PHYSICAL_MASK */
- physical_mask &= ~sme_me_mask;
-
- /* Init mapping_info with run-time function/buffer pointers. */
- mapping_info.alloc_pgt_page = alloc_pgt_page;
- mapping_info.context = &pgt_data;
- mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
- mapping_info.kernpg_flag = _KERNPG_TABLE;
-
- /*
- * It should be impossible for this not to already be true,
- * but since calling this a second time would rewind the other
- * counters, let's just make sure this is reset too.
- */
- pgt_data.pgt_buf_offset = 0;
-
- /*
- * If we came here via startup_32(), cr3 will be _pgtable already
- * and we must append to the existing area instead of entirely
- * overwriting it.
- *
- * With 5-level paging, we use '_pgtable' to allocate the p4d page table,
- * the top-level page table is allocated separately.
- *
- * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
- * cases. On 4-level paging it's equal to 'top_level_pgt'.
- */
- top_level_pgt = read_cr3_pa();
- if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
- debug_putstr("booted via startup_32()\n");
- pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
- pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
- memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
- } else {
- debug_putstr("booted via startup_64()\n");
- pgt_data.pgt_buf = _pgtable;
- pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
- memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
- top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
- }
-}
-
-/*
- * Adds the specified range to what will become the new identity mappings.
- * Once all ranges have been added, the new mapping is activated by calling
- * finalize_identity_maps() below.
- */
-void add_identity_map(unsigned long start, unsigned long size)
-{
- unsigned long end = start + size;
-
- /* Align boundary to 2M. */
- start = round_down(start, PMD_SIZE);
- end = round_up(end, PMD_SIZE);
- if (start >= end)
- return;
-
- /* Build the mapping. */
- kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
- start, end);
-}
-
-/*
- * This switches the page tables to the new level4 that has been built
- * via calls to add_identity_map() above. If booted via startup_32(),
- * this is effectively a no-op.
- */
-void finalize_identity_maps(void)
-{
- write_cr3(top_level_pgt);
-}
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 39e592d0e0b4..267e7f93050e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -30,12 +30,9 @@
#define STATIC static
/*
- * Use normal definitions of mem*() from string.c. There are already
- * included header files which expect a definition of memset() and by
- * the time we define memset macro, it is too late.
+ * Provide definitions of memzero and memmove as some of the decompressors will
+ * try to define their own functions if these are not defined as macros.
*/
-#undef memcpy
-#undef memset
#define memzero(s, n) memset((s), 0, (n))
#define memmove memmove
@@ -445,6 +442,13 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
parse_elf(output);
handle_relocations(output, output_len, virt_addr);
debug_putstr("done.\nBooting the kernel.\n");
+
+ /*
+ * Flush GHCB from cache and map it encrypted again when running as
+ * SEV-ES guest.
+ */
+ sev_es_shutdown_ghcb();
+
return output;
}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 726e264410ff..6d31f1b4c4d1 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -23,6 +23,7 @@
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
+#include <asm/desc_defs.h>
#define BOOT_CTYPE_H
#include <linux/acpi.h>
@@ -36,6 +37,9 @@
#define memptr unsigned
#endif
+/* boot/compressed/vmlinux start and end markers */
+extern char _head[], _end[];
+
/* misc.c */
extern memptr free_mem_ptr;
extern memptr free_mem_end_ptr;
@@ -70,8 +74,8 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize);
int cmdline_find_option_bool(const char *option);
struct mem_vector {
- unsigned long long start;
- unsigned long long size;
+ u64 start;
+ u64 size;
};
#ifdef CONFIG_RANDOMIZE_BASE
@@ -81,8 +85,6 @@ void choose_random_location(unsigned long input,
unsigned long *output,
unsigned long output_size,
unsigned long *virt_addr);
-/* cpuflags.c */
-bool has_cpuflag(int flag);
#else
static inline void choose_random_location(unsigned long input,
unsigned long input_size,
@@ -93,18 +95,14 @@ static inline void choose_random_location(unsigned long input,
}
#endif
+/* cpuflags.c */
+bool has_cpuflag(int flag);
+
#ifdef CONFIG_X86_64
-void initialize_identity_maps(void);
-void add_identity_map(unsigned long start, unsigned long size);
-void finalize_identity_maps(void);
+extern int set_page_decrypted(unsigned long address);
+extern int set_page_encrypted(unsigned long address);
+extern int set_page_non_present(unsigned long address);
extern unsigned char _pgtable[];
-#else
-static inline void initialize_identity_maps(void)
-{ }
-static inline void add_identity_map(unsigned long start, unsigned long size)
-{ }
-static inline void finalize_identity_maps(void)
-{ }
#endif
#ifdef CONFIG_EARLY_PRINTK
@@ -119,6 +117,17 @@ static inline void console_init(void)
void set_sev_encryption_mask(void);
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+void sev_es_shutdown_ghcb(void);
+extern bool sev_es_check_ghcb_fault(unsigned long address);
+#else
+static inline void sev_es_shutdown_ghcb(void) { }
+static inline bool sev_es_check_ghcb_fault(unsigned long address)
+{
+ return false;
+}
+#endif
+
/* acpi.c */
#ifdef CONFIG_ACPI
acpi_physical_address get_rsdp_addr(void);
@@ -133,4 +142,21 @@ int count_immovable_mem_regions(void);
static inline int count_immovable_mem_regions(void) { return 0; }
#endif
+/* ident_map_64.c */
+#ifdef CONFIG_X86_5LEVEL
+extern unsigned int __pgtable_l5_enabled, pgdir_shift, ptrs_per_p4d;
+#endif
+
+/* Used by PAGE_KERN* macros: */
+extern pteval_t __default_kernel_pte_mask;
+
+/* idt_64.c */
+extern gate_desc boot_idt[BOOT_IDT_ENTRIES];
+extern struct desc_ptr boot_idt_desc;
+
+/* IDT Entry Points */
+void boot_page_fault(void);
+void boot_stage1_vc(void);
+void boot_stage2_vc(void);
+
#endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 7e01248765b2..52aa56cdbacc 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -60,6 +60,12 @@ int main(int argc, char *argv[])
printf(".incbin \"%s\"\n", argv[1]);
printf("input_data_end:\n");
+ printf(".section \".rodata\",\"a\",@progbits\n");
+ printf(".globl input_len\n");
+ printf("input_len:\n\t.long %lu\n", ilen);
+ printf(".globl output_len\n");
+ printf("output_len:\n\t.long %lu\n", (unsigned long)olen);
+
retval = 0;
bail:
if (f)
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index c8862696a47b..7d0394f4ebf9 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -5,15 +5,6 @@
#include "pgtable.h"
#include "../string.h"
-/*
- * __force_order is used by special_insns.h asm code to force instruction
- * serialization.
- *
- * It is not referenced from the code, but GCC < 5 with -fPIE would fail
- * due to an undefined symbol. Define it to make these ancient GCCs work.
- */
-unsigned long __force_order;
-
#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c
new file mode 100644
index 000000000000..954cb2702e23
--- /dev/null
+++ b/arch/x86/boot/compressed/sev-es.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Encrypted Register State Support
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+/*
+ * misc.h needs to be first because it knows how to include the other kernel
+ * headers in the pre-decompression code in a way that does not break
+ * compilation.
+ */
+#include "misc.h"
+
+#include <asm/pgtable_types.h>
+#include <asm/sev-es.h>
+#include <asm/trapnr.h>
+#include <asm/trap_pf.h>
+#include <asm/msr-index.h>
+#include <asm/fpu/xcr.h>
+#include <asm/ptrace.h>
+#include <asm/svm.h>
+
+#include "error.h"
+
+struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
+struct ghcb *boot_ghcb;
+
+/*
+ * Copy a version of this function here - insn-eval.c can't be used in
+ * pre-decompression code.
+ */
+static bool insn_has_rep_prefix(struct insn *insn)
+{
+ int i;
+
+ insn_get_prefixes(insn);
+
+ for (i = 0; i < insn->prefixes.nbytes; i++) {
+ insn_byte_t p = insn->prefixes.bytes[i];
+
+ if (p == 0xf2 || p == 0xf3)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
+ * doesn't use segments.
+ */
+static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
+{
+ return 0UL;
+}
+
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+ unsigned long low, high;
+
+ asm volatile("rdmsr" : "=a" (low), "=d" (high) :
+ "c" (MSR_AMD64_SEV_ES_GHCB));
+
+ return ((high << 32) | low);
+}
+
+static inline void sev_es_wr_ghcb_msr(u64 val)
+{
+ u32 low, high;
+
+ low = val & 0xffffffffUL;
+ high = val >> 32;
+
+ asm volatile("wrmsr" : : "c" (MSR_AMD64_SEV_ES_GHCB),
+ "a"(low), "d" (high) : "memory");
+}
+
+static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ enum es_result ret;
+
+ memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+
+ insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1);
+ insn_get_length(&ctxt->insn);
+
+ ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
+
+ return ret;
+}
+
+static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ void *dst, char *buf, size_t size)
+{
+ memcpy(dst, buf, size);
+
+ return ES_OK;
+}
+
+static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ void *src, char *buf, size_t size)
+{
+ memcpy(buf, src, size);
+
+ return ES_OK;
+}
+
+#undef __init
+#undef __pa
+#define __init
+#define __pa(x) ((unsigned long)(x))
+
+#define __BOOT_COMPRESSED
+
+/* Basic instruction decoding support needed */
+#include "../../lib/inat.c"
+#include "../../lib/insn.c"
+
+/* Include code for early handlers */
+#include "../../kernel/sev-es-shared.c"
+
+static bool early_setup_sev_es(void)
+{
+ if (!sev_es_negotiate_protocol())
+ sev_es_terminate(GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED);
+
+ if (set_page_decrypted((unsigned long)&boot_ghcb_page))
+ return false;
+
+ /* Page is now mapped decrypted, clear it */
+ memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));
+
+ boot_ghcb = &boot_ghcb_page;
+
+ /* Initialize lookup tables for the instruction decoder */
+ inat_init_tables();
+
+ return true;
+}
+
+void sev_es_shutdown_ghcb(void)
+{
+ if (!boot_ghcb)
+ return;
+
+ if (!sev_es_check_cpu_features())
+ error("SEV-ES CPU Features missing.");
+
+ /*
+ * GHCB Page must be flushed from the cache and mapped encrypted again.
+ * Otherwise the running kernel will see strange cache effects when
+ * trying to use that page.
+ */
+ if (set_page_encrypted((unsigned long)&boot_ghcb_page))
+ error("Can't map GHCB page encrypted");
+
+ /*
+ * GHCB page is mapped encrypted again and flushed from the cache.
+ * Mark it non-present now to catch bugs when #VC exceptions trigger
+ * after this point.
+ */
+ if (set_page_non_present((unsigned long)&boot_ghcb_page))
+ error("Can't unmap GHCB page");
+}
+
+bool sev_es_check_ghcb_fault(unsigned long address)
+{
+ /* Check whether the fault was on the GHCB page */
+ return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
+}
+
+void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
+{
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+
+ if (!boot_ghcb && !early_setup_sev_es())
+ sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
+
+ vc_ghcb_invalidate(boot_ghcb);
+ result = vc_init_em_ctxt(&ctxt, regs, exit_code);
+ if (result != ES_OK)
+ goto finish;
+
+ switch (exit_code) {
+ case SVM_EXIT_RDTSC:
+ case SVM_EXIT_RDTSCP:
+ result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
+ break;
+ case SVM_EXIT_IOIO:
+ result = vc_handle_ioio(boot_ghcb, &ctxt);
+ break;
+ case SVM_EXIT_CPUID:
+ result = vc_handle_cpuid(boot_ghcb, &ctxt);
+ break;
+ default:
+ result = ES_UNSUPPORTED;
+ break;
+ }
+
+finish:
+ if (result == ES_OK) {
+ vc_finish_insn(&ctxt);
+ } else if (result != ES_RETRY) {
+ /*
+ * For now, just halt the machine. That makes debugging easier;
+ * later this can simply call sev_es_terminate() here.
+ */
+ while (true)
+ asm volatile("hlt\n");
+ }
+}
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 8f1025d1f681..112b2375d021 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -42,12 +42,6 @@ SECTIONS
*(.rodata.*)
_erodata = . ;
}
- .got : {
- _got = .;
- KEEP(*(.got.plt))
- KEEP(*(.got))
- _egot = .;
- }
.data : {
_data = . ;
*(.data)
@@ -75,5 +69,49 @@ SECTIONS
. = ALIGN(PAGE_SIZE); /* keep ZO size page aligned */
_end = .;
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
DISCARDS
+ /DISCARD/ : {
+ *(.dynamic) *(.dynsym) *(.dynstr) *(.dynbss)
+ *(.hash) *(.gnu.hash)
+ *(.note.*)
+ }
+
+ .got.plt (INFO) : {
+ *(.got.plt)
+ }
+ ASSERT(SIZEOF(.got.plt) == 0 ||
+#ifdef CONFIG_X86_64
+ SIZEOF(.got.plt) == 0x18,
+#else
+ SIZEOF(.got.plt) == 0xc,
+#endif
+ "Unexpected GOT/PLT entries detected!")
+
+ /*
+ * Sections that should stay zero sized, which is safer to
+ * explicitly check instead of blindly discarding.
+ */
+ .got : {
+ *(.got)
+ }
+ ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
+
+ .plt : {
+ *(.plt) *(.plt.*)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .rel.dyn : {
+ *(.rel.*) *(.rel_*)
+ }
+ ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
+
+ .rela.dyn : {
+ *(.rela.*) *(.rela_*)
+ }
+ ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}
diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
index 24c95522f231..49546c247ae2 100644
--- a/arch/x86/boot/setup.ld
+++ b/arch/x86/boot/setup.ld
@@ -20,7 +20,7 @@ SECTIONS
.initdata : { *(.initdata) }
__end_init = .;
- .text : { *(.text) }
+ .text : { *(.text .text.*) }
.text32 : { *(.text32) }
. = ALIGN(16);
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 995f7b7ad512..a232da487cd2 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -11,10 +11,7 @@ void *memcpy(void *dst, const void *src, size_t len);
void *memset(void *dst, int c, size_t len);
int memcmp(const void *s1, const void *s2, size_t len);
-/*
- * Access builtin version by default. If one needs to use optimized version,
- * do "undef memcpy" in .c file and link against right string.c
- */
+/* Access builtin version by default. */
#define memcpy(d,s,l) __builtin_memcpy(d,s,l)
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index c8b8c1a8d1fc..a3725ad46c5a 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -416,8 +416,6 @@ int main(int argc, char ** argv)
/* Set the default root device */
put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
- printf("Setup is %d bytes (padded to %d bytes).\n", c, i);
-
/* Open and stat the kernel file */
fd = open(argv[2], O_RDONLY);
if (fd < 0)
@@ -425,7 +423,6 @@ int main(int argc, char ** argv)
if (fstat(fd, &sb))
die("Unable to stat `%s': %m", argv[2]);
sz = sb.st_size;
- printf("System is %d kB\n", (sz+1023)/1024);
kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
if (kernel == MAP_FAILED)
die("Unable to mmap '%s': %m", argv[2]);
@@ -488,7 +485,6 @@ int main(int argc, char ** argv)
}
/* Write the CRC */
- printf("CRC %x\n", crc);
put_unaligned_le32(crc, buf);
if (fwrite(buf, 1, 4, dest) != 4)
die("Writing CRC failed");
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index d7577fece9eb..78210793d357 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -19,6 +19,7 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+# CONFIG_64BIT is not set
CONFIG_SMP=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y
@@ -186,7 +187,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index f85600143747..9936528e1939 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -181,7 +181,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
index 6737bcea1fa1..c025a01cf708 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index e67a59130025..7b3a1cf0984b 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -12,6 +12,7 @@
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index d2d069bd459b..feccb5254c7e 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -28,9 +28,9 @@
#define SCALE_F sizeof(unsigned long)
#ifdef CONFIG_X86_64
-#define REX_PRE "0x48, "
+#define CRC32_INST "crc32q %1, %q0"
#else
-#define REX_PRE
+#define CRC32_INST "crc32l %1, %0"
#endif
#ifdef CONFIG_X86_64
@@ -48,11 +48,8 @@ asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
{
while (length--) {
- __asm__ __volatile__(
- ".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1"
- :"=S"(crc)
- :"0"(crc), "c"(*data)
- );
+ asm("crc32b %1, %0"
+ : "+r" (crc) : "rm" (*data));
data++;
}
@@ -66,11 +63,8 @@ static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len
unsigned long *ptmp = (unsigned long *)p;
while (iquotient--) {
- __asm__ __volatile__(
- ".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;"
- :"=S"(crc)
- :"0"(crc), "c"(*ptmp)
- );
+ asm(CRC32_INST
+ : "+r" (crc) : "rm" (*ptmp));
ptmp++;
}
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index 8acbb6584a37..5af8021b98ce 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
@@ -45,11 +46,11 @@ static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2)
asm volatile(
/* Clear registers to propagate the carry bit */
- " xor %%r8, %%r8;"
- " xor %%r9, %%r9;"
- " xor %%r10, %%r10;"
- " xor %%r11, %%r11;"
- " xor %1, %1;"
+ " xor %%r8d, %%r8d;"
+ " xor %%r9d, %%r9d;"
+ " xor %%r10d, %%r10d;"
+ " xor %%r11d, %%r11d;"
+ " xor %k1, %k1;"
/* Begin addition chain */
" addq 0(%3), %0;"
@@ -93,7 +94,7 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
" cmovc %0, %%rax;"
/* Step 2: Add carry*38 to the original sum */
- " xor %%rcx, %%rcx;"
+ " xor %%ecx, %%ecx;"
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
" movq %%r9, 8(%1);"
@@ -165,28 +166,28 @@ static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
/* Compute src1[0] * src2 */
" movq 0(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 8(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 16(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 24(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
@@ -200,7 +201,7 @@ static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%1), %%r8, %%r13;"
- " xor %3, %3;"
+ " xor %k3, %k3;"
" adoxq 0(%1), %%r8;"
" mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
@@ -246,28 +247,28 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
/* Compute src1[0] * src2 */
" movq 0(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 8(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 16(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 24(%1), %%rdx;"
- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
" mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
@@ -277,29 +278,29 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
/* Compute src1[0] * src2 */
" movq 32(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 64(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);"
" mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 40(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);"
" mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 48(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);"
" mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 56(%1), %%rdx;"
- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);"
" mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%0);" " mov $0, %%r8;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%0);" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);"
@@ -312,7 +313,7 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%1), %%r8, %%r13;"
- " xor %3, %3;"
+ " xor %k3, %k3;"
" adoxq 0(%1), %%r8;"
" mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
@@ -345,7 +346,7 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 96(%1), %%r8, %%r13;"
- " xor %3, %3;"
+ " xor %k3, %k3;"
" adoxq 64(%1), %%r8;"
" mulxq 104(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
@@ -516,7 +517,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
/* Step 1: Compute all partial products */
" movq 0(%1), %%rdx;" /* f[0] */
- " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */
" mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 24(%1), %%rdx;" /* f[3] */
@@ -526,7 +527,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
" mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
- " xor %%r15, %%r15;"
+ " xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
@@ -563,7 +564,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%1), %%r8, %%r13;"
- " xor %%rcx, %%rcx;"
+ " xor %%ecx, %%ecx;"
" adoxq 0(%1), %%r8;"
" mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
@@ -607,7 +608,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
asm volatile(
/* Step 1: Compute all partial products */
" movq 0(%1), %%rdx;" /* f[0] */
- " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */
" mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 24(%1), %%rdx;" /* f[3] */
@@ -617,7 +618,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
- " xor %%r15, %%r15;"
+ " xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
@@ -647,7 +648,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
/* Step 1: Compute all partial products */
" movq 32(%1), %%rdx;" /* f[0] */
- " mulxq 40(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
+ " mulxq 40(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */
" mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 56(%1), %%rdx;" /* f[3] */
@@ -657,7 +658,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
- " xor %%r15, %%r15;"
+ " xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
@@ -692,7 +693,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%1), %%r8, %%r13;"
- " xor %%rcx, %%rcx;"
+ " xor %%ecx, %%ecx;"
" adoxq 0(%1), %%r8;"
" mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
@@ -725,7 +726,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 96(%1), %%r8, %%r13;"
- " xor %%rcx, %%rcx;"
+ " xor %%ecx, %%ecx;"
" adoxq 64(%1), %%r8;"
" mulxq 104(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
index 80fcb85736e1..8ea5ab0f1ca7 100644
--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -10,6 +10,7 @@
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
index cc6b7c1a2705..2b353d42ed13 100644
--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
@@ -10,6 +10,7 @@
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
index 137edcf038cb..7d568012cc15 100644
--- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
@@ -246,7 +246,7 @@ $code.=<<___ if (!$kernel);
___
&declare_function("poly1305_init_x86_64", 32, 3);
$code.=<<___;
- xor %rax,%rax
+ xor %eax,%eax
mov %rax,0($ctx) # initialize hash value
mov %rax,8($ctx)
mov %rax,16($ctx)
@@ -2853,7 +2853,7 @@ $code.=<<___;
.type poly1305_init_base2_44,\@function,3
.align 32
poly1305_init_base2_44:
- xor %rax,%rax
+ xor %eax,%eax
mov %rax,0($ctx) # initialize hash value
mov %rax,8($ctx)
mov %rax,16($ctx)
@@ -3947,7 +3947,7 @@ xor128_decrypt_n_pad:
mov \$16,$len
sub %r10,$len
xor %eax,%eax
- xor %r11,%r11
+ xor %r11d,%r11d
.Loop_dec_byte:
mov ($inp,$otp),%r11b
mov ($otp),%al
@@ -4085,7 +4085,7 @@ avx_handler:
.long 0xa548f3fc # cld; rep movsq
mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ xor %ecx,%ecx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index dfe921efa9b2..e508dbd91813 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <asm/intel-family.h>
#include <asm/simd.h>
@@ -157,9 +158,6 @@ static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
dctx->s[1] = get_unaligned_le32(&inp[4]);
dctx->s[2] = get_unaligned_le32(&inp[8]);
dctx->s[3] = get_unaligned_le32(&inp[12]);
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- acc += POLY1305_BLOCK_SIZE;
dctx->sset = true;
}
}
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 98e4d8886f11..07a9331d55e7 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,7 +6,6 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
-#include <asm/inst.h>
/*
@@ -374,12 +373,14 @@ For 32-bit we have the following conventions - kernel is built with
* Fetch the per-CPU GSBASE value for this processor and put it in @reg.
* We normally use %gs for accessing per-CPU data, but we are setting up
* %gs here and obviously can not use %gs itself to access per-CPU data.
+ *
+ * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
+ * may not restore the host's value until the CPU returns to userspace.
+ * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
+ * while running KVM's run loop.
*/
.macro GET_PERCPU_BASE reg:req
- ALTERNATIVE \
- "LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \
- "RDPID \reg", \
- X86_FEATURE_RDPID
+ LOAD_CPU_AND_NODE_SEG_LIMIT \reg
andq $VDSO_CPUNODE_MASK, \reg
movq __per_cpu_offset(, \reg, 8), \reg
.endm
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 48512c7944e7..870efeec8bda 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -60,16 +60,10 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
- unsigned int nr = (unsigned int)regs->orig_ax;
-
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- /*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
- */
- return (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
+ return (unsigned int)regs->orig_ax;
}
/*
@@ -91,15 +85,29 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
unsigned int nr = syscall_32_enter(regs);
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^32-1 into
+ * orig_ax, the unsigned int return value truncates it. This may
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
}
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ unsigned int nr = syscall_32_enter(regs);
int res;
+ /*
+ * This cannot use syscall_enter_from_user_mode() as it has to
+ * fetch EBP before invoking any of the syscall entry work
+ * functions.
+ */
+ syscall_enter_from_user_mode_prepare(regs);
+
instrumentation_begin();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
@@ -122,6 +130,9 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
return false;
}
+ /* The cast truncates any ptrace-induced syscall nr > 2^32 - 1 */
+ nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
@@ -288,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
old_regs = set_irq_regs(regs);
instrumentation_begin();
- run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+ run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
instrumentation_end();
set_irq_regs(old_regs);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 70dea9337816..cad08703c4ad 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,13 +46,13 @@
.code64
.section .entry.text, "ax"
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
SYM_CODE_END(native_usergs_sysret64)
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
/*
* 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
@@ -101,6 +101,8 @@ SYM_CODE_START(entry_SYSCALL_64)
SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
+
/* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */
pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
@@ -446,6 +448,84 @@ _ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/**
+ * idtentry_vc - Macro to generate entry stub for #VC
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ *
+ * The macro emits code to set up the kernel context for #VC. The #VC handler
+ * runs on an IST stack and needs to be able to cause nested #VC exceptions.
+ *
+ * To make this work the #VC entry code tries its best to pretend it doesn't use
+ * an IST stack by switching to the task stack if coming from user-space (which
+ * includes early SYSCALL entry path) or back to the stack in the IRET frame if
+ * entered from kernel-mode.
+ *
+ * If entered from kernel-mode the return stack is validated first, and if it is
+ * not safe to use (e.g. because it points to the entry stack) the #VC handler
+ * will switch to a fall-back stack (VC2) and call a special handler function.
+ *
+ * The macro is only used for one vector, but it is planned to be extended in
+ * the future for the #HV exception.
+ */
+.macro idtentry_vc vector asmsym cfunc
+SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS
+ ASM_CLAC
+
+ /*
+ * If the entry is from userspace, switch stacks and treat it as
+ * a normal entry.
+ */
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_switch_stack_\@
+
+ /*
+ * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
+ * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
+ */
+ call paranoid_entry
+
+ UNWIND_HINT_REGS
+
+ /*
+ * Switch off the IST stack to make it free for nested exceptions. The
+ * vc_switch_off_ist() function will switch back to the interrupted
+ * stack if it is safe to do so. If not it switches to the VC fall-back
+ * stack.
+ */
+ movq %rsp, %rdi /* pt_regs pointer */
+ call vc_switch_off_ist
+ movq %rax, %rsp /* Switch to new stack */
+
+ UNWIND_HINT_REGS
+
+ /* Update pt_regs */
+ movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
+ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
+
+ movq %rsp, %rdi /* pt_regs pointer */
+
+ call \cfunc
+
+ /*
+ * No need to switch back to the IST stack. The current stack is either
+ * identical to the stack in the IRET frame or the VC fall-back stack,
+ * so it is definitely mapped even with PTI enabled.
+ */
+ jmp paranoid_exit
+
+ /* Switch to the regular task stack */
+.Lfrom_usermode_switch_stack_\@:
+ idtentry_body safe_stack_\cfunc, has_error_code=1
+
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
+.endm
+#endif
+
/*
* Double fault entry. Straight paranoid. No checks from which context
* this comes because for the espfix induced #DF this would do the wrong
@@ -682,6 +762,8 @@ SYM_CODE_END(.Lbad_gs)
* rdx: Function argument (can be NULL if none)
*/
SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
/*
* Save the frame pointer unconditionally. This allows the ORC
* unwinder to handle the stack switch.
@@ -840,8 +922,9 @@ SYM_CODE_START_LOCAL(paranoid_entry)
* retrieve and set the current CPUs kernel GSBASE. The stored value
* has to be restored in paranoid_exit unconditionally.
*
- * The MSR write ensures that no subsequent load is based on a
- * mispredicted GSBASE. No extra FENCE required.
+ * The unconditional write to GS base below ensures that no subsequent
+ * loads based on a mispredicted GS base can happen; therefore, no LFENCE
+ * is needed here.
*/
SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
ret
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index 1583831f61a9..f2fe0a33bcfd 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -12,8 +12,13 @@
* Reuse the 64-bit entry points for the x32 versions that occupy different
* slots in the syscall table.
*/
+#define __x32_sys_readv __x64_sys_readv
+#define __x32_sys_writev __x64_sys_writev
#define __x32_sys_getsockopt __x64_sys_getsockopt
#define __x32_sys_setsockopt __x64_sys_setsockopt
+#define __x32_sys_vmsplice __x64_sys_vmsplice
+#define __x32_sys_process_vm_readv __x64_sys_process_vm_readv
+#define __x32_sys_process_vm_writev __x64_sys_process_vm_writev
#define __SYSCALL_64(nr, sym)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 9d1102873666..9b6931f8d555 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -32,7 +32,7 @@
18 i386 oldstat sys_stat
19 i386 lseek sys_lseek compat_sys_lseek
20 i386 getpid sys_getpid
-21 i386 mount sys_mount compat_sys_mount
+21 i386 mount sys_mount
22 i386 umount sys_oldumount
23 i386 setuid sys_setuid16
24 i386 getuid sys_getuid16
@@ -142,7 +142,7 @@
128 i386 init_module sys_init_module
129 i386 delete_module sys_delete_module
130 i386 get_kernel_syms
-131 i386 quotactl sys_quotactl compat_sys_quotactl32
+131 i386 quotactl sys_quotactl
132 i386 getpgid sys_getpgid
133 i386 fchdir sys_fchdir
134 i386 bdflush sys_bdflush
@@ -156,8 +156,8 @@
142 i386 _newselect sys_select compat_sys_select
143 i386 flock sys_flock
144 i386 msync sys_msync
-145 i386 readv sys_readv compat_sys_readv
-146 i386 writev sys_writev compat_sys_writev
+145 i386 readv sys_readv
+146 i386 writev sys_writev
147 i386 getsid sys_getsid
148 i386 fdatasync sys_fdatasync
149 i386 _sysctl sys_ni_syscall
@@ -327,7 +327,7 @@
313 i386 splice sys_splice
314 i386 sync_file_range sys_ia32_sync_file_range
315 i386 tee sys_tee
-316 i386 vmsplice sys_vmsplice compat_sys_vmsplice
+316 i386 vmsplice sys_vmsplice
317 i386 move_pages sys_move_pages compat_sys_move_pages
318 i386 getcpu sys_getcpu
319 i386 epoll_pwait sys_epoll_pwait
@@ -358,8 +358,8 @@
344 i386 syncfs sys_syncfs
345 i386 sendmmsg sys_sendmmsg compat_sys_sendmmsg
346 i386 setns sys_setns
-347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+347 i386 process_vm_readv sys_process_vm_readv
+348 i386 process_vm_writev sys_process_vm_writev
349 i386 kcmp sys_kcmp
350 i386 finit_module sys_finit_module
351 i386 sched_setattr sys_sched_setattr
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index f30d6ae9a688..347809649ba2 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -371,8 +371,8 @@
512 x32 rt_sigaction compat_sys_rt_sigaction
513 x32 rt_sigreturn compat_sys_x32_rt_sigreturn
514 x32 ioctl compat_sys_ioctl
-515 x32 readv compat_sys_readv
-516 x32 writev compat_sys_writev
+515 x32 readv sys_readv
+516 x32 writev sys_writev
517 x32 recvfrom compat_sys_recvfrom
518 x32 sendmsg compat_sys_sendmsg
519 x32 recvmsg compat_sys_recvmsg
@@ -388,15 +388,15 @@
529 x32 waitid compat_sys_waitid
530 x32 set_robust_list compat_sys_set_robust_list
531 x32 get_robust_list compat_sys_get_robust_list
-532 x32 vmsplice compat_sys_vmsplice
+532 x32 vmsplice sys_vmsplice
533 x32 move_pages compat_sys_move_pages
534 x32 preadv compat_sys_preadv64
535 x32 pwritev compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
537 x32 recvmmsg compat_sys_recvmmsg_time64
538 x32 sendmmsg compat_sys_sendmmsg
-539 x32 process_vm_readv compat_sys_process_vm_readv
-540 x32 process_vm_writev compat_sys_process_vm_writev
+539 x32 process_vm_readv sys_process_vm_readv
+540 x32 process_vm_writev sys_process_vm_writev
541 x32 setsockopt sys_setsockopt
542 x32 getsockopt sys_getsockopt
543 x32 io_setup compat_sys_io_setup
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index 3a07ce3ec70b..f1f96d4d8cd6 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -29,11 +29,6 @@ SYM_CODE_START_NOALIGN(\name)
SYM_CODE_END(\name)
.endm
-#ifdef CONFIG_TRACE_IRQFLAGS
- THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
- THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
#ifdef CONFIG_PREEMPTION
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
diff --git a/arch/x86/entry/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
index 84a4a73f77f7..283ed9d00426 100644
--- a/arch/x86/entry/vdso/vdso32/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
@@ -14,6 +14,7 @@
#undef CONFIG_ILLEGAL_POINTER_VALUE
#undef CONFIG_SPARSEMEM_VMEMMAP
#undef CONFIG_NR_CPUS
+#undef CONFIG_PARAVIRT_XXL
#define CONFIG_X86_32 1
#define CONFIG_PGTABLE_LEVELS 2
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 26c36357c4c9..40669eac9d6d 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -89,6 +89,7 @@ struct perf_ibs {
u64 max_period;
unsigned long offset_mask[1];
int offset_max;
+ unsigned int fetch_count_reset_broken : 1;
struct cpu_perf_ibs __percpu *pcpu;
struct attribute **format_attrs;
@@ -334,11 +335,18 @@ static u64 get_ibs_op_count(u64 config)
{
u64 count = 0;
- if (config & IBS_OP_VAL)
- count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
-
- if (ibs_caps & IBS_CAPS_RDWROPCNT)
- count += (config & IBS_OP_CUR_CNT) >> 32;
+ /*
+ * If the internal 27-bit counter rolled over, the count is MaxCnt
+ * and the lower 7 bits of CurCnt are randomized.
+ * Otherwise CurCnt has the full 27-bit current counter value.
+ */
+ if (config & IBS_OP_VAL) {
+ count = (config & IBS_OP_MAX_CNT) << 4;
+ if (ibs_caps & IBS_CAPS_OPCNTEXT)
+ count += config & IBS_OP_MAX_CNT_EXT_MASK;
+ } else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
+ count = (config & IBS_OP_CUR_CNT) >> 32;
+ }
return count;
}
@@ -363,7 +371,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
struct hw_perf_event *hwc, u64 config)
{
- wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
+ u64 tmp = hwc->config | config;
+
+ if (perf_ibs->fetch_count_reset_broken)
+ wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
+
+ wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
}
/*
@@ -394,7 +407,7 @@ static void perf_ibs_start(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
- u64 period;
+ u64 period, config = 0;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
@@ -403,13 +416,19 @@ static void perf_ibs_start(struct perf_event *event, int flags)
hwc->state = 0;
perf_ibs_set_period(perf_ibs, hwc, &period);
+ if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
+ config |= period & IBS_OP_MAX_CNT_EXT_MASK;
+ period &= ~IBS_OP_MAX_CNT_EXT_MASK;
+ }
+ config |= period >> 4;
+
/*
* Set STARTED before enabling the hardware, such that a subsequent NMI
* must observe it.
*/
set_bit(IBS_STARTED, pcpu->state);
clear_bit(IBS_STOPPING, pcpu->state);
- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+ perf_ibs_enable_event(perf_ibs, hwc, config);
perf_event_update_userpage(event);
}
@@ -577,7 +596,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
struct perf_ibs_data ibs_data;
int offset, size, check_rip, offset_max, throttle = 0;
unsigned int msr;
- u64 *buf, *config, period;
+ u64 *buf, *config, period, new_config = 0;
if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
@@ -626,18 +645,24 @@ fail:
perf_ibs->offset_max,
offset + 1);
} while (offset < offset_max);
+ /*
+ * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
+ * depending on their availability.
+ * Can't add to offset_max as they are staggered
+ */
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
- /*
- * Read IbsBrTarget and IbsOpData4 separately
- * depending on their availability.
- * Can't add to offset_max as they are staggered
- */
- if (ibs_caps & IBS_CAPS_BRNTRGT) {
- rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
- size++;
+ if (perf_ibs == &perf_ibs_op) {
+ if (ibs_caps & IBS_CAPS_BRNTRGT) {
+ rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+ size++;
+ }
+ if (ibs_caps & IBS_CAPS_OPDATA4) {
+ rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+ size++;
+ }
}
- if (ibs_caps & IBS_CAPS_OPDATA4) {
- rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+ if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
+ rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
size++;
}
}
@@ -666,13 +691,17 @@ out:
if (throttle) {
perf_ibs_stop(event, 0);
} else {
- period >>= 4;
-
- if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
- (*config & IBS_OP_CNT_CTL))
- period |= *config & IBS_OP_CUR_CNT_RAND;
+ if (perf_ibs == &perf_ibs_op) {
+ if (ibs_caps & IBS_CAPS_OPCNTEXT) {
+ new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
+ period &= ~IBS_OP_MAX_CNT_EXT_MASK;
+ }
+ if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL))
+ new_config |= *config & IBS_OP_CUR_CNT_RAND;
+ }
+ new_config |= period >> 4;
- perf_ibs_enable_event(perf_ibs, hwc, period);
+ perf_ibs_enable_event(perf_ibs, hwc, new_config);
}
perf_event_update_userpage(event);
@@ -733,12 +762,26 @@ static __init void perf_event_ibs_init(void)
{
struct attribute **attr = ibs_op_format_attrs;
+ /*
+ * Some chips fail to reset the fetch count when it is written; instead
+ * they need a 0-1 transition of IbsFetchEn.
+ */
+ if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
+ perf_ibs_fetch.fetch_count_reset_broken = 1;
+
perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
if (ibs_caps & IBS_CAPS_OPCNT) {
perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
*attr++ = &format_attr_cnt_ctl.attr;
}
+
+ if (ibs_caps & IBS_CAPS_OPCNTEXT) {
+ perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
+ perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
+ perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
+ }
+
perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index fb616203ce42..be50ef8572cc 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
while (amd_iommu_v2_event_descs[i].attr.attr.name)
i++;
- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
+ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 76400c052b0e..7f014d450bc2 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -181,28 +181,28 @@ static void amd_uncore_del(struct perf_event *event, int flags)
}
/*
- * Convert logical CPU number to L3 PMC Config ThreadMask format
+ * Return a full thread and slice mask unless user
+ * has provided them
*/
-static u64 l3_thread_slice_mask(int cpu)
+static u64 l3_thread_slice_mask(u64 config)
{
- u64 thread_mask, core = topology_core_id(cpu);
- unsigned int shift, thread = 0;
+ if (boot_cpu_data.x86 <= 0x18)
+ return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) |
+ ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK);
- if (topology_smt_supported() && !topology_is_primary_thread(cpu))
- thread = 1;
-
- if (boot_cpu_data.x86 <= 0x18) {
- shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
- thread_mask = BIT_ULL(shift);
-
- return AMD64_L3_SLICE_MASK | thread_mask;
- }
-
- core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK;
- shift = AMD64_L3_THREAD_SHIFT + thread;
- thread_mask = BIT_ULL(shift);
+ /*
+ * If the user doesn't specify a threadmask, they're not trying to
+ * count core 0, so we enable all cores & threads.
+ * We'll also assume that they want to count slice 0 if they specify
+ * a threadmask and leave sliceid and enallslices unpopulated.
+ */
+ if (!(config & AMD64_L3_F19H_THREAD_MASK))
+ return AMD64_L3_F19H_THREAD_MASK | AMD64_L3_EN_ALL_SLICES |
+ AMD64_L3_EN_ALL_CORES;
- return AMD64_L3_EN_ALL_SLICES | core | thread_mask;
+ return config & (AMD64_L3_F19H_THREAD_MASK | AMD64_L3_SLICEID_MASK |
+ AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES |
+ AMD64_L3_COREID_MASK);
}
static int amd_uncore_event_init(struct perf_event *event)
@@ -232,7 +232,7 @@ static int amd_uncore_event_init(struct perf_event *event)
* For other events, the two fields do not affect the count.
*/
if (l3_mask && is_llc_event(event))
- hwc->config |= l3_thread_slice_mask(event->cpu);
+ hwc->config |= l3_thread_slice_mask(event->attr.config);
uncore = event_to_amd_uncore(event);
if (!uncore)
@@ -274,47 +274,72 @@ static struct attribute_group amd_uncore_attr_group = {
.attrs = amd_uncore_attrs,
};
-/*
- * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
- * on family
- */
-#define AMD_FORMAT_ATTR(_dev, _name, _format) \
-static ssize_t \
-_dev##_show##_name(struct device *dev, \
- struct device_attribute *attr, \
- char *page) \
-{ \
- BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
- return sprintf(page, _format "\n"); \
-} \
-static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);
-
-/* Used for each uncore counter type */
-#define AMD_ATTRIBUTE(_name) \
-static struct attribute *amd_uncore_format_attr_##_name[] = { \
- &format_attr_event_##_name.attr, \
- &format_attr_umask.attr, \
- NULL, \
-}; \
-static struct attribute_group amd_uncore_format_group_##_name = { \
- .name = "format", \
- .attrs = amd_uncore_format_attr_##_name, \
-}; \
-static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \
- &amd_uncore_attr_group, \
- &amd_uncore_format_group_##_name, \
- NULL, \
+#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
+static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+static struct kobj_attribute format_attr_##_var = \
+ __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+
+DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35");
+DEFINE_UNCORE_FORMAT_ATTR(event14, event, "config:0-7,32-35,59-60"); /* F17h+ DF */
+DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3 */
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(coreid, coreid, "config:42-44"); /* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(slicemask, slicemask, "config:48-51"); /* F17h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(threadmask8, threadmask, "config:56-63"); /* F17h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */
+
+static struct attribute *amd_uncore_df_format_attr[] = {
+ &format_attr_event12.attr, /* event14 if F17h+ */
+ &format_attr_umask.attr,
+ NULL,
+};
+
+static struct attribute *amd_uncore_l3_format_attr[] = {
+ &format_attr_event12.attr, /* event8 if F17h+ */
+ &format_attr_umask.attr,
+ NULL, /* slicemask if F17h, coreid if F19h */
+ NULL, /* threadmask8 if F17h, enallslices if F19h */
+ NULL, /* enallcores if F19h */
+ NULL, /* sliceid if F19h */
+ NULL, /* threadmask2 if F19h */
+ NULL,
+};
+
+static struct attribute_group amd_uncore_df_format_group = {
+ .name = "format",
+ .attrs = amd_uncore_df_format_attr,
};
-AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
-AMD_FORMAT_ATTR(umask, , "config:8-15");
-AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
-AMD_FORMAT_ATTR(event, _l3, "config:0-7");
-AMD_ATTRIBUTE(df);
-AMD_ATTRIBUTE(l3);
+static struct attribute_group amd_uncore_l3_format_group = {
+ .name = "format",
+ .attrs = amd_uncore_l3_format_attr,
+};
+
+static const struct attribute_group *amd_uncore_df_attr_groups[] = {
+ &amd_uncore_attr_group,
+ &amd_uncore_df_format_group,
+ NULL,
+};
+
+static const struct attribute_group *amd_uncore_l3_attr_groups[] = {
+ &amd_uncore_attr_group,
+ &amd_uncore_l3_format_group,
+ NULL,
+};
static struct pmu amd_nb_pmu = {
.task_ctx_nr = perf_invalid_context,
+ .attr_groups = amd_uncore_df_attr_groups,
+ .name = "amd_nb",
.event_init = amd_uncore_event_init,
.add = amd_uncore_add,
.del = amd_uncore_del,
@@ -326,6 +351,8 @@ static struct pmu amd_nb_pmu = {
static struct pmu amd_llc_pmu = {
.task_ctx_nr = perf_invalid_context,
+ .attr_groups = amd_uncore_l3_attr_groups,
+ .name = "amd_l2",
.event_init = amd_uncore_event_init,
.add = amd_uncore_add,
.del = amd_uncore_del,
@@ -529,6 +556,8 @@ static int amd_uncore_cpu_dead(unsigned int cpu)
static int __init amd_uncore_init(void)
{
+ struct attribute **df_attr = amd_uncore_df_format_attr;
+ struct attribute **l3_attr = amd_uncore_l3_format_attr;
int ret = -ENODEV;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
@@ -538,6 +567,8 @@ static int __init amd_uncore_init(void)
if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
return -ENODEV;
+ num_counters_nb = NUM_COUNTERS_NB;
+ num_counters_llc = NUM_COUNTERS_L2;
if (boot_cpu_data.x86 >= 0x17) {
/*
* For F17h and above, the Northbridge counters are
@@ -545,27 +576,16 @@ static int __init amd_uncore_init(void)
* counters are supported too. The PMUs are exported
* based on family as either L2 or L3 and NB or DF.
*/
- num_counters_nb = NUM_COUNTERS_NB;
num_counters_llc = NUM_COUNTERS_L3;
amd_nb_pmu.name = "amd_df";
amd_llc_pmu.name = "amd_l3";
- format_attr_event_df.show = &event_show_df;
- format_attr_event_l3.show = &event_show_l3;
l3_mask = true;
- } else {
- num_counters_nb = NUM_COUNTERS_NB;
- num_counters_llc = NUM_COUNTERS_L2;
- amd_nb_pmu.name = "amd_nb";
- amd_llc_pmu.name = "amd_l2";
- format_attr_event_df = format_attr_event;
- format_attr_event_l3 = format_attr_event;
- l3_mask = false;
}
- amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
- amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;
-
if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
+ if (boot_cpu_data.x86 >= 0x17)
+ *df_attr = &format_attr_event14.attr;
+
amd_uncore_nb = alloc_percpu(struct amd_uncore *);
if (!amd_uncore_nb) {
ret = -ENOMEM;
@@ -575,13 +595,29 @@ static int __init amd_uncore_init(void)
if (ret)
goto fail_nb;
- pr_info("%s NB counters detected\n",
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
- "HYGON" : "AMD");
+ pr_info("%d %s %s counters detected\n", num_counters_nb,
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "",
+ amd_nb_pmu.name);
+
ret = 0;
}
if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
+ if (boot_cpu_data.x86 >= 0x19) {
+ *l3_attr++ = &format_attr_event8.attr;
+ *l3_attr++ = &format_attr_umask.attr;
+ *l3_attr++ = &format_attr_coreid.attr;
+ *l3_attr++ = &format_attr_enallslices.attr;
+ *l3_attr++ = &format_attr_enallcores.attr;
+ *l3_attr++ = &format_attr_sliceid.attr;
+ *l3_attr++ = &format_attr_threadmask2.attr;
+ } else if (boot_cpu_data.x86 >= 0x17) {
+ *l3_attr++ = &format_attr_event8.attr;
+ *l3_attr++ = &format_attr_umask.attr;
+ *l3_attr++ = &format_attr_slicemask.attr;
+ *l3_attr++ = &format_attr_threadmask8.attr;
+ }
+
amd_uncore_llc = alloc_percpu(struct amd_uncore *);
if (!amd_uncore_llc) {
ret = -ENOMEM;
@@ -591,9 +627,9 @@ static int __init amd_uncore_init(void)
if (ret)
goto fail_llc;
- pr_info("%s LLC counters detected\n",
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
- "HYGON" : "AMD");
+ pr_info("%d %s %s counters detected\n", num_counters_llc,
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "",
+ amd_llc_pmu.name);
ret = 0;
}
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 1cbf57dc2ac8..a88c94d65693 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -28,6 +28,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>
+#include <linux/static_call.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -52,6 +53,34 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
+/*
+ * This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined
+ * from just a typename, as opposed to an actual function.
+ */
+DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
+DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
+DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
+DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
+DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
+DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
+DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
+DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
+DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling, *x86_pmu.start_scheduling);
+DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
+DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task);
+DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
+
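A minimal sketch of the static_call API introduced here, with hypothetical names (not part of this patch): a call site is declared with DEFINE_STATIC_CALL_NULL() so it starts out with no target, is retargeted at runtime with static_call_update(), and is then invoked through static_call(); void callbacks that may legitimately be absent go through static_call_cond(), which compiles to a no-op while the target is NULL.

static int my_default_handler(struct pt_regs *regs)
{
	return 0;
}

/* The second argument only supplies the function type. */
DEFINE_STATIC_CALL_NULL(my_handler, my_default_handler);

static void my_init(void)
{
	/* Patch the call site to jump directly to the handler. */
	static_call_update(my_handler, my_default_handler);
}

static int my_dispatch(struct pt_regs *regs)
{
	/* A direct call once updated; no indirect branch/retpoline. */
	return static_call(my_handler)(regs);
}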
u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -76,6 +105,9 @@ u64 x86_perf_event_update(struct perf_event *event)
if (unlikely(!hwc->event_base))
return 0;
+ if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event)
+ return x86_pmu.update_topdown_event(event);
+
/*
* Careful: an NMI might modify the previous event value.
*
@@ -660,7 +692,7 @@ static void x86_pmu_disable(struct pmu *pmu)
cpuc->enabled = 0;
barrier();
- x86_pmu.disable_all();
+ static_call(x86_pmu_disable_all)();
}
void x86_pmu_enable_all(int added)
@@ -907,8 +939,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
n0 -= cpuc->n_txn;
- if (x86_pmu.start_scheduling)
- x86_pmu.start_scheduling(cpuc);
+ static_call_cond(x86_pmu_start_scheduling)(cpuc);
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
c = cpuc->event_constraint[i];
@@ -925,7 +956,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* change due to external factors (sibling state, allow_tfa).
*/
if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
- c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
+ c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
cpuc->event_constraint[i] = c;
}
@@ -1008,8 +1039,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (!unsched && assign) {
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
- if (x86_pmu.commit_scheduling)
- x86_pmu.commit_scheduling(cpuc, i, assign[i]);
+ static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
}
} else {
for (i = n0; i < n; i++) {
@@ -1018,19 +1048,56 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
/*
* release events that failed scheduling
*/
- if (x86_pmu.put_event_constraints)
- x86_pmu.put_event_constraints(cpuc, e);
+ static_call_cond(x86_pmu_put_event_constraints)(cpuc, e);
cpuc->event_constraint[i] = NULL;
}
}
- if (x86_pmu.stop_scheduling)
- x86_pmu.stop_scheduling(cpuc);
+ static_call_cond(x86_pmu_stop_scheduling)(cpuc);
return unsched ? -EINVAL : 0;
}
+static int add_nr_metric_event(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ if (is_metric_event(event)) {
+ if (cpuc->n_metric == INTEL_TD_METRIC_NUM)
+ return -EINVAL;
+ cpuc->n_metric++;
+ cpuc->n_txn_metric++;
+ }
+
+ return 0;
+}
+
+static void del_nr_metric_event(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ if (is_metric_event(event))
+ cpuc->n_metric--;
+}
+
+static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
+ int max_count, int n)
+{
+
+ if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
+ return -EINVAL;
+
+ if (n >= max_count + cpuc->n_metric)
+ return -EINVAL;
+
+ cpuc->event_list[n] = event;
+ if (is_counter_pair(&event->hw)) {
+ cpuc->n_pair++;
+ cpuc->n_txn_pair++;
+ }
+
+ return 0;
+}
+
/*
* dogrp: true if must collect siblings events (group)
* returns total number of events and error code
@@ -1067,28 +1134,22 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
}
if (is_x86_event(leader)) {
- if (n >= max_count)
+ if (collect_event(cpuc, leader, max_count, n))
return -EINVAL;
- cpuc->event_list[n] = leader;
n++;
- if (is_counter_pair(&leader->hw))
- cpuc->n_pair++;
}
+
if (!dogrp)
return n;
for_each_sibling_event(event, leader) {
- if (!is_x86_event(event) ||
- event->state <= PERF_EVENT_STATE_OFF)
+ if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF)
continue;
- if (n >= max_count)
+ if (collect_event(cpuc, event, max_count, n))
return -EINVAL;
- cpuc->event_list[n] = event;
n++;
- if (is_counter_pair(&event->hw))
- cpuc->n_pair++;
}
return n;
}
@@ -1110,11 +1171,16 @@ static inline void x86_assign_hw_event(struct perf_event *event,
hwc->event_base = 0;
break;
+ case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
+ /* All the metric events are mapped onto the fixed counter 3. */
+ idx = INTEL_PMC_IDX_FIXED_SLOTS;
+ /* fall through */
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
(idx - INTEL_PMC_IDX_FIXED);
- hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | 1<<30;
+ hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
+ INTEL_PMC_FIXED_RDPMC_BASE;
break;
default:
@@ -1226,7 +1292,7 @@ static void x86_pmu_enable(struct pmu *pmu)
cpuc->enabled = 1;
barrier();
- x86_pmu.enable_all(added);
+ static_call(x86_pmu_enable_all)(added);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -1245,6 +1311,10 @@ int x86_perf_event_set_period(struct perf_event *event)
if (unlikely(!hwc->event_base))
return 0;
+ if (unlikely(is_topdown_count(event)) &&
+ x86_pmu.set_topdown_event_period)
+ return x86_pmu.set_topdown_event_period(event);
+
/*
* If we are way outside a reasonable range then just skip forward:
*/
@@ -1284,11 +1354,11 @@ int x86_perf_event_set_period(struct perf_event *event)
wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
/*
- * Clear the Merge event counter's upper 16 bits since
+ * Sign extend the Merge event counter's upper 16 bits since
* we currently declare a 48-bit counter width
*/
if (is_counter_pair(hwc))
- wrmsrl(x86_pmu_event_addr(idx + 1), 0);
+ wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);
/*
* Due to erratum on certan cpu we need
@@ -1347,7 +1417,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
goto done_collect;
- ret = x86_pmu.schedule_events(cpuc, n, assign);
+ ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
if (ret)
goto out;
/*
@@ -1365,13 +1435,11 @@ done_collect:
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
- if (x86_pmu.add) {
- /*
- * This is before x86_pmu_enable() will call x86_pmu_start(),
- * so we enable LBRs before an event needs them etc..
- */
- x86_pmu.add(event);
- }
+ /*
+ * This is before x86_pmu_enable() will call x86_pmu_start(),
+ * so we enable LBRs before an event needs them etc..
+ */
+ static_call_cond(x86_pmu_add)(event);
ret = 0;
out:
@@ -1399,7 +1467,7 @@ static void x86_pmu_start(struct perf_event *event, int flags)
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
__set_bit(idx, cpuc->running);
- x86_pmu.enable(event);
+ static_call(x86_pmu_enable)(event);
perf_event_update_userpage(event);
}
@@ -1469,7 +1537,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
if (test_bit(hwc->idx, cpuc->active_mask)) {
- x86_pmu.disable(event);
+ static_call(x86_pmu_disable)(event);
__clear_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
@@ -1519,8 +1587,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
if (i >= cpuc->n_events - cpuc->n_added)
--cpuc->n_added;
- if (x86_pmu.put_event_constraints)
- x86_pmu.put_event_constraints(cpuc, event);
+ static_call_cond(x86_pmu_put_event_constraints)(cpuc, event);
/* Delete the array entry. */
while (++i < cpuc->n_events) {
@@ -1529,17 +1596,18 @@ static void x86_pmu_del(struct perf_event *event, int flags)
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
+ if (x86_pmu.intel_cap.perf_metrics)
+ del_nr_metric_event(cpuc, event);
perf_event_update_userpage(event);
do_del:
- if (x86_pmu.del) {
- /*
- * This is after x86_pmu_stop(); so we disable LBRs after any
- * event can need them etc..
- */
- x86_pmu.del(event);
- }
+
+ /*
+ * This is after x86_pmu_stop(); so we disable LBRs after any
+ * event can need them etc..
+ */
+ static_call_cond(x86_pmu_del)(event);
}
int x86_pmu_handle_irq(struct pt_regs *regs)
@@ -1617,7 +1685,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
return NMI_DONE;
start_clock = sched_clock();
- ret = x86_pmu.handle_irq(regs);
+ ret = static_call(x86_pmu_handle_irq)(regs);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
@@ -1830,6 +1898,38 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
static struct attribute_group x86_pmu_attr_group;
static struct attribute_group x86_pmu_caps_group;
+static void x86_pmu_static_call_update(void)
+{
+ static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq);
+ static_call_update(x86_pmu_disable_all, x86_pmu.disable_all);
+ static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);
+ static_call_update(x86_pmu_enable, x86_pmu.enable);
+ static_call_update(x86_pmu_disable, x86_pmu.disable);
+
+ static_call_update(x86_pmu_add, x86_pmu.add);
+ static_call_update(x86_pmu_del, x86_pmu.del);
+ static_call_update(x86_pmu_read, x86_pmu.read);
+
+ static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events);
+ static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints);
+ static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints);
+
+ static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling);
+ static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling);
+ static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
+
+ static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
+ static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
+
+ static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
+ static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
+}
+
+static void _x86_pmu_read(struct perf_event *event)
+{
+ x86_perf_event_update(event);
+}
+
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
@@ -1898,6 +1998,11 @@ static int __init init_hw_perf_events(void)
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
+ if (!x86_pmu.read)
+ x86_pmu.read = _x86_pmu_read;
+
+ x86_pmu_static_call_update();
+
/*
* Install callbacks. Core will call them for each online
* cpu.
@@ -1934,11 +2039,9 @@ out:
}
early_initcall(init_hw_perf_events);
-static inline void x86_pmu_read(struct perf_event *event)
+static void x86_pmu_read(struct perf_event *event)
{
- if (x86_pmu.read)
- return x86_pmu.read(event);
- x86_perf_event_update(event);
+ static_call(x86_pmu_read)(event);
}
/*
@@ -1962,6 +2065,8 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
perf_pmu_disable(pmu);
__this_cpu_write(cpu_hw_events.n_txn, 0);
+ __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
+ __this_cpu_write(cpu_hw_events.n_txn_metric, 0);
}
/*
@@ -1987,6 +2092,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
*/
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
+ __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
+ __this_cpu_sub(cpu_hw_events.n_metric, __this_cpu_read(cpu_hw_events.n_txn_metric));
perf_pmu_enable(pmu);
}
@@ -2015,7 +2122,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
if (!x86_pmu_initialized())
return -EAGAIN;
- ret = x86_pmu.schedule_events(cpuc, n, assign);
+ ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
if (ret)
return ret;
@@ -2208,17 +2315,15 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
static int x86_pmu_event_idx(struct perf_event *event)
{
- int idx = event->hw.idx;
+ struct hw_perf_event *hwc = &event->hw;
- if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return 0;
- if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
- idx -= INTEL_PMC_IDX_FIXED;
- idx |= 1 << 30;
- }
-
- return idx + 1;
+ if (is_metric_idx(hwc->idx))
+ return INTEL_PMC_FIXED_RDPMC_METRICS + 1;
+ else
+ return hwc->event_base_rdpmc + 1;
}
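The index returned here (event_base_rdpmc + 1, or the fixed metrics pseudo-index + 1) is what perf_event_update_userpage() publishes in the mmap'ed page's "index" field; userspace subtracts one before issuing RDPMC. A hypothetical userspace sketch of that protocol, assuming the event is already opened and mmap'ed and cap_user_rdpmc is set (sign-extension of narrower counters is ignored for brevity):

#include <stdint.h>
#include <linux/perf_event.h>

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}

/* pc points at the mmap'ed struct perf_event_mmap_page of the event. */
static uint64_t read_counter(struct perf_event_mmap_page *pc)
{
	uint64_t count;
	uint32_t seq, idx;

	do {
		seq = pc->lock;
		__sync_synchronize();
		idx = pc->index;	/* 0 means not active on this CPU */
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);
		__sync_synchronize();
	} while (pc->lock != seq);

	return count;
}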
static ssize_t get_attr_rdpmc(struct device *cdev,
@@ -2308,15 +2413,13 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
- if (x86_pmu.sched_task)
- x86_pmu.sched_task(ctx, sched_in);
+ static_call_cond(x86_pmu_sched_task)(ctx, sched_in);
}
static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
struct perf_event_context *next)
{
- if (x86_pmu.swap_task_ctx)
- x86_pmu.swap_task_ctx(prev, next);
+ static_call_cond(x86_pmu_swap_task_ctx)(prev, next);
}
void perf_check_microcode(void)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 50963472ee85..f1926e9f2143 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -243,10 +243,14 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
static struct event_constraint intel_icl_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
- INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */
+ FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
@@ -309,6 +313,12 @@ EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
"4", "2");
+EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
+EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
+EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
+EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
+EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
+
static struct attribute *snb_events_attrs[] = {
EVENT_PTR(td_slots_issued),
EVENT_PTR(td_slots_retired),
@@ -2165,11 +2175,24 @@ static inline void intel_clear_masks(struct perf_event *event, int idx)
static void intel_pmu_disable_fixed(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, mask;
+ int idx = hwc->idx;
- mask = 0xfULL << (idx * 4);
+ if (is_topdown_idx(idx)) {
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * When there are other active TopDown events,
+ * don't disable the fixed counter 3.
+ */
+ if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
+ return;
+ idx = INTEL_PMC_IDX_FIXED_SLOTS;
+ }
+ intel_clear_masks(event, idx);
+
+ mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
wrmsrl(hwc->config_base, ctrl_val);
@@ -2180,17 +2203,28 @@ static void intel_pmu_disable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- if (idx < INTEL_PMC_IDX_FIXED) {
+ switch (idx) {
+ case 0 ... INTEL_PMC_IDX_FIXED - 1:
intel_clear_masks(event, idx);
x86_pmu_disable_event(event);
- } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
- intel_clear_masks(event, idx);
+ break;
+ case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
+ case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
intel_pmu_disable_fixed(event);
- } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
+ break;
+ case INTEL_PMC_IDX_FIXED_BTS:
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
- } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+ return;
+ case INTEL_PMC_IDX_FIXED_VLBR:
intel_clear_masks(event, idx);
+ break;
+ default:
+ intel_clear_masks(event, idx);
+ pr_warn("Failed to disable the event with invalid index %d\n",
+ idx);
+ return;
+ }
/*
* Needs to be called after x86_pmu_disable_event,
@@ -2208,10 +2242,189 @@ static void intel_pmu_del_event(struct perf_event *event)
intel_pmu_pebs_del(event);
}
+static int icl_set_topdown_event_period(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ s64 left = local64_read(&hwc->period_left);
+
+ /*
+ * The values in PERF_METRICS MSR are derived from fixed counter 3.
+ * Software should start both registers, PERF_METRICS and fixed
+ * counter 3, from zero.
+ * Clear PERF_METRICS and Fixed counter 3 in initialization.
+ * After that, both MSRs will be cleared for each read.
+ * Don't need to clear them again.
+ */
+ if (left == x86_pmu.max_period) {
+ wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
+ wrmsrl(MSR_PERF_METRICS, 0);
+ hwc->saved_slots = 0;
+ hwc->saved_metric = 0;
+ }
+
+ if ((hwc->saved_slots) && is_slots_event(event)) {
+ wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
+ wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
+ }
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
+{
+ u32 val;
+
+ /*
+ * The metric is reported as an 8-bit integer fraction
+ * summing up to 0xff.
+ * slots-in-metric = (Metric / 0xff) * slots
+ */
+ val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
+ return mul_u64_u32_div(slots, val, 0xff);
+}
+
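As a worked example of the fraction above: PERF_METRICS packs one 8-bit value per metric, each a fraction of 0xff. If the retiring byte reads 0x80 and fixed counter 3 (SLOTS) reads 1,000,000, the retiring slots work out to 1,000,000 * 0x80 / 0xff, roughly 501,960. A plain-C equivalent of the helper, without the overflow protection that mul_u64_u32_div() provides:

static unsigned long long metric_slots(unsigned long long metrics,
				       unsigned long long slots, int byte)
{
	unsigned int val = (metrics >> (byte * 8)) & 0xff;

	/* slots-in-metric = (val / 0xff) * slots, in integer math */
	return slots * val / 0xff;
}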
+static u64 icl_get_topdown_value(struct perf_event *event,
+ u64 slots, u64 metrics)
+{
+ int idx = event->hw.idx;
+ u64 delta;
+
+ if (is_metric_idx(idx))
+ delta = icl_get_metrics_event_value(metrics, slots, idx);
+ else
+ delta = slots;
+
+ return delta;
+}
+
+static void __icl_update_topdown_event(struct perf_event *event,
+ u64 slots, u64 metrics,
+ u64 last_slots, u64 last_metrics)
+{
+ u64 delta, last = 0;
+
+ delta = icl_get_topdown_value(event, slots, metrics);
+ if (last_slots)
+ last = icl_get_topdown_value(event, last_slots, last_metrics);
+
+ /*
+ * The 8-bit integer fraction of a metric may not be accurate,
+ * especially when the change is very small. For example, if
+ * only a few bad_spec events happen, the fraction may drop
+ * from 1 to 0. If so, the bad_spec event value will be 0,
+ * which is less than the last value.
+ * Avoid updating event->count in this case.
+ */
+ if (delta > last) {
+ delta -= last;
+ local64_add(delta, &event->count);
+ }
+}
+
+static void update_saved_topdown_regs(struct perf_event *event,
+ u64 slots, u64 metrics)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct perf_event *other;
+ int idx;
+
+ event->hw.saved_slots = slots;
+ event->hw.saved_metric = metrics;
+
+ for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) {
+ if (!is_topdown_idx(idx))
+ continue;
+ other = cpuc->events[idx];
+ other->hw.saved_slots = slots;
+ other->hw.saved_metric = metrics;
+ }
+}
+
+/*
+ * Update all active Topdown events.
+ *
+ * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
+ * modified by an NMI. The PMU has to be disabled before calling this function.
+ */
+static u64 icl_update_topdown_event(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct perf_event *other;
+ u64 slots, metrics;
+ bool reset = true;
+ int idx;
+
+ /* read Fixed counter 3 */
+ rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
+ if (!slots)
+ return 0;
+
+ /* read PERF_METRICS */
+ rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+
+ for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) {
+ if (!is_topdown_idx(idx))
+ continue;
+ other = cpuc->events[idx];
+ __icl_update_topdown_event(other, slots, metrics,
+ event ? event->hw.saved_slots : 0,
+ event ? event->hw.saved_metric : 0);
+ }
+
+ /*
+ * Check and update this event, which may have been cleared
+ * in active_mask, e.g. by x86_pmu_stop().
+ */
+ if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
+ __icl_update_topdown_event(event, slots, metrics,
+ event->hw.saved_slots,
+ event->hw.saved_metric);
+
+ /*
+ * In x86_pmu_stop(), the event is cleared in active_mask first,
+ * then the delta is drained, which indicates a context switch
+ * for counting.
+ * Save the metric and slots values for the context switch.
+ * There is no need to reset PERF_METRICS and Fixed counter 3,
+ * because the values will be restored on the next schedule-in.
+ */
+ update_saved_topdown_regs(event, slots, metrics);
+ reset = false;
+ }
+
+ if (reset) {
+ /* The fixed counter 3 has to be written before the PERF_METRICS. */
+ wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
+ wrmsrl(MSR_PERF_METRICS, 0);
+ if (event)
+ update_saved_topdown_regs(event, 0, 0);
+ }
+
+ return slots;
+}
+
+static void intel_pmu_read_topdown_event(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /* Only need to call update_topdown_event() once for group read. */
+ if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
+ !is_slots_event(event))
+ return;
+
+ perf_pmu_disable(event->pmu);
+ x86_pmu.update_topdown_event(event);
+ perf_pmu_enable(event->pmu);
+}
+
static void intel_pmu_read_event(struct perf_event *event)
{
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
intel_pmu_auto_reload_read(event);
+ else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
+ intel_pmu_read_topdown_event(event);
else
x86_perf_event_update(event);
}
@@ -2219,8 +2432,22 @@ static void intel_pmu_read_event(struct perf_event *event)
static void intel_pmu_enable_fixed(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, mask, bits = 0;
+ int idx = hwc->idx;
+
+ if (is_topdown_idx(idx)) {
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ /*
+ * When there are other active TopDown events,
+ * don't enable the fixed counter 3 again.
+ */
+ if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
+ return;
+
+ idx = INTEL_PMC_IDX_FIXED_SLOTS;
+ }
+
+ intel_set_masks(event, idx);
/*
* Enable IRQ generation (0x8), if not PEBS,
@@ -2240,6 +2467,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
bits |= 0x4;
+ idx -= INTEL_PMC_IDX_FIXED;
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
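For context on the shift above: MSR_ARCH_PERFMON_FIXED_CTR_CTRL carries one 4-bit control field per fixed counter (roughly bit 0 = ring-0 counting, bit 1 = ring-3, bit 2 = AnyThread, bit 3 = PMI), so the field for fixed counter 3 (SLOTS) sits at bits 15:12. A small sketch of the read-modify-write this function performs, not the patch's code:

static void set_fixed3_ctrl(u64 field)	/* e.g. 0xb = PMI | usr | os */
{
	u64 ctrl;

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
	ctrl &= ~(0xfULL << (3 * 4));		/* clear the old field */
	ctrl |= (field & 0xf) << (3 * 4);	/* install bits 15:12 */
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
}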
@@ -2262,18 +2490,27 @@ static void intel_pmu_enable_event(struct perf_event *event)
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_enable(event);
- if (idx < INTEL_PMC_IDX_FIXED) {
+ switch (idx) {
+ case 0 ... INTEL_PMC_IDX_FIXED - 1:
intel_set_masks(event, idx);
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
- } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
- intel_set_masks(event, idx);
+ break;
+ case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
+ case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
intel_pmu_enable_fixed(event);
- } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
+ break;
+ case INTEL_PMC_IDX_FIXED_BTS:
if (!__this_cpu_read(cpu_hw_events.enabled))
return;
intel_pmu_enable_bts(hwc->config);
- } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+ break;
+ case INTEL_PMC_IDX_FIXED_VLBR:
intel_set_masks(event, idx);
+ break;
+ default:
+ pr_warn("Failed to enable the event with invalid index %d\n",
+ idx);
+ }
}
static void intel_pmu_add_event(struct perf_event *event)
@@ -2389,7 +2626,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
/*
* PEBS overflow sets bit 62 in the global status register
*/
- if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+ if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
u64 pebs_enabled = cpuc->pebs_enabled;
handled++;
@@ -2410,7 +2647,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
/*
* Intel PT
*/
- if (__test_and_clear_bit(55, (unsigned long *)&status)) {
+ if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
handled++;
if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
perf_guest_cbs->handle_intel_pt_intr))
@@ -2420,6 +2657,15 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
}
/*
+ * Intel Perf metrics
+ */
+ if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
+ handled++;
+ if (x86_pmu.update_topdown_event)
+ x86_pmu.update_topdown_event(NULL);
+ }
+
+ /*
* Checkpointed counters can lead to 'spurious' PMIs because the
* rollback caused by the PMI will have cleared the overflow status
* bit. Therefore always force probe these counters.
@@ -3355,6 +3601,56 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (event->attr.type != PERF_TYPE_RAW)
return 0;
+ /*
+ * Configure Topdown slots and metric events
+ *
+ * The slots event on Fixed Counter 3 can support sampling,
+ * which will be handled normally in x86_perf_event_update().
+ *
+ * Metric events don't support sampling and require being paired
+ * with a slots event as group leader. When the slots event
+ * is used in a metrics group, it too cannot support sampling.
+ */
+ if (x86_pmu.intel_cap.perf_metrics && is_topdown_event(event)) {
+ if (event->attr.config1 || event->attr.config2)
+ return -EINVAL;
+
+ /*
+ * The TopDown metrics events and slots event don't
+ * support any filters.
+ */
+ if (event->attr.config & X86_ALL_EVENT_FLAGS)
+ return -EINVAL;
+
+ if (is_metric_event(event)) {
+ struct perf_event *leader = event->group_leader;
+
+ /* The metric events don't support sampling. */
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ /* The metric events require a slots group leader. */
+ if (!is_slots_event(leader))
+ return -EINVAL;
+
+ /*
+ * The leader/SLOTS must not be a sampling event for
+ * metric use; the hardware requires that it start at 0
+ * when used in conjunction with MSR_PERF_METRICS.
+ */
+ if (is_sampling_event(leader))
+ return -EINVAL;
+
+ event->event_caps |= PERF_EV_CAP_SIBLING;
+ /*
+ * Only once we have a METRICs sibling do we
+ * need TopDown magic.
+ */
+ leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
+ event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
+ }
+ }
+
if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
return 0;
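To make the pairing rule above concrete, here is a hypothetical userspace sketch that opens a SLOTS leader with one metric sibling, using the raw encodings exported earlier in this patch (slots: event=0x00,umask=0x4 -> config 0x400; topdown-retiring: event=0x00,umask=0x80 -> config 0x8000). Error handling is omitted and the encodings are only an assumption carried over from the sysfs strings above:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_raw(uint64_t config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = config;
	attr.read_format = PERF_FORMAT_GROUP;

	/* counting (non-sampling) event on CPU 0, all tasks */
	return syscall(__NR_perf_event_open, &attr, -1, 0, group_fd, 0);
}

int main(void)
{
	uint64_t buf[4];	/* nr, slots value, retiring value */
	int slots, retiring;

	slots = open_raw(0x400, -1);		/* SLOTS, group leader */
	retiring = open_raw(0x8000, slots);	/* topdown-retiring sibling */

	sleep(1);
	read(slots, buf, sizeof(buf));		/* group read via the leader */
	return 0;
}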
@@ -3787,6 +4083,17 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.counter_freezing)
enable_counter_freeze();
+ /* Disable perf metrics if any added CPU doesn't support it. */
+ if (x86_pmu.intel_cap.perf_metrics) {
+ union perf_capabilities perf_cap;
+
+ rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
+ if (!perf_cap.perf_metrics) {
+ x86_pmu.intel_cap.perf_metrics = 0;
+ x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+ }
+ }
+
if (!cpuc->shared_regs)
return;
@@ -4355,6 +4662,15 @@ static struct attribute *icl_events_attrs[] = {
NULL,
};
+static struct attribute *icl_td_events_attrs[] = {
+ EVENT_PTR(slots),
+ EVENT_PTR(td_retiring),
+ EVENT_PTR(td_bad_spec),
+ EVENT_PTR(td_fe_bound),
+ EVENT_PTR(td_be_bound),
+ NULL,
+};
+
static struct attribute *icl_tsx_events_attrs[] = {
EVENT_PTR(tx_start),
EVENT_PTR(tx_abort),
@@ -4682,7 +4998,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_CORE2_MEROM:
x86_add_quirk(intel_clovertown_quirk);
- /* fall through */
+ fallthrough;
case INTEL_FAM6_CORE2_MEROM_L:
case INTEL_FAM6_CORE2_PENRYN:
@@ -4830,6 +5146,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ATOM_TREMONT_D:
case INTEL_FAM6_ATOM_TREMONT:
+ case INTEL_FAM6_ATOM_TREMONT_L:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -5062,7 +5379,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_SKYLAKE_X:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_SKYLAKE_L:
case INTEL_FAM6_SKYLAKE:
case INTEL_FAM6_KABYLAKE_L:
@@ -5114,7 +5431,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_ICELAKE_L:
case INTEL_FAM6_ICELAKE:
case INTEL_FAM6_TIGERLAKE_L:
@@ -5139,10 +5456,13 @@ __init int intel_pmu_init(void)
hsw_format_attr : nhm_format_attr;
extra_skl_attr = skl_format_attr;
mem_attr = icl_events_attrs;
+ td_attr = icl_td_events_attrs;
tsx_attr = icl_tsx_events_attrs;
x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
x86_pmu.lbr_pt_coexist = true;
intel_pmu_pebs_data_source_skl(pmem);
+ x86_pmu.update_topdown_event = icl_update_topdown_event;
+ x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
pr_cont("Icelake events, ");
name = "icelake";
break;
@@ -5198,6 +5518,15 @@ __init int intel_pmu_init(void)
* counter, so do not extend mask to generic counters
*/
for_each_event_constraint(c, x86_pmu.event_constraints) {
+ /*
+ * Don't extend the topdown slots and metrics
+ * events to the generic counters.
+ */
+ if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
+ c->weight = hweight64(c->idxmsk64);
+ continue;
+ }
+
if (c->cmask == FIXED_EVENT_FLAGS
&& c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
@@ -5253,6 +5582,9 @@ __init int intel_pmu_init(void)
if (x86_pmu.counter_freezing)
x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
+ if (x86_pmu.intel_cap.perf_metrics)
+ x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+
return 0;
}
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 86848c57b55e..404315df1e16 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -670,9 +670,7 @@ unlock:
static inline void intel_pmu_drain_pebs_buffer(void)
{
- struct pt_regs regs;
-
- x86_pmu.drain_pebs(&regs);
+ x86_pmu.drain_pebs(NULL);
}
/*
@@ -1737,6 +1735,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
struct x86_perf_regs perf_regs;
struct pt_regs *regs = &perf_regs.regs;
void *at = get_next_pebs_record_by_bit(base, top, bit);
+ struct pt_regs dummy_iregs;
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
/*
@@ -1749,6 +1748,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
} else if (!intel_pmu_save_and_restart(event))
return;
+ if (!iregs)
+ iregs = &dummy_iregs;
+
while (count > 1) {
setup_sample(event, iregs, at, &data, regs);
perf_event_output(event, &data, regs);
@@ -1758,16 +1760,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
}
setup_sample(event, iregs, at, &data, regs);
-
- /*
- * All but the last records are processed.
- * The last one is left to be able to call the overflow handler.
- */
- if (perf_event_overflow(event, &data, regs)) {
- x86_pmu_stop(event, 0);
- return;
+ if (iregs == &dummy_iregs) {
+ /*
+ * The PEBS records may be drained in the non-overflow context,
+ * e.g., large PEBS + context switch. Perf should treat the
+ * last record the same as other PEBS records, and not
+ * invoke the generic overflow handler.
+ */
+ perf_event_output(event, &data, regs);
+ } else {
+ /*
+ * All but the last records are processed.
+ * The last one is left to be able to call the overflow handler.
+ */
+ if (perf_event_overflow(event, &data, regs))
+ x86_pmu_stop(event, 0);
}
-
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 63f58bdf556c..8961653c5dd2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1268,7 +1268,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_ZERO_CALL;
break;
}
- /* fall through */
+ fallthrough;
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d5c6d3b340c5..86d012b3e0b4 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -12,6 +12,8 @@ struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;
static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
+/* The PCI driver for the device which the uncore doesn't own. */
+struct pci_driver *uncore_pci_sub_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
@@ -989,65 +991,71 @@ uncore_types_init(struct intel_uncore_type **types, bool setid)
}
/*
- * add a pci uncore device
+ * Get the die information of a PCI device.
+ * @pdev: The PCI device.
+ * @phys_id: The physical socket id which the device maps to.
+ * @die: The die id which the device maps to.
*/
-static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int uncore_pci_get_dev_die_info(struct pci_dev *pdev,
+ int *phys_id, int *die)
{
- struct intel_uncore_type *type;
- struct intel_uncore_pmu *pmu = NULL;
- struct intel_uncore_box *box;
- int phys_id, die, ret;
-
- phys_id = uncore_pcibus_to_physid(pdev->bus);
- if (phys_id < 0)
+ *phys_id = uncore_pcibus_to_physid(pdev->bus);
+ if (*phys_id < 0)
return -ENODEV;
- die = (topology_max_die_per_package() > 1) ? phys_id :
- topology_phys_to_logical_pkg(phys_id);
- if (die < 0)
+ *die = (topology_max_die_per_package() > 1) ? *phys_id :
+ topology_phys_to_logical_pkg(*phys_id);
+ if (*die < 0)
return -EINVAL;
- if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
- int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
-
- uncore_extra_pci_dev[die].dev[idx] = pdev;
- pci_set_drvdata(pdev, NULL);
- return 0;
- }
-
- type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
+ return 0;
+}
- /*
- * Some platforms, e.g. Knights Landing, use a common PCI device ID
- * for multiple instances of an uncore PMU device type. We should check
- * PCI slot and func to indicate the uncore box.
- */
- if (id->driver_data & ~0xffff) {
- struct pci_driver *pci_drv = pdev->driver;
- const struct pci_device_id *ids = pci_drv->id_table;
- unsigned int devfn;
-
- while (ids && ids->vendor) {
- if ((ids->vendor == pdev->vendor) &&
- (ids->device == pdev->device)) {
- devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
- UNCORE_PCI_DEV_FUNC(ids->driver_data));
- if (devfn == pdev->devfn) {
- pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
- break;
- }
+/*
+ * Find the PMU of a PCI device.
+ * @pdev: The PCI device.
+ * @ids: The ID table of the available PCI devices with a PMU.
+ */
+static struct intel_uncore_pmu *
+uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
+{
+ struct intel_uncore_pmu *pmu = NULL;
+ struct intel_uncore_type *type;
+ kernel_ulong_t data;
+ unsigned int devfn;
+
+ while (ids && ids->vendor) {
+ if ((ids->vendor == pdev->vendor) &&
+ (ids->device == pdev->device)) {
+ data = ids->driver_data;
+ devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data),
+ UNCORE_PCI_DEV_FUNC(data));
+ if (devfn == pdev->devfn) {
+ type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)];
+ pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)];
+ break;
}
- ids++;
}
- if (pmu == NULL)
- return -ENODEV;
- } else {
- /*
- * for performance monitoring unit with multiple boxes,
- * each box has a different function id.
- */
- pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
+ ids++;
}
+ return pmu;
+}
+
+/*
+ * Register the PMU for a PCI device
+ * @pdev: The PCI device.
+ * @type: The corresponding PMU type of the device.
+ * @pmu: The corresponding PMU of the device.
+ * @phys_id: The physical socket id which the device maps to.
+ * @die: The die id which the device maps to.
+ */
+static int uncore_pci_pmu_register(struct pci_dev *pdev,
+ struct intel_uncore_type *type,
+ struct intel_uncore_pmu *pmu,
+ int phys_id, int die)
+{
+ struct intel_uncore_box *box;
+ int ret;
if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
return -EINVAL;
@@ -1067,7 +1075,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
box->pci_dev = pdev;
box->pmu = pmu;
uncore_box_init(box);
- pci_set_drvdata(pdev, box);
pmu->boxes[die] = box;
if (atomic_inc_return(&pmu->activeboxes) > 1)
@@ -1076,7 +1083,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
/* First active box registers the pmu */
ret = uncore_pmu_register(pmu);
if (ret) {
- pci_set_drvdata(pdev, NULL);
pmu->boxes[die] = NULL;
uncore_box_exit(box);
kfree(box);
@@ -1084,18 +1090,87 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
return ret;
}
+/*
+ * add a pci uncore device
+ */
+static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu = NULL;
+ int phys_id, die, ret;
+
+ ret = uncore_pci_get_dev_die_info(pdev, &phys_id, &die);
+ if (ret)
+ return ret;
+
+ if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
+ int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
+
+ uncore_extra_pci_dev[die].dev[idx] = pdev;
+ pci_set_drvdata(pdev, NULL);
+ return 0;
+ }
+
+ type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
+
+ /*
+ * Some platforms, e.g. Knights Landing, use a common PCI device ID
+ * for multiple instances of an uncore PMU device type. We should check
+ * PCI slot and func to indicate the uncore box.
+ */
+ if (id->driver_data & ~0xffff) {
+ struct pci_driver *pci_drv = pdev->driver;
+
+ pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table);
+ if (pmu == NULL)
+ return -ENODEV;
+ } else {
+ /*
+ * for performance monitoring unit with multiple boxes,
+ * each box has a different function id.
+ */
+ pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
+ }
+
+ ret = uncore_pci_pmu_register(pdev, type, pmu, phys_id, die);
+
+ pci_set_drvdata(pdev, pmu->boxes[die]);
+
+ return ret;
+}
+
+/*
+ * Unregister the PMU of a PCI device
+ * @pmu: The corresponding PMU to unregister.
+ * @phys_id: The physical socket id which the device maps to.
+ * @die: The die id which the device maps to.
+ */
+static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu,
+ int phys_id, int die)
+{
+ struct intel_uncore_box *box = pmu->boxes[die];
+
+ if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
+ return;
+
+ pmu->boxes[die] = NULL;
+ if (atomic_dec_return(&pmu->activeboxes) == 0)
+ uncore_pmu_unregister(pmu);
+ uncore_box_exit(box);
+ kfree(box);
+}
+
static void uncore_pci_remove(struct pci_dev *pdev)
{
struct intel_uncore_box *box;
struct intel_uncore_pmu *pmu;
int i, phys_id, die;
- phys_id = uncore_pcibus_to_physid(pdev->bus);
+ if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die))
+ return;
box = pci_get_drvdata(pdev);
if (!box) {
- die = (topology_max_die_per_package() > 1) ? phys_id :
- topology_phys_to_logical_pkg(phys_id);
for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
if (uncore_extra_pci_dev[die].dev[i] == pdev) {
uncore_extra_pci_dev[die].dev[i] = NULL;
@@ -1107,15 +1182,84 @@ static void uncore_pci_remove(struct pci_dev *pdev)
}
pmu = box->pmu;
- if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
- return;
pci_set_drvdata(pdev, NULL);
- pmu->boxes[box->dieid] = NULL;
- if (atomic_dec_return(&pmu->activeboxes) == 0)
- uncore_pmu_unregister(pmu);
- uncore_box_exit(box);
- kfree(box);
+
+ uncore_pci_pmu_unregister(pmu, phys_id, die);
+}
+
+static int uncore_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct intel_uncore_pmu *pmu;
+ int phys_id, die;
+
+ /* Unregister the PMU when the device is going to be deleted. */
+ if (action != BUS_NOTIFY_DEL_DEVICE)
+ return NOTIFY_DONE;
+
+ pmu = uncore_pci_find_dev_pmu(pdev, uncore_pci_sub_driver->id_table);
+ if (!pmu)
+ return NOTIFY_DONE;
+
+ if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die))
+ return NOTIFY_DONE;
+
+ uncore_pci_pmu_unregister(pmu, phys_id, die);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block uncore_notifier = {
+ .notifier_call = uncore_bus_notify,
+};
+
+static void uncore_pci_sub_driver_init(void)
+{
+ const struct pci_device_id *ids = uncore_pci_sub_driver->id_table;
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct pci_dev *pci_sub_dev;
+ bool notify = false;
+ unsigned int devfn;
+ int phys_id, die;
+
+ while (ids && ids->vendor) {
+ pci_sub_dev = NULL;
+ type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)];
+ /*
+ * Search the available device, and register the
+ * corresponding PMU.
+ */
+ while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ ids->device, pci_sub_dev))) {
+ devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
+ UNCORE_PCI_DEV_FUNC(ids->driver_data));
+ if (devfn != pci_sub_dev->devfn)
+ continue;
+
+ pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
+ if (!pmu)
+ continue;
+
+ if (uncore_pci_get_dev_die_info(pci_sub_dev,
+ &phys_id, &die))
+ continue;
+
+ if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
+ phys_id, die))
+ notify = true;
+ }
+ ids++;
+ }
+
+ if (notify && bus_register_notifier(&pci_bus_type, &uncore_notifier))
+ notify = false;
+
+ if (!notify)
+ uncore_pci_sub_driver = NULL;
}
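The notifier is a design consequence of the "sub driver" idea: the PCIe3 root port counted here is presumably already bound to another driver (e.g. the PCIe port driver), so this code cannot register its own probe/remove callbacks for it. Instead it scans for matching devices once at init time (above) and listens for BUS_NOTIFY_DEL_DEVICE so the PMU can still be torn down if the device is removed.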
static int __init uncore_pci_init(void)
@@ -1141,6 +1285,9 @@ static int __init uncore_pci_init(void)
if (ret)
goto errtype;
+ if (uncore_pci_sub_driver)
+ uncore_pci_sub_driver_init();
+
pcidrv_registered = true;
return 0;
@@ -1158,6 +1305,8 @@ static void uncore_pci_exit(void)
{
if (pcidrv_registered) {
pcidrv_registered = false;
+ if (uncore_pci_sub_driver)
+ bus_unregister_notifier(&pci_bus_type, &uncore_notifier);
pci_unregister_driver(uncore_pci_driver);
uncore_types_exit(uncore_pci_uncores);
kfree(uncore_extra_pci_dev);
@@ -1478,12 +1627,12 @@ static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
};
static const struct intel_uncore_init_fun tgl_uncore_init __initconst = {
- .cpu_init = icl_uncore_cpu_init,
+ .cpu_init = tgl_uncore_cpu_init,
.mmio_init = tgl_uncore_mmio_init,
};
static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
- .cpu_init = icl_uncore_cpu_init,
+ .cpu_init = tgl_uncore_cpu_init,
.mmio_init = tgl_l_uncore_mmio_init,
};
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 105fdc69825e..83d2a7d490e0 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -552,6 +552,7 @@ extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
+extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
@@ -567,6 +568,7 @@ void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
+void tgl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index cb94ba86efd2..39e632ed6ca9 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -126,6 +126,10 @@
#define ICL_UNC_CBO_0_PER_CTR0 0x702
#define ICL_UNC_CBO_MSR_OFFSET 0x8
+/* ICL ARB register */
+#define ICL_UNC_ARB_PER_CTR 0x3b1
+#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
+
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@@ -313,15 +317,21 @@ void skl_uncore_cpu_init(void)
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
+static struct intel_uncore_ops icl_uncore_msr_ops = {
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = snb_uncore_msr_enable_event,
+ .read_counter = uncore_msr_read_counter,
+};
+
static struct intel_uncore_type icl_uncore_cbox = {
.name = "cbox",
- .num_counters = 4,
+ .num_counters = 2,
.perf_ctr_bits = 44,
.perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
.event_mask = SNB_UNC_RAW_EVENT_MASK,
.msr_offset = ICL_UNC_CBO_MSR_OFFSET,
- .ops = &skl_uncore_msr_ops,
+ .ops = &icl_uncore_msr_ops,
.format_group = &snb_uncore_format_group,
};
@@ -350,13 +360,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
.single_fixed = 1,
.event_mask = SNB_UNC_CTL_EV_SEL_MASK,
.format_group = &icl_uncore_clock_format_group,
- .ops = &skl_uncore_msr_ops,
+ .ops = &icl_uncore_msr_ops,
.event_descs = icl_uncore_events,
};
+static struct intel_uncore_type icl_uncore_arb = {
+ .name = "arb",
+ .num_counters = 1,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .perf_ctr = ICL_UNC_ARB_PER_CTR,
+ .event_ctl = ICL_UNC_ARB_PERFEVTSEL,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .ops = &icl_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+};
+
static struct intel_uncore_type *icl_msr_uncores[] = {
&icl_uncore_cbox,
- &snb_uncore_arb,
+ &icl_uncore_arb,
&icl_uncore_clockbox,
NULL,
};
@@ -374,6 +396,21 @@ void icl_uncore_cpu_init(void)
{
uncore_msr_uncores = icl_msr_uncores;
icl_uncore_cbox.num_boxes = icl_get_cbox_num();
+}
+
+static struct intel_uncore_type *tgl_msr_uncores[] = {
+ &icl_uncore_cbox,
+ &snb_uncore_arb,
+ &icl_uncore_clockbox,
+ NULL,
+};
+
+void tgl_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = tgl_msr_uncores;
+ icl_uncore_cbox.num_boxes = icl_get_cbox_num();
+ icl_uncore_cbox.ops = &skl_uncore_msr_ops;
+ icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
@@ -390,6 +427,18 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
+ INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
+ INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),
+
{ /* end: all zeroes */ },
};
@@ -405,13 +454,35 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
+/* BW break down- legacy counters */
+#define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3
+#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040
+#define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4
+#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044
+#define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5
+#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048
+
enum perf_snb_uncore_imc_freerunning_types {
- SNB_PCI_UNCORE_IMC_DATA = 0,
+ SNB_PCI_UNCORE_IMC_DATA_READS = 0,
+ SNB_PCI_UNCORE_IMC_DATA_WRITES,
+ SNB_PCI_UNCORE_IMC_GT_REQUESTS,
+ SNB_PCI_UNCORE_IMC_IA_REQUESTS,
+ SNB_PCI_UNCORE_IMC_IO_REQUESTS,
+
SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
- [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
+ [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
};
static struct attribute *snb_uncore_imc_formats_attr[] = {
@@ -525,6 +596,18 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
idx = UNCORE_PMC_IDX_FREERUNNING;
break;
+ case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
+ case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
+ case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
default:
return -EINVAL;
}
@@ -598,7 +681,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
static struct intel_uncore_type snb_uncore_imc = {
.name = "imc",
- .num_counters = 2,
+ .num_counters = 5,
.num_boxes = 1,
.num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
.mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE,
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 62e88ad919ff..7bdb1821215d 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -393,6 +393,11 @@
#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
+/* SNR PCIE3 */
+#define SNR_PCIE3_PCI_PMON_CTL0 0x508
+#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
+#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0
+
/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
@@ -3749,7 +3754,9 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
ret = skx_iio_get_topology(type);
if (ret)
- return ret;
+ goto clear_attr_update;
+
+ ret = -ENOMEM;
/* One more for NULL. */
attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
@@ -3781,8 +3788,9 @@ err:
kfree(eas);
kfree(attrs);
kfree(type->topology);
+clear_attr_update:
type->attr_update = NULL;
- return -ENOMEM;
+ return ret;
}
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
@@ -4551,12 +4559,46 @@ static struct intel_uncore_type snr_uncore_m2m = {
.format_group = &snr_m2m_uncore_format_group,
};
+static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
+ pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
+}
+
+static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
+ .init_box = snr_m2m_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = snbep_uncore_pci_disable_event,
+ .enable_event = snr_uncore_pci_enable_event,
+ .read_counter = snbep_uncore_pci_read_counter,
+};
+
+static struct intel_uncore_type snr_uncore_pcie3 = {
+ .name = "pcie3",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
+ .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
+ .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
+ .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
+ .ops = &snr_pcie3_uncore_pci_ops,
+ .format_group = &skx_uncore_iio_format_group,
+};
+
enum {
SNR_PCI_UNCORE_M2M,
+ SNR_PCI_UNCORE_PCIE3,
};
static struct intel_uncore_type *snr_pci_uncores[] = {
[SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
+ [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
NULL,
};
@@ -4573,6 +4615,19 @@ static struct pci_driver snr_uncore_pci_driver = {
.id_table = snr_uncore_pci_ids,
};
+static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
+ { /* PCIe3 RP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
+ },
+ { /* end: all zeroes */ }
+};
+
+static struct pci_driver snr_uncore_pci_sub_driver = {
+ .name = "snr_uncore_sub",
+ .id_table = snr_uncore_pci_sub_ids,
+};
+
int snr_uncore_pci_init(void)
{
/* SNR UBOX DID */
@@ -4584,6 +4639,7 @@ int snr_uncore_pci_init(void)
uncore_pci_uncores = snr_pci_uncores;
uncore_pci_driver = &snr_uncore_pci_driver;
+ uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
return 0;
}
@@ -4751,10 +4807,10 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
{ /* end: all zeroes */ },
};
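Checking the new scale values: 6.103515625e-5 MiB is exactly 64 / 2^20, i.e. one 64-byte cache line per count, while the old 3.814697266e-6 MiB corresponded to 4 bytes per count; the change thus treats each free-running counter increment as a full cache line rather than a 4-byte word.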
@@ -5212,17 +5268,17 @@ static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
- INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
- INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
{ /* end: all zeroes */ },
};
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index a949f6f55991..4be8f9cabd07 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -78,6 +78,7 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
case INTEL_FAM6_ATOM_TREMONT_D:
case INTEL_FAM6_ATOM_TREMONT:
+ case INTEL_FAM6_ATOM_TREMONT_L:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7b68ab5f19e7..ee2b9b9fc2a5 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -79,6 +79,31 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
#define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */
#define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */
#define PERF_X86_EVENT_LBR_SELECT 0x2000 /* Save/Restore MSR_LBR_SELECT */
+#define PERF_X86_EVENT_TOPDOWN 0x4000 /* Count Topdown slots/metrics events */
+
+static inline bool is_topdown_count(struct perf_event *event)
+{
+ return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
+}
+
+static inline bool is_metric_event(struct perf_event *event)
+{
+ u64 config = event->attr.config;
+
+ return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
+ ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
+ ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
+}
+
+static inline bool is_slots_event(struct perf_event *event)
+{
+ return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
+}
+
+static inline bool is_topdown_event(struct perf_event *event)
+{
+ return is_metric_event(event) || is_slots_event(event);
+}
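Note: the helpers added above classify an event purely by its config encoding: the fixed "slots" event and the pseudo metric events share event code 0x00 and differ only in umask. A small user-space model is shown below; the encoding constants mirror the kernel's values (slots = umask 0x4, metrics = umask 0x80..0x83) but are restated here as assumptions since their definitions live elsewhere.

/* Standalone model of is_metric_event()/is_slots_event() classification. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_EVENT     0x000000ffULL
#define ARCH_EVENT_MASK    0x0000ffffULL   /* event | umask */
#define TD_SLOTS           0x0400ULL       /* event=0x00, umask=0x04 */
#define TD_METRIC_RETIRING 0x8000ULL       /* event=0x00, umask=0x80 */
#define TD_METRIC_MAX      0x8300ULL       /* assumed last metric event */

static bool is_metric(uint64_t config)
{
        return ((config & EVENTSEL_EVENT) == 0) &&
               ((config & ARCH_EVENT_MASK) >= TD_METRIC_RETIRING) &&
               ((config & ARCH_EVENT_MASK) <= TD_METRIC_MAX);
}

static bool is_slots(uint64_t config)
{
        return (config & ARCH_EVENT_MASK) == TD_SLOTS;
}

int main(void)
{
        printf("0x0400: slots=%d metric=%d\n", is_slots(0x0400), is_metric(0x0400));
        printf("0x8000: slots=%d metric=%d\n", is_slots(0x8000), is_metric(0x8000));
        printf("0x01c2: slots=%d metric=%d\n", is_slots(0x01c2), is_metric(0x01c2));
        return 0;
}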
struct amd_nb {
int nb_id; /* NorthBridge id */
@@ -210,6 +235,8 @@ struct cpu_hw_events {
they've never been enabled yet */
int n_txn; /* the # last events in the below arrays;
added in the current transaction */
+ int n_txn_pair;
+ int n_txn_metric;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
@@ -285,6 +312,12 @@ struct cpu_hw_events {
u64 tfa_shadow;
/*
+ * Perf Metrics
+ */
+ /* number of accepted metrics events */
+ int n_metric;
+
+ /*
* AMD specific bits
*/
struct amd_nb *amd_nb;
@@ -376,6 +409,19 @@ struct cpu_hw_events {
EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
/*
+ * The special metric counters do not actually exist. They are calculated from
+ * the combination of the FxCtr3 + MSR_PERF_METRICS.
+ *
+ * The special metric counters are mapped to a dummy offset for the scheduler.
+ * Sharing the same metric between multiple users without multiplexing
+ * is not allowed, even though the hardware supports that in principle.
+ */
+
+#define METRIC_EVENT_CONSTRAINT(c, n) \
+ EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)), \
+ INTEL_ARCH_EVENT_MASK)
+
+/*
* Constraint on the Event code + UMask
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
@@ -537,7 +583,7 @@ union perf_capabilities {
*/
u64 full_width_write:1;
u64 pebs_baseline:1;
- u64 pebs_metrics_available:1;
+ u64 perf_metrics:1;
u64 pebs_output_pt_available:1;
};
u64 capabilities;
@@ -727,6 +773,12 @@ struct x86_pmu {
atomic_t lbr_exclusive[x86_lbr_exclusive_max];
/*
+ * Intel perf metrics
+ */
+ u64 (*update_topdown_event)(struct perf_event *event);
+ int (*set_topdown_event_period)(struct perf_event *event);
+
+ /*
* perf task context (i.e. struct perf_event_context::task_ctx_data)
* switch helper to bridge calls from perf/core to perf/x86.
* See struct pmu::swap_task_ctx() usage for examples;
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 67b411f7e8c4..7c0120e2e957 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -815,6 +815,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h),
+ X86_MATCH_VENDOR_FAM(AMD, 0x19, &model_amd_fam17h),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 6035df1b49e1..e04d90af4c27 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -148,9 +148,9 @@ static inline bool hv_reenlightenment_available(void)
* Check for required features and privileges to make TSC frequency
* change notifications work.
*/
- return ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+ return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
- ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT;
+ ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
}
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
@@ -330,8 +330,8 @@ void __init hyperv_init(void)
return;
/* Absolutely required MSRs */
- required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE |
- HV_X64_MSR_VP_INDEX_AVAILABLE;
+ required_msrs = HV_MSR_HYPERCALL_AVAILABLE |
+ HV_MSR_VP_INDEX_AVAILABLE;
if ((ms_hyperv.features & required_msrs) != required_msrs)
return;
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index 07f21a06392f..f3270c1fc48c 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -66,7 +66,7 @@ void __init hv_init_spinlocks(void)
{
if (!hv_pvspin || !apic ||
!(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
- !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
+ !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) {
pr_info("PV spinlocks disabled\n");
return;
}
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index ca0976456a6b..6d2df1ee427b 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -159,8 +159,6 @@ static inline u64 x86_default_get_root_pointer(void)
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
-#define acpi_unlazy_tlb(x) leave_mm(x)
-
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 2cc44e957c31..1c129abb7f09 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -519,6 +519,14 @@ static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
static inline void apic_smt_update(void) { }
#endif
+struct msi_msg;
+
+#ifdef CONFIG_PCI_MSI
+void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg);
+#else
+# define x86_vector_msi_compose_msg NULL
+#endif
+
extern void ioapic_zap_locks(void);
#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 5a42f9206138..51e2bf27cc9b 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -5,6 +5,7 @@
#include <asm/string.h>
#include <asm/page.h>
#include <asm/checksum.h>
+#include <asm/mce.h>
#include <asm-generic/asm-prototypes.h>
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 5c15f95b1ba7..0603c7423aca 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -135,14 +135,21 @@
# define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
-# define _ASM_NOKPROBE(entry) \
+# ifdef CONFIG_KPROBES
+# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
_ASM_PTR (entry); \
.popsection
+# else
+# define _ASM_NOKPROBE(entry)
+# endif
#else /* ! __ASSEMBLY__ */
# define _EXPAND_EXTABLE_HANDLE(x) #x
@@ -160,6 +167,9 @@
# define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 0ada98d5d09f..bca625a60186 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
#define HAVE_CSUM_COPY_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
#ifdef CONFIG_X86_32
# include <asm/checksum_32.h>
#else
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 11624c8a9d8d..17da95387997 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -27,9 +27,7 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -38,26 +36,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* If you use these functions directly please don't forget the
* access_ok().
*/
-static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
static inline __wsum csum_and_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum, int *err_ptr)
+ void *dst, int len)
{
__wsum ret;
might_sleep();
- if (!user_access_begin(src, len)) {
- if (len)
- *err_ptr = -EFAULT;
- return sum;
- }
- ret = csum_partial_copy_generic((__force void *)src, dst,
- len, sum, err_ptr, NULL);
+ if (!user_access_begin(src, len))
+ return 0;
+ ret = csum_partial_copy_generic((__force void *)src, dst, len);
user_access_end();
return ret;
@@ -178,23 +170,17 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
*/
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+ int len)
{
__wsum ret;
might_sleep();
- if (user_access_begin(dst, len)) {
- ret = csum_partial_copy_generic(src, (__force void *)dst,
- len, sum, NULL, err_ptr);
- user_access_end();
- return ret;
- }
+ if (!user_access_begin(dst, len))
+ return 0;
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ ret = csum_partial_copy_generic(src, (__force void *)dst, len);
+ user_access_end();
+ return ret;
}
#endif /* _ASM_X86_CHECKSUM_32_H */
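Note: after this change the 32-bit checksum helpers take only (src, dst, len); the initial seed and the error pointers move to the callers, and a faulting user access simply yields 0. Conceptually the primitive still folds a ones'-complement sum while copying. Below is a simplified user-space model of that "copy and checksum in one pass" idea, not the kernel's hand-written assembly.

/* Simplified model: 16-bit ones'-complement sum accumulated in 32 bits
 * while the data is copied. Illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t csum_partial_copy(const void *src, void *dst, int len)
{
        const uint8_t *s = src;
        uint32_t sum = 0;

        memcpy(dst, src, len);
        while (len > 1) {
                sum += (uint32_t)s[0] << 8 | s[1];
                s += 2;
                len -= 2;
        }
        if (len)                        /* trailing odd byte */
                sum += (uint32_t)s[0] << 8;
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

int main(void)
{
        char dst[16];
        uint32_t sum = csum_partial_copy("hello, world", dst, 12);

        printf("checksum = 0x%04x, copy = %.12s\n", sum, dst);
        return 0;
}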
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 0a289b87e872..407beebadaf4 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -130,17 +130,11 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Use the wrappers below */
-extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
-
-extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
+extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
+extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/**
* ip_compute_csum - Compute an 16bit IP checksum.
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index d4edf281fff4..0e327a01f50f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -27,8 +27,6 @@ typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s64 __attribute__((aligned(4))) compat_s64;
-typedef u64 __attribute__((aligned(4))) compat_u64;
struct compat_stat {
compat_dev_t st_dev;
@@ -211,6 +209,7 @@ static inline bool in_compat_syscall(void)
return in_32bit_syscall();
}
#define in_compat_syscall in_compat_syscall /* override the generic impl */
+#define compat_need_64bit_alignment_fixup in_ia32_syscall
#endif
struct compat_siginfo;
diff --git a/arch/x86/include/asm/copy_mc_test.h b/arch/x86/include/asm/copy_mc_test.h
new file mode 100644
index 000000000000..e4991ba96726
--- /dev/null
+++ b/arch/x86/include/asm/copy_mc_test.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _COPY_MC_TEST_H_
+#define _COPY_MC_TEST_H_
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_COPY_MC_TEST
+extern unsigned long copy_mc_test_src;
+extern unsigned long copy_mc_test_dst;
+
+static inline void copy_mc_inject_src(void *addr)
+{
+ if (addr)
+ copy_mc_test_src = (unsigned long) addr;
+ else
+ copy_mc_test_src = ~0UL;
+}
+
+static inline void copy_mc_inject_dst(void *addr)
+{
+ if (addr)
+ copy_mc_test_dst = (unsigned long) addr;
+ else
+ copy_mc_test_dst = ~0UL;
+}
+#else /* CONFIG_COPY_MC_TEST */
+static inline void copy_mc_inject_src(void *addr)
+{
+}
+
+static inline void copy_mc_inject_dst(void *addr)
+{
+}
+#endif /* CONFIG_COPY_MC_TEST */
+
+#else /* __ASSEMBLY__ */
+#include <asm/export.h>
+
+#ifdef CONFIG_COPY_MC_TEST
+.macro COPY_MC_TEST_CTL
+ .pushsection .data
+ .align 8
+ .globl copy_mc_test_src
+ copy_mc_test_src:
+ .quad 0
+ EXPORT_SYMBOL_GPL(copy_mc_test_src)
+ .globl copy_mc_test_dst
+ copy_mc_test_dst:
+ .quad 0
+ EXPORT_SYMBOL_GPL(copy_mc_test_dst)
+ .popsection
+.endm
+
+.macro COPY_MC_TEST_SRC reg count target
+ leaq \count(\reg), %r9
+ cmp copy_mc_test_src, %r9
+ ja \target
+.endm
+
+.macro COPY_MC_TEST_DST reg count target
+ leaq \count(\reg), %r9
+ cmp copy_mc_test_dst, %r9
+ ja \target
+.endm
+#else
+.macro COPY_MC_TEST_CTL
+.endm
+
+.macro COPY_MC_TEST_SRC reg count target
+.endm
+
+.macro COPY_MC_TEST_DST reg count target
+.endm
+#endif /* CONFIG_COPY_MC_TEST */
+#endif /* __ASSEMBLY__ */
+#endif /* _COPY_MC_TEST_H_ */
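Note: with CONFIG_COPY_MC_TEST enabled, the injection helpers above let a test point copy_mc_test_src/dst at an address inside a buffer; when the assembly routine's COPY_MC_TEST_SRC/DST checks walk past that address they take the fault path as if a machine check had hit. A hedged kernel-context sketch of how a test might use this is shown below; the buffer, sizes and the selftest function itself are illustrative, not part of this patch.

/* Illustrative kernel-context sketch (not a real in-tree test): arm the
 * source-side injection point halfway into a buffer, run copy_mc_fragile(),
 * and report how many bytes were left uncopied. */
static void copy_mc_selftest(void)
{
        static char src[128], dst[128];
        unsigned long rem;

        copy_mc_inject_src(src + 64);    /* pretend poison at offset 64 */
        rem = copy_mc_fragile(dst, src, sizeof(src));
        copy_mc_inject_src(NULL);        /* disarm: ~0UL never matches */

        pr_info("copy_mc test: %lu bytes not copied\n", rem);
}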
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 8902fdb7de13..3d52b094850a 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -11,25 +11,29 @@
#ifdef CONFIG_X86_64
/* Macro to enforce the same ordering and stack sizes */
-#define ESTACKS_MEMBERS(guardsize) \
- char DF_stack_guard[guardsize]; \
- char DF_stack[EXCEPTION_STKSZ]; \
- char NMI_stack_guard[guardsize]; \
- char NMI_stack[EXCEPTION_STKSZ]; \
- char DB_stack_guard[guardsize]; \
- char DB_stack[EXCEPTION_STKSZ]; \
- char MCE_stack_guard[guardsize]; \
- char MCE_stack[EXCEPTION_STKSZ]; \
- char IST_top_guard[guardsize]; \
+#define ESTACKS_MEMBERS(guardsize, optional_stack_size) \
+ char DF_stack_guard[guardsize]; \
+ char DF_stack[EXCEPTION_STKSZ]; \
+ char NMI_stack_guard[guardsize]; \
+ char NMI_stack[EXCEPTION_STKSZ]; \
+ char DB_stack_guard[guardsize]; \
+ char DB_stack[EXCEPTION_STKSZ]; \
+ char MCE_stack_guard[guardsize]; \
+ char MCE_stack[EXCEPTION_STKSZ]; \
+ char VC_stack_guard[guardsize]; \
+ char VC_stack[optional_stack_size]; \
+ char VC2_stack_guard[guardsize]; \
+ char VC2_stack[optional_stack_size]; \
+ char IST_top_guard[guardsize]; \
/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
- ESTACKS_MEMBERS(0)
+ ESTACKS_MEMBERS(0, 0)
};
/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
- ESTACKS_MEMBERS(PAGE_SIZE)
+ ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
/*
@@ -40,6 +44,8 @@ enum exception_stack_ordering {
ESTACK_NMI,
ESTACK_DB,
ESTACK_MCE,
+ ESTACK_VC,
+ ESTACK_VC2,
N_EXCEPTION_STACKS
};
@@ -139,4 +145,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
#define __this_cpu_ist_top_va(name) \
CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
+#define __this_cpu_ist_bottom_va(name) \
+ CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
+
#endif
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2901d5df4366..dad350d42ecf 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -96,7 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
-/* free ( 3*32+17) */
+#define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -236,6 +236,7 @@
#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -288,6 +289,7 @@
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
+#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -353,6 +355,7 @@
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
+#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
@@ -368,6 +371,7 @@
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index e89558a3fe4a..cfdf307ddc01 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -90,8 +90,6 @@ static __always_inline bool hw_breakpoint_active(void)
return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}
-extern void aout_dump_debugregs(struct user *dump);
-
extern void hw_breakpoint_restore(void);
static __always_inline unsigned long local_db_save(void)
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 1ced11d31932..476082a83d1c 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -383,6 +383,33 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
void alloc_intr_gate(unsigned int n, const void *addr);
+static inline void init_idt_data(struct idt_data *data, unsigned int n,
+ const void *addr)
+{
+ BUG_ON(n > 0xFF);
+
+ memset(data, 0, sizeof(*data));
+ data->vector = n;
+ data->addr = addr;
+ data->segment = __KERNEL_CS;
+ data->bits.type = GATE_INTERRUPT;
+ data->bits.p = 1;
+}
+
+static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
+{
+ unsigned long addr = (unsigned long) d->addr;
+
+ gate->offset_low = (u16) addr;
+ gate->segment = (u16) d->segment;
+ gate->bits = d->bits;
+ gate->offset_middle = (u16) (addr >> 16);
+#ifdef CONFIG_X86_64
+ gate->offset_high = (u32) (addr >> 32);
+ gate->reserved = 0;
+#endif
+}
+
extern unsigned long system_vectors[];
extern void load_current_idt(void);
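Note: idt_init_desc() above packs the handler address into the three offset fields of an IDT gate. A standalone mock of the same split and its reassembly follows; the struct is a simplified stand-in for the 64-bit gate layout, and the address is made up.

/* Standalone mock of the address split done by idt_init_desc(). */
#include <stdint.h>
#include <stdio.h>

struct mock_gate {
        uint16_t offset_low;
        uint16_t offset_middle;
        uint32_t offset_high;
};

int main(void)
{
        uint64_t addr = 0xffffffff81a01230ULL;   /* made-up handler address */
        struct mock_gate g = {
                .offset_low    = (uint16_t)addr,
                .offset_middle = (uint16_t)(addr >> 16),
                .offset_high   = (uint32_t)(addr >> 32),
        };
        uint64_t back = (uint64_t)g.offset_high << 32 |
                        (uint64_t)g.offset_middle << 16 |
                        g.offset_low;

        printf("reassembled: %#llx\n", (unsigned long long)back);
        return 0;
}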
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index a91f3b6e4f2a..f7e7099af595 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -74,6 +74,13 @@ struct idt_bits {
p : 1;
} __attribute__((packed));
+struct idt_data {
+ unsigned int vector;
+ unsigned int segment;
+ struct idt_bits bits;
+ const void *addr;
+};
+
struct gate_struct {
u16 offset_low;
u16 segment;
@@ -109,6 +116,9 @@ struct desc_ptr {
#endif /* !__ASSEMBLY__ */
+/* Boot IDT definitions */
+#define BOOT_IDT_ENTRIES 32
+
/* Access rights as returned by LAR */
#define AR_TYPE_RODATA (0 * (1 << 9))
#define AR_TYPE_RWDATA (1 * (1 << 9))
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 4ea8584682f9..5861d34f9771 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,6 +56,12 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
+#ifdef CONFIG_IOMMU_SUPPORT
+# define DISABLE_ENQCMD 0
+#else
+# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -75,7 +81,8 @@
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
-#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
+#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
+ DISABLE_ENQCMD)
#define DISABLED_MASK17 0
#define DISABLED_MASK18 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index b9c2667ac46c..bc9758ef292e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -81,11 +81,8 @@ extern unsigned long efi_fw_vendor, efi_config_table;
kernel_fpu_end(); \
})
-
#define arch_efi_call_virt(p, f, args...) p->f(args)
-#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
-
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
@@ -125,9 +122,6 @@ struct efi_scratch {
kernel_fpu_end(); \
})
-extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
- u32 type, u64 attribute);
-
#ifdef CONFIG_KASAN
/*
* CONFIG_KASAN may redefine memset to __memset. __memset function is present
@@ -143,17 +137,13 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
#endif /* CONFIG_X86_32 */
extern struct efi_scratch efi_scratch;
-extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
-extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
-extern void __init old_map_region(efi_memory_desc_t *md);
-extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index a8f9315b9eae..6fe54b2813c1 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -18,8 +18,16 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
* state, not the interrupt state as imagined by Xen.
*/
unsigned long flags = native_save_fl();
- WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
- X86_EFLAGS_NT));
+ unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;
+
+ /*
+ * For !SMAP hardware we patch out CLAC on entry.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMAP) ||
+ (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+ mask |= X86_EFLAGS_AC;
+
+ WARN_ON_ONCE(flags & mask);
/* We think we came from user mode. Make sure pt_regs agrees. */
WARN_ON_ONCE(!user_mode(regs));
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index d8c2198d543b..1f0cbc52937c 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,10 +29,17 @@ struct pt_regs;
(b)->handler = (tmp).handler - (delta); \
} while (0)
+enum handler_type {
+ EX_HANDLER_NONE,
+ EX_HANDLER_FAULT,
+ EX_HANDLER_UACCESS,
+ EX_HANDLER_OTHER
+};
+
extern int fixup_exception(struct pt_regs *regs, int trapnr,
unsigned long error_code, unsigned long fault_addr);
extern int fixup_bug(struct pt_regs *regs, int trapnr);
-extern bool ex_has_fault_handler(unsigned long ip);
+extern enum handler_type ex_get_fault_handler_type(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
#endif
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0f0dd645b594..77217bd292bd 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -99,7 +99,7 @@ enum fixed_addresses {
FIX_PCIE_MCFG,
#endif
#endif
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
FIX_PARAVIRT_BOOTMAP,
#endif
#ifdef CONFIG_X86_INTEL_MID
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index b774c52e5411..dcd9503b1098 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -62,4 +62,16 @@ extern void switch_fpu_return(void);
*/
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+/*
+ * Tasks that are not using SVA have mm->pasid set to zero to note that they
+ * will not have the valid bit set in MSR_IA32_PASID while they are running.
+ */
+#define PASID_DISABLED 0
+
+#ifdef CONFIG_IOMMU_SUPPORT
+/* Update current's PASID MSR/state by mm's PASID. */
+void update_pasid(void);
+#else
+static inline void update_pasid(void) { }
+#endif
#endif /* _ASM_X86_FPU_API_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 0a460f2a3f90..8d33ad80704f 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -19,6 +19,7 @@
#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
+#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>
@@ -583,38 +584,13 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
pkru_val = pk->pkru;
}
__write_pkru(pkru_val);
-}
-
-/*
- * MXCSR and XCR definitions:
- */
-
-static inline void ldmxcsr(u32 mxcsr)
-{
- asm volatile("ldmxcsr %0" :: "m" (mxcsr));
-}
-
-extern unsigned int mxcsr_feature_mask;
-
-#define XCR_XFEATURE_ENABLED_MASK 0x00000000
-static inline u64 xgetbv(u32 index)
-{
- u32 eax, edx;
-
- asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
- : "=a" (eax), "=d" (edx)
- : "c" (index));
- return eax + ((u64)edx << 32);
-}
-
-static inline void xsetbv(u32 index, u64 value)
-{
- u32 eax = value;
- u32 edx = value >> 32;
-
- asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
- : : "a" (eax), "d" (edx), "c" (index));
+ /*
+ * Expensive PASID MSR write will be avoided in update_pasid() because
+ * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
+ * unless it's different from mm->pasid to reduce overhead.
+ */
+ update_pasid();
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index c87364ea6446..f5a38a5f3ae1 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -114,7 +114,7 @@ enum xfeature {
XFEATURE_Hi16_ZMM,
XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
XFEATURE_PKRU,
- XFEATURE_RSRVD_COMP_10,
+ XFEATURE_PASID,
XFEATURE_RSRVD_COMP_11,
XFEATURE_RSRVD_COMP_12,
XFEATURE_RSRVD_COMP_13,
@@ -134,6 +134,7 @@ enum xfeature {
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PT (1 << XFEATURE_PT_UNIMPLEMENTED_SO_FAR)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
+#define XFEATURE_MASK_PASID (1 << XFEATURE_PASID)
#define XFEATURE_MASK_LBR (1 << XFEATURE_LBR)
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
@@ -256,6 +257,14 @@ struct arch_lbr_state {
struct lbr_entry entries[];
} __packed;
+/*
+ * State component 10 is supervisor state used for context-switching the
+ * PASID state.
+ */
+struct ia32_pasid_state {
+ u64 pasid;
+} __packed;
+
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h
new file mode 100644
index 000000000000..1c7ab8d95da5
--- /dev/null
+++ b/arch/x86/include/asm/fpu/xcr.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_FPU_XCR_H
+#define _ASM_X86_FPU_XCR_H
+
+/*
+ * MXCSR and XCR definitions:
+ */
+
+static inline void ldmxcsr(u32 mxcsr)
+{
+ asm volatile("ldmxcsr %0" :: "m" (mxcsr));
+}
+
+extern unsigned int mxcsr_feature_mask;
+
+#define XCR_XFEATURE_ENABLED_MASK 0x00000000
+
+static inline u64 xgetbv(u32 index)
+{
+ u32 eax, edx;
+
+ asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index));
+ return eax + ((u64)edx << 32);
+}
+
+static inline void xsetbv(u32 index, u64 value)
+{
+ u32 eax = value;
+ u32 edx = value >> 32;
+
+ asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
+}
+
+#endif /* _ASM_X86_FPU_XCR_H */
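Note: the new header uses the plain xgetbv/xsetbv mnemonics instead of hand-coded opcode bytes. A small kernel-context sketch of the typical read side follows, assuming XSAVE/OSXSAVE is already enabled (xgetbv would fault otherwise); the PKRU feature bit is used only as an example to test.

/* Illustrative sketch: read XCR0 and test one xfeature bit. */
static bool example_xfeature_enabled(void)
{
        u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);   /* XCR index 0 */

        return xcr0 & XFEATURE_MASK_PKRU;
}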
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 14ab815132d4..47a92232d595 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -35,7 +35,7 @@
XFEATURE_MASK_BNDCSR)
/* All currently supported supervisor features */
-#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (0)
+#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID)
/*
* A supervisor state component may not always contain valuable information,
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 296b346184b2..fb42659f6e98 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -60,12 +60,26 @@
#define FRAME_END "pop %" _ASM_BP "\n"
#ifdef CONFIG_X86_64
+
#define ENCODE_FRAME_POINTER \
"lea 1(%rsp), %rbp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs + 1;
+}
+
#else /* !CONFIG_X86_64 */
+
#define ENCODE_FRAME_POINTER \
"movl %esp, %ebp\n\t" \
"andl $0x7fffffff, %ebp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs & 0x7fffffff;
+}
+
#endif /* CONFIG_X86_64 */
#endif /* __ASSEMBLY__ */
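Note: encode_frame_pointer() mirrors the assembly ENCODE_FRAME_POINTER: on 64-bit the pt_regs pointer is tagged by setting its low bit (pt_regs is at least 8-byte aligned, so the bit is free), and on 32-bit by clearing the top bit. A standalone sketch of the 64-bit round trip is below; it is a simplified model of the unwinder's decode step, with a made-up address.

/* Standalone sketch: low bit marks "this frame pointer is really a
 * struct pt_regs pointer"; decode masks the bit off. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t encode_frame_pointer(uint64_t regs) { return regs + 1; }
static bool     is_encoded_regs(uint64_t bp)        { return bp & 1; }
static uint64_t decode_frame_pointer(uint64_t bp)   { return bp & ~1ULL; }

int main(void)
{
        uint64_t regs = 0xffffc90000123f58ULL;   /* made-up pt_regs address */
        uint64_t bp = encode_frame_pointer(regs);

        printf("encoded=%d decoded ok=%d\n",
               is_encoded_regs(bp), decode_frame_pointer(bp) == regs);
        return 0;
}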
@@ -83,6 +97,11 @@
#define ENCODE_FRAME_POINTER
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return 0;
+}
+
#endif
#define FRAME_BEGIN
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
index d552646411a9..35cff5f2becf 100644
--- a/arch/x86/include/asm/fsgsbase.h
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -57,7 +57,7 @@ static inline unsigned long x86_fsbase_read_cpu(void)
{
unsigned long fsbase;
- if (static_cpu_has(X86_FEATURE_FSGSBASE))
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE))
fsbase = rdfsbase();
else
rdmsrl(MSR_FS_BASE, fsbase);
@@ -67,7 +67,7 @@ static inline unsigned long x86_fsbase_read_cpu(void)
static inline void x86_fsbase_write_cpu(unsigned long fsbase)
{
- if (static_cpu_has(X86_FEATURE_FSGSBASE))
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE))
wrfsbase(fsbase);
else
wrmsrl(MSR_FS_BASE, fsbase);
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 74c12437401e..a4aeeaace040 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -36,61 +36,56 @@ struct msi_desc;
enum irq_alloc_type {
X86_IRQ_ALLOC_TYPE_IOAPIC = 1,
X86_IRQ_ALLOC_TYPE_HPET,
- X86_IRQ_ALLOC_TYPE_MSI,
- X86_IRQ_ALLOC_TYPE_MSIX,
+ X86_IRQ_ALLOC_TYPE_PCI_MSI,
+ X86_IRQ_ALLOC_TYPE_PCI_MSIX,
X86_IRQ_ALLOC_TYPE_DMAR,
X86_IRQ_ALLOC_TYPE_UV,
+ X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT,
+ X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT,
};
+struct ioapic_alloc_info {
+ int pin;
+ int node;
+ u32 trigger : 1;
+ u32 polarity : 1;
+ u32 valid : 1;
+ struct IO_APIC_route_entry *entry;
+};
+
+struct uv_alloc_info {
+ int limit;
+ int blade;
+ unsigned long offset;
+ char *name;
+
+};
+
+/**
+ * irq_alloc_info - X86 specific interrupt allocation info
+ * @type: X86 specific allocation type
+ * @flags: Flags for allocation tweaks
+ * @devid: Device ID for allocations
+ * @hwirq: Associated hw interrupt number in the domain
+ * @mask: CPU mask for vector allocation
+ * @desc: Pointer to msi descriptor
+ * @data: Allocation specific data
+ *
+ * @ioapic: IOAPIC specific allocation data
+ * @uv: UV specific allocation data
+*/
struct irq_alloc_info {
enum irq_alloc_type type;
u32 flags;
- const struct cpumask *mask; /* CPU mask for vector allocation */
+ u32 devid;
+ irq_hw_number_t hwirq;
+ const struct cpumask *mask;
+ struct msi_desc *desc;
+ void *data;
+
union {
- int unused;
-#ifdef CONFIG_HPET_TIMER
- struct {
- int hpet_id;
- int hpet_index;
- void *hpet_data;
- };
-#endif
-#ifdef CONFIG_PCI_MSI
- struct {
- struct pci_dev *msi_dev;
- irq_hw_number_t msi_hwirq;
- };
-#endif
-#ifdef CONFIG_X86_IO_APIC
- struct {
- int ioapic_id;
- int ioapic_pin;
- int ioapic_node;
- u32 ioapic_trigger : 1;
- u32 ioapic_polarity : 1;
- u32 ioapic_valid : 1;
- struct IO_APIC_route_entry *ioapic_entry;
- };
-#endif
-#ifdef CONFIG_DMAR_TABLE
- struct {
- int dmar_id;
- void *dmar_data;
- };
-#endif
-#ifdef CONFIG_X86_UV
- struct {
- int uv_limit;
- int uv_blade;
- unsigned long uv_offset;
- char *uv_name;
- };
-#endif
-#if IS_ENABLED(CONFIG_VMD)
- struct {
- struct msi_desc *desc;
- };
-#endif
+ struct ioapic_alloc_info ioapic;
+ struct uv_alloc_info uv;
};
};
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 7a4d2062385c..0ed20e8bba9e 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -28,39 +28,6 @@
#define HYPERV_CPUID_MAX 0x4000ffff
/*
- * Aliases for Group A features that have X64 in the name.
- * On x86/x64 these are HYPERV_CPUID_FEATURES.EAX bits.
- */
-
-#define HV_X64_MSR_VP_RUNTIME_AVAILABLE \
- HV_MSR_VP_RUNTIME_AVAILABLE
-#define HV_X64_MSR_SYNIC_AVAILABLE \
- HV_MSR_SYNIC_AVAILABLE
-#define HV_X64_MSR_APIC_ACCESS_AVAILABLE \
- HV_MSR_APIC_ACCESS_AVAILABLE
-#define HV_X64_MSR_HYPERCALL_AVAILABLE \
- HV_MSR_HYPERCALL_AVAILABLE
-#define HV_X64_MSR_VP_INDEX_AVAILABLE \
- HV_MSR_VP_INDEX_AVAILABLE
-#define HV_X64_MSR_RESET_AVAILABLE \
- HV_MSR_RESET_AVAILABLE
-#define HV_X64_MSR_GUEST_IDLE_AVAILABLE \
- HV_MSR_GUEST_IDLE_AVAILABLE
-#define HV_X64_ACCESS_FREQUENCY_MSRS \
- HV_ACCESS_FREQUENCY_MSRS
-#define HV_X64_ACCESS_REENLIGHTENMENT \
- HV_ACCESS_REENLIGHTENMENT
-#define HV_X64_ACCESS_TSC_INVARIANT \
- HV_ACCESS_TSC_INVARIANT
-
-/*
- * Aliases for Group B features that have X64 in the name.
- * On x86/x64 these are HYPERV_CPUID_FEATURES.EBX bits.
- */
-#define HV_X64_POST_MESSAGES HV_POST_MESSAGES
-#define HV_X64_SIGNAL_EVENTS HV_SIGNAL_EVENTS
-
-/*
* Group D Features. The bit assignments are custom to each architecture.
* On x86/x64 these are HYPERV_CPUID_FEATURES.EDX bits.
*/
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index a43366191212..b2442eb0ac2f 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs) \
instrumentation_begin(); \
irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
- run_on_irqstack_cond(__##func, regs, regs); \
+ run_sysvec_on_irqstack_cond(__##func, regs); \
irq_exit_rcu(); \
instrumentation_end(); \
irqentry_exit(regs, state); \
@@ -309,6 +309,19 @@ static __always_inline void __##func(struct pt_regs *regs)
__visible void noist_##func(struct pt_regs *regs)
/**
+ * DECLARE_IDTENTRY_VC - Declare functions for the VC entry point
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE, but declares also the
+ * safe_stack C handler.
+ */
+#define DECLARE_IDTENTRY_VC(vector, func) \
+ DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
+ __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
+ __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
+
+/**
* DEFINE_IDTENTRY_IST - Emit code for IST entry points
* @func: Function name of the entry point
*
@@ -347,6 +360,35 @@ static __always_inline void __##func(struct pt_regs *regs)
#define DEFINE_IDTENTRY_DF(func) \
DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+/**
+ * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
+ which runs on a safe stack.
+ * @func: Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
+
+/**
+ * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
+ which runs on the VC fall-back stack
+ * @func: Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_VC_IST(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
+
+/**
+ * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
+ * @func: Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_VC(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+
#else /* CONFIG_X86_64 */
/**
@@ -433,6 +475,9 @@ __visible noinstr void func(struct pt_regs *regs, \
# define DECLARE_IDTENTRY_XENCB(vector, func) \
DECLARE_IDTENTRY(vector, func)
+# define DECLARE_IDTENTRY_VC(vector, func) \
+ idtentry_vc vector asm_##func func
+
#else
# define DECLARE_IDTENTRY_MCE(vector, func) \
DECLARE_IDTENTRY(vector, func)
@@ -547,7 +592,7 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check);
/* NMI */
DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
-#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);
#endif
@@ -557,13 +602,18 @@ DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug);
#else
DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug);
#endif
-#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug);
#endif
/* #DF */
DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault);
+/* #VC */
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication);
+#endif
+
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
#endif
@@ -591,10 +641,6 @@ DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
-# ifdef CONFIG_X86_UV
-DECLARE_IDTENTRY_SYSVEC(UV_BAU_MESSAGE, sysvec_uv_bau_message);
-# endif
-
# ifdef CONFIG_X86_MCE_THRESHOLD
DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
# endif
diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h
index 2b6ccf2c49f1..a0f839aa144d 100644
--- a/arch/x86/include/asm/insn-eval.h
+++ b/arch/x86/include/asm/insn-eval.h
@@ -15,9 +15,15 @@
#define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf)
#define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4))
+bool insn_has_rep_prefix(struct insn *insn);
void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
+int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs);
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
int insn_get_code_seg_params(struct pt_regs *regs);
+int insn_fetch_from_user(struct pt_regs *regs,
+ unsigned char buf[MAX_INSN_SIZE]);
+bool insn_decode(struct insn *insn, struct pt_regs *regs,
+ unsigned char buf[MAX_INSN_SIZE], int buf_size);
#endif /* _ASM_X86_INSN_EVAL_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e1aa17a468a8..d726459d08e5 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -401,7 +401,7 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
/**
* iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
- * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
* @src: source
* @count: number of 512 bits quantities to submit
*
@@ -412,25 +412,14 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
* Warning: Do not use this helper unless your driver has checked that the CPU
* instruction is supported on the platform.
*/
-static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
size_t count)
{
- /*
- * Note that this isn't an "on-stack copy", just definition of "dst"
- * as a pointer to 64-bytes of stuff that is going to be overwritten.
- * In the MOVDIR64B case that may be needed as you can use the
- * MOVDIR64B instruction to copy arbitrary memory around. This trick
- * lets the compiler know how much gets clobbered.
- */
- volatile struct { char _[64]; } *dst = __dst;
const u8 *from = src;
const u8 *end = from + count * 64;
while (from < end) {
- /* MOVDIR64B [rdx], rax */
- asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
- : "=m" (dst)
- : "d" (from), "a" (dst));
+ movdir64b(dst, from);
from += 64;
}
}
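Note: iosubmit_cmds512() now delegates to the generic movdir64b() helper and simply walks the source in 64-byte steps. A hedged kernel-context sketch of a caller follows; the portal/descriptor objects and the function name are hypothetical, only the feature check and the 64-byte granularity are the point.

/* Illustrative sketch of a caller checking the CPU feature before
 * submitting 512-bit descriptors to an MMIO portal. */
static int example_submit(void __iomem *portal, const void *descs, size_t ndesc)
{
        if (!boot_cpu_has(X86_FEATURE_MOVDIR64B))
                return -EOPNOTSUPP;

        /* each descriptor is one 512-bit (64-byte) unit */
        iosubmit_cmds512(portal, descs, ndesc);
        return 0;
}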
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 4bc985f1e2e4..af4a151d70b3 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -45,8 +45,6 @@ extern int irq_remap_enable_fault_handling(void);
extern void panic_if_irq_remap(const char *msg);
extern struct irq_domain *
-irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info);
-extern struct irq_domain *
irq_remapping_get_irq_domain(struct irq_alloc_info *info);
/* Create PCI MSI/MSIx irqdomain, use @parent as the parent irqdomain. */
@@ -74,12 +72,6 @@ static inline void panic_if_irq_remap(const char *msg)
}
static inline struct irq_domain *
-irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info)
-{
- return NULL;
-}
-
-static inline struct irq_domain *
irq_remapping_get_irq_domain(struct irq_alloc_info *info)
{
return NULL;
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 4ae66f097101..775816965c6a 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void)
return __this_cpu_read(irq_count) != -1;
}
-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+ struct irq_desc *desc);
-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
{
void *tos = __this_cpu_read(hardirq_stack_ptr);
__this_cpu_add(irq_count, 1);
- asm_call_on_stack(tos - 8, func, arg);
+ asm_call_on_stack(tos - 8, func, NULL);
+ __this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs)
+{
+ void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+ __this_cpu_add(irq_count, 1);
+ asm_call_sysvec_on_stack(tos - 8, func, regs);
+ __this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+ struct irq_desc *desc)
+{
+ void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+ __this_cpu_add(irq_count, 1);
+ asm_call_irq_on_stack(tos - 8, func, desc);
__this_cpu_sub(irq_count, 1);
}
#else /* CONFIG_X86_64 */
static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void)) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+ struct irq_desc *desc) { }
#endif /* !CONFIG_X86_64 */
static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
return !user_mode(regs) && !irqstack_active();
}
-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
struct pt_regs *regs)
{
- void (*__func)(void *arg) = func;
+ lockdep_assert_irqs_disabled();
+
+ if (irq_needs_irq_stack(regs))
+ __run_on_irqstack(func);
+ else
+ func();
+}
+
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs)
+{
+ lockdep_assert_irqs_disabled();
+ if (irq_needs_irq_stack(regs))
+ __run_sysvec_on_irqstack(func, regs);
+ else
+ func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+ struct pt_regs *regs)
+{
lockdep_assert_irqs_disabled();
if (irq_needs_irq_stack(regs))
- __run_on_irqstack(__func, arg);
+ __run_irq_on_irqstack(func, desc);
else
- __func(arg);
+ func(desc);
}
#endif
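Note: the irq-stack helpers are now strongly typed, with one variant per callback signature (plain void, sysvec taking pt_regs, irq taking irq_desc) instead of a single void-pointer version; this is why the idtentry macro earlier in this series switches to run_sysvec_on_irqstack_cond(). A brief sketch of how a device-interrupt path could use the irq_desc variant follows; the wrapper function is illustrative, the point is passing the typed callback and its argument separately.

/* Illustrative sketch: dispatch a device interrupt on the hard IRQ stack
 * (when needed) using the typed irq_desc variant. */
static void example_handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
        run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
}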
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
index c066ffae222b..cd684d45cb5f 100644
--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -51,9 +51,13 @@ extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
#endif /* CONFIG_X86_IO_APIC */
#ifdef CONFIG_PCI_MSI
-extern void arch_init_msi_domain(struct irq_domain *domain);
+void x86_create_pci_msi_domain(void);
+struct irq_domain *native_create_pci_msi_domain(void);
+extern struct irq_domain *x86_pci_msi_default_domain;
#else
-static inline void arch_init_msi_domain(struct irq_domain *domain) { }
+static inline void x86_create_pci_msi_domain(void) { }
+#define native_create_pci_msi_domain NULL
+#define x86_pci_msi_default_domain NULL
#endif
#endif
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 143bc9abe99c..991a7ad540c7 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -106,5 +106,9 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_int3_handler(struct pt_regs *regs);
extern int kprobe_debug_handler(struct pt_regs *regs);
+#else
+
+static inline int kprobe_debug_handler(struct pt_regs *regs) { return 0; }
+
#endif /* CONFIG_KPROBES */
#endif /* _ASM_X86_KPROBES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5ab3af7275d8..5303dbc5c9bc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1596,7 +1596,8 @@ asmlinkage void kvm_spurious_fault(void);
_ASM_EXTABLE(666b, 667b)
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index cf503824529c..a0f147893a04 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -136,9 +136,24 @@
#define MCE_HANDLED_NFIT BIT_ULL(3)
#define MCE_HANDLED_EDAC BIT_ULL(4)
#define MCE_HANDLED_MCELOG BIT_ULL(5)
+
+/*
+ * Indicates an MCE which has happened in kernel space but from
+ * which the kernel can recover simply by executing fixup_exception()
+ * so that an error is returned to the caller of the function that
+ * hit the machine check.
+ */
#define MCE_IN_KERNEL_RECOV BIT_ULL(6)
/*
+ * Indicates an MCE that happened in kernel space while copying data
+ * from user. In this case fixup_exception() gets the kernel to the
+ * error exit for the copy function. Machine check handler can then
+ * treat it like a fault taken in user mode.
+ */
+#define MCE_IN_KERNEL_COPYIN BIT_ULL(7)
+
+/*
* This structure contains all data related to the MCE log. Also
* carries a signature to make it easier to find from external
* debugging tools. Each entry is only valid when its finished flag
@@ -174,6 +189,15 @@ extern void mce_unregister_decode_chain(struct notifier_block *nb);
extern int mce_p5_enabled;
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+extern void enable_copy_mc_fragile(void);
+unsigned long __must_check copy_mc_fragile(void *dst, const void *src, unsigned cnt);
+#else
+static inline void enable_copy_mc_fragile(void)
+{
+}
+#endif
+
#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
@@ -200,12 +224,8 @@ void mce_setup(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);
-/*
- * Maximum banks number.
- * This is the limit of the current register layout on
- * Intel CPUs.
- */
-#define MAX_NR_BANKS 32
+/* Maximum number of MCA banks per CPU. */
+#define MAX_NR_BANKS 64
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
@@ -328,7 +348,6 @@ enum smca_bank_types {
struct smca_hwid {
unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */
u32 hwid_mcatype; /* (hwid,mcatype) tuple */
- u32 xec_bitmap; /* Bitmap of valid ExtErrorCodes; current max is 21. */
u8 count; /* Number of instances. */
};
diff --git a/arch/x86/include/asm/mcsafe_test.h b/arch/x86/include/asm/mcsafe_test.h
deleted file mode 100644
index eb59804b6201..000000000000
--- a/arch/x86/include/asm/mcsafe_test.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MCSAFE_TEST_H_
-#define _MCSAFE_TEST_H_
-
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_MCSAFE_TEST
-extern unsigned long mcsafe_test_src;
-extern unsigned long mcsafe_test_dst;
-
-static inline void mcsafe_inject_src(void *addr)
-{
- if (addr)
- mcsafe_test_src = (unsigned long) addr;
- else
- mcsafe_test_src = ~0UL;
-}
-
-static inline void mcsafe_inject_dst(void *addr)
-{
- if (addr)
- mcsafe_test_dst = (unsigned long) addr;
- else
- mcsafe_test_dst = ~0UL;
-}
-#else /* CONFIG_MCSAFE_TEST */
-static inline void mcsafe_inject_src(void *addr)
-{
-}
-
-static inline void mcsafe_inject_dst(void *addr)
-{
-}
-#endif /* CONFIG_MCSAFE_TEST */
-
-#else /* __ASSEMBLY__ */
-#include <asm/export.h>
-
-#ifdef CONFIG_MCSAFE_TEST
-.macro MCSAFE_TEST_CTL
- .pushsection .data
- .align 8
- .globl mcsafe_test_src
- mcsafe_test_src:
- .quad 0
- EXPORT_SYMBOL_GPL(mcsafe_test_src)
- .globl mcsafe_test_dst
- mcsafe_test_dst:
- .quad 0
- EXPORT_SYMBOL_GPL(mcsafe_test_dst)
- .popsection
-.endm
-
-.macro MCSAFE_TEST_SRC reg count target
- leaq \count(\reg), %r9
- cmp mcsafe_test_src, %r9
- ja \target
-.endm
-
-.macro MCSAFE_TEST_DST reg count target
- leaq \count(\reg), %r9
- cmp mcsafe_test_dst, %r9
- ja \target
-.endm
-#else
-.macro MCSAFE_TEST_CTL
-.endm
-
-.macro MCSAFE_TEST_SRC reg count target
-.endm
-
-.macro MCSAFE_TEST_DST reg count target
-.endm
-#endif /* CONFIG_MCSAFE_TEST */
-#endif /* __ASSEMBLY__ */
-#endif /* _MCSAFE_TEST_H_ */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 5049f6c22683..c9f5df0a1c10 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -19,6 +19,7 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
extern u64 sme_me_mask;
+extern u64 sev_status;
extern bool sev_enabled;
void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
@@ -48,8 +49,10 @@ void __init mem_encrypt_free_decrypted_mem(void);
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
+void __init sev_es_init_vc_handling(void);
bool sme_active(void);
bool sev_active(void);
+bool sev_es_active(void);
#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
@@ -70,8 +73,10 @@ static inline void __init sme_early_init(void) { }
static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void sev_es_init_vc_handling(void) { }
static inline bool sme_active(void) { return false; }
static inline bool sev_active(void) { return false; }
+static inline bool sev_es_active(void) { return false; }
static inline int __init
early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 0a301ad0b02f..9257667d13c5 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -59,5 +59,6 @@ typedef struct {
}
void leave_mm(int cpu);
+#define leave_mm leave_mm
#endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 606cbaebd336..e90ac7e9ae2c 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -67,21 +67,11 @@ static inline void find_smp_config(void)
#ifdef CONFIG_X86_MPPARSE
extern void e820__memblock_alloc_reserved_mpc_new(void);
extern int enable_update_mptable;
-extern int default_mpc_apic_id(struct mpc_cpu *m);
-extern void default_smp_read_mpc_oem(struct mpc_table *mpc);
-# ifdef CONFIG_X86_IO_APIC
-extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str);
-# else
-# define default_mpc_oem_bus_info NULL
-# endif
extern void default_find_smp_config(void);
extern void default_get_smp_config(unsigned int early);
#else
static inline void e820__memblock_alloc_reserved_mpc_new(void) { }
#define enable_update_mptable 0
-#define default_mpc_apic_id NULL
-#define default_smp_read_mpc_oem NULL
-#define default_mpc_oem_bus_info NULL
#define default_find_smp_config x86_init_noop
#define default_get_smp_config x86_init_uint_noop
#endif
diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h
index 25ddd0916bb2..cd30013d15d3 100644
--- a/arch/x86/include/asm/msi.h
+++ b/arch/x86/include/asm/msi.h
@@ -9,6 +9,4 @@ typedef struct irq_alloc_info msi_alloc_info_t;
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
msi_alloc_info_t *arg);
-void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc);
-
#endif /* _ASM_X86_MSI_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 2859ee4f39a8..972a34d93505 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -257,6 +257,9 @@
#define MSR_IA32_LASTINTFROMIP 0x000001dd
#define MSR_IA32_LASTINTTOIP 0x000001de
+#define MSR_IA32_PASID 0x00000d93
+#define MSR_IA32_PASID_VALID BIT_ULL(31)
+
/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_BTF_SHIFT 1
@@ -464,11 +467,15 @@
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL 0xc001103a
#define MSR_AMD64_IBSBRTARGET 0xc001103b
+#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
+#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
@@ -857,11 +864,14 @@
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR3 0x0000030c
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+#define MSR_PERF_METRICS 0x00000329
+
/* PERF_GLOBAL_OVF_CTL bits */
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index e7752b4038ff..86651e86289d 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -4,7 +4,7 @@
#define _ASM_X86_NOSPEC_BRANCH_H_
#include <linux/static_key.h>
-#include <linux/frame.h>
+#include <linux/objtool.h>
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index bbfde3d2662f..e3bae2b60a0d 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -3,6 +3,7 @@
#define _ASM_X86_NUMA_H
#include <linux/nodemask.h>
+#include <linux/errno.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
@@ -62,12 +63,14 @@ extern void numa_clear_node(int cpu);
extern void __init init_cpu_to_node(void);
extern void numa_add_cpu(int cpu);
extern void numa_remove_cpu(int cpu);
+extern void init_gi_nodes(void);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void init_cpu_to_node(void) { }
static inline void numa_add_cpu(int cpu) { }
static inline void numa_remove_cpu(int cpu) { }
+static inline void init_gi_nodes(void) { }
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
@@ -77,7 +80,12 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable);
#ifdef CONFIG_NUMA_EMU
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
-void numa_emu_cmdline(char *);
+int numa_emu_cmdline(char *str);
+#else /* CONFIG_NUMA_EMU */
+static inline int numa_emu_cmdline(char *str)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_NUMA_EMU */
#endif /* _ASM_X86_NUMA_H */
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
index d25534940bde..fdbffec4cfde 100644
--- a/arch/x86/include/asm/orc_types.h
+++ b/arch/x86/include/asm/orc_types.h
@@ -39,27 +39,6 @@
#define ORC_REG_SP_INDIRECT 9
#define ORC_REG_MAX 15
-/*
- * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
- * caller's SP right before it made the call). Used for all callable
- * functions, i.e. all C code and all callable asm functions.
- *
- * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
- * to a fully populated pt_regs from a syscall, interrupt, or exception.
- *
- * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
- * points to the iret return frame.
- *
- * The UNWIND_HINT macros are used only for the unwind_hint struct. They
- * aren't used in struct orc_entry due to size and complexity constraints.
- * Objtool converts them to real types when it converts the hints to orc
- * entries.
- */
-#define ORC_TYPE_CALL 0
-#define ORC_TYPE_REGS 1
-#define ORC_TYPE_REGS_IRET 2
-#define UNWIND_HINT_TYPE_RET_OFFSET 3
-
#ifndef __ASSEMBLY__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
@@ -78,19 +57,6 @@ struct orc_entry {
unsigned end:1;
} __packed;
-/*
- * This struct is used by asm and inline asm code to manually annotate the
- * location of registers on the stack for the ORC unwinder.
- *
- * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
- */
-struct unwind_hint {
- u32 ip;
- s16 sp_offset;
- u8 sp_reg;
- u8 type;
- u8 end;
-};
#endif /* __ASSEMBLY__ */
#endif /* _ORC_TYPES_H */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 288b065955b7..d0c6c10c18a0 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -28,6 +28,7 @@
#define IST_INDEX_NMI 1
#define IST_INDEX_DB 2
#define IST_INDEX_MCE 3
+#define IST_INDEX_VC 4
/*
* Set __PAGE_OFFSET to the most negative possible address +
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 3d2afecde50c..d25cc6830e89 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -160,8 +160,6 @@ static inline void wbinvd(void)
PVOP_VCALL0(cpu.wbinvd);
}
-#define get_kernel_rpl() (pv_info.kernel_rpl)
-
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
@@ -277,12 +275,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
PVOP_VCALL2(cpu.load_tls, t, cpu);
}
-#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
PVOP_VCALL1(cpu.load_gs_index, gs);
}
-#endif
static inline void write_ldt_entry(struct desc_struct *dt, int entry,
const void *desc)
@@ -375,52 +371,22 @@ static inline void paravirt_release_p4d(unsigned long pfn)
static inline pte_t __pte(pteval_t val)
{
- pteval_t ret;
-
- if (sizeof(pteval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
- else
- ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);
-
- return (pte_t) { .pte = ret };
+ return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
}
static inline pteval_t pte_val(pte_t pte)
{
- pteval_t ret;
-
- if (sizeof(pteval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
- pte.pte, (u64)pte.pte >> 32);
- else
- ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
-
- return ret;
+ return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
}
static inline pgd_t __pgd(pgdval_t val)
{
- pgdval_t ret;
-
- if (sizeof(pgdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
- else
- ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);
-
- return (pgd_t) { ret };
+ return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
}
static inline pgdval_t pgd_val(pgd_t pgd)
{
- pgdval_t ret;
-
- if (sizeof(pgdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
- pgd.pgd, (u64)pgd.pgd >> 32);
- else
- ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
-
- return ret;
+ return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -438,78 +404,34 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned
pte_t *ptep, pte_t old_pte, pte_t pte)
{
- if (sizeof(pteval_t) > sizeof(long))
- /* 5 arg words */
- pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
- else
- PVOP_VCALL4(mmu.ptep_modify_prot_commit,
- vma, addr, ptep, pte.pte);
+ PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
- if (sizeof(pteval_t) > sizeof(long))
- PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
- else
- PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- if (sizeof(pteval_t) > sizeof(long))
- /* 5 arg words */
- pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
- else
- PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
+ PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- pmdval_t val = native_pmd_val(pmd);
-
- if (sizeof(pmdval_t) > sizeof(long))
- PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
- else
- PVOP_VCALL2(mmu.set_pmd, pmdp, val);
+ PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}
-#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
- pmdval_t ret;
-
- if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
- else
- ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);
-
- return (pmd_t) { ret };
+ return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
}
static inline pmdval_t pmd_val(pmd_t pmd)
{
- pmdval_t ret;
-
- if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
- pmd.pmd, (u64)pmd.pmd >> 32);
- else
- ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
-
- return ret;
+ return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
}
static inline void set_pud(pud_t *pudp, pud_t pud)
{
- pudval_t val = native_pud_val(pud);
-
- if (sizeof(pudval_t) > sizeof(long))
- PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
- else
- PVOP_VCALL2(mmu.set_pud, pudp, val);
+ PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}
-#if CONFIG_PGTABLE_LEVELS >= 4
+
static inline pud_t __pud(pudval_t val)
{
pudval_t ret;
@@ -526,7 +448,7 @@ static inline pudval_t pud_val(pud_t pud)
static inline void pud_clear(pud_t *pudp)
{
- set_pud(pudp, __pud(0));
+ set_pud(pudp, native_make_pud(0));
}
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
@@ -563,40 +485,17 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
} while (0)
#define pgd_clear(pgdp) do { \
- if (pgtable_l5_enabled()) \
- set_pgd(pgdp, __pgd(0)); \
+ if (pgtable_l5_enabled()) \
+ set_pgd(pgdp, native_make_pgd(0)); \
} while (0)
#endif /* CONFIG_PGTABLE_LEVELS == 5 */
static inline void p4d_clear(p4d_t *p4dp)
{
- set_p4d(p4dp, __p4d(0));
+ set_p4d(p4dp, native_make_p4d(0));
}
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
-
-#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_X86_PAE
-/* Special-case pte-setting operations for PAE, which can't update a
- 64-bit pte atomically */
-static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
-{
- PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
- PVOP_VCALL1(mmu.pmd_clear, pmdp);
-}
-#else /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
set_pte(ptep, pte);
@@ -605,14 +504,13 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- set_pte_at(mm, addr, ptep, __pte(0));
+ set_pte(ptep, native_make_pte(0));
}
static inline void pmd_clear(pmd_t *pmdp)
{
- set_pmd(pmdp, __pmd(0));
+ set_pmd(pmdp, native_make_pmd(0));
}
-#endif /* CONFIG_X86_PAE */
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
@@ -682,16 +580,9 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
#endif /* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
-#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
-
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
-
-#define PV_FLAGS_ARG "0"
-#define PV_EXTRA_CLOBBERS
-#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS \
@@ -712,14 +603,6 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
"pop %rsi;" \
"pop %rdx;" \
"pop %rcx;"
-
-/* We save some registers, but all of them, that's too much. We clobber all
- * caller saved registers but the argument parameter */
-#define PV_SAVE_REGS "pushq %%rdi;"
-#define PV_RESTORE_REGS "popq %%rdi;"
-#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
-#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
-#define PV_FLAGS_ARG "D"
#endif
/*
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8dfcb2508e6d..0fad9f61c76a 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -68,13 +68,8 @@ struct paravirt_callee_save {
/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
- unsigned int kernel_rpl;
- int shared_kernel_pmd;
-
-#ifdef CONFIG_X86_64
u16 extra_user_64bit_cs; /* __USER_CS if none */
#endif
-#endif
const char *name;
};
@@ -126,9 +121,7 @@ struct pv_cpu_ops {
void (*set_ldt)(const void *desc, unsigned entries);
unsigned long (*store_tr)(void);
void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-#ifdef CONFIG_X86_64
void (*load_gs_index)(unsigned int idx);
-#endif
void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
const void *desc);
void (*write_gdt_entry)(struct desc_struct *,
@@ -249,8 +242,6 @@ struct pv_mmu_ops {
/* Pagetable manipulation functions */
void (*set_pte)(pte_t *ptep, pte_t pteval);
- void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval);
void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
@@ -264,21 +255,11 @@ struct pv_mmu_ops {
struct paravirt_callee_save pgd_val;
struct paravirt_callee_save make_pgd;
-#if CONFIG_PGTABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
- void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
- void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
- void (*pmd_clear)(pmd_t *pmdp);
-
-#endif /* CONFIG_X86_PAE */
-
void (*set_pud)(pud_t *pudp, pud_t pudval);
struct paravirt_callee_save pmd_val;
struct paravirt_callee_save make_pmd;
-#if CONFIG_PGTABLE_LEVELS >= 4
struct paravirt_callee_save pud_val;
struct paravirt_callee_save make_pud;
@@ -291,10 +272,6 @@ struct pv_mmu_ops {
void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
-#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
-
-#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-
struct pv_lazy_ops lazy_mode;
/* dom0 ops */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 7ccb338507e3..d2c76c8d8cfd 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -105,17 +105,6 @@ static inline void early_quirks(void) { }
extern void pci_iommu_alloc(void);
-#ifdef CONFIG_PCI_MSI
-/* implemented in arch/x86/kernel/apic/io_apic. */
-struct msi_desc;
-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
-void native_teardown_msi_irq(unsigned int irq);
-void native_restore_msi_irqs(struct pci_dev *dev);
-#else
-#define native_setup_msi_irqs NULL
-#define native_teardown_msi_irq NULL
-#endif
-
/* generic pci stuff */
#include <asm-generic/pci.h>
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 73bb404f4d2a..490411dba438 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -114,9 +114,20 @@ extern const struct pci_raw_ops pci_direct_conf1;
extern bool port_cf9_safe;
/* arch_initcall level */
+#ifdef CONFIG_PCI_DIRECT
extern int pci_direct_probe(void);
extern void pci_direct_init(int type);
+#else
+static inline int pci_direct_probe(void) { return -1; }
+static inline void pci_direct_init(int type) { }
+#endif
+
+#ifdef CONFIG_PCI_BIOS
extern void pci_pcbios_init(void);
+#else
+static inline void pci_pcbios_init(void) { }
+#endif
+
extern void __init dmi_check_pciprobe(void);
extern void __init dmi_check_skip_isa_align(void);
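For context, the point of the new CONFIG_PCI_DIRECT/CONFIG_PCI_BIOS stubs above is that callers no longer need their own #ifdefs. A minimal sketch under that assumption (the init function name is hypothetical, not part of this diff):

    static int __init hypothetical_pci_arch_init(void)
    {
            int type = pci_direct_probe();  /* stub returns -1 when CONFIG_PCI_DIRECT is off */

            if (type > 0)
                    pci_direct_init(type);

            pci_pcbios_init();              /* stub is a no-op when CONFIG_PCI_BIOS is off */
            return 0;
    }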
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 0c1b13720525..6960cd6d1f23 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -196,13 +196,29 @@ struct x86_pmu_capability {
* Fixed-purpose performance events:
*/
+/* RDPMC offset for Fixed PMCs */
+#define INTEL_PMC_FIXED_RDPMC_BASE (1 << 30)
+#define INTEL_PMC_FIXED_RDPMC_METRICS (1 << 29)
+
/*
- * All 3 fixed-mode PMCs are configured via this single MSR:
+ * All the fixed-mode PMCs are configured via this single MSR:
*/
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
/*
- * The counts are available in three separate MSRs:
+ * There is no event-code assigned to the fixed-mode PMCs.
+ *
+ * For a fixed-mode PMC, which has an equivalent event on a general-purpose
+ * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
+ * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
+ *
+ * For a fixed-mode PMC, which doesn't have an equivalent event, a
+ * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
+ * The pseudo event-code for a fixed-mode PMC must be 0x00.
+ * The pseudo umask-code is 0xX, where X equals the index of the fixed
+ * counter + 1; e.g., fixed counter 2 has the pseudo-encoding 0x0300.
+ *
+ * The counts are available in separate MSRs:
*/
/* Instr_Retired.Any: */
@@ -213,29 +229,84 @@ struct x86_pmu_capability {
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
-/* CPU_CLK_Unhalted.Ref: */
+/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
+/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
+#define MSR_ARCH_PERFMON_FIXED_CTR3 0x30c
+#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
+#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
+
/*
* We model BTS tracing as another fixed-mode PMC.
*
- * We choose a value in the middle of the fixed event range, since lower
+ * We choose the value 47 for the fixed index of BTS, since lower
* values are used by actual fixed events and higher values are used
* to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
*/
-#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15)
+
+/*
+ * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
+ * each TopDown metric event.
+ *
+ * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
+ */
+#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_TD_RETIRING (INTEL_PMC_IDX_METRIC_BASE + 0)
+#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1)
+#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2)
+#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3)
+#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_BE_BOUND
+#define INTEL_PMC_MSK_TOPDOWN ((0xfull << INTEL_PMC_IDX_METRIC_BASE) | \
+ INTEL_PMC_MSK_FIXED_SLOTS)
-#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
-#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62)
-#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
-#define GLOBAL_STATUS_ASIF BIT_ULL(60)
-#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
-#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58
-#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
-#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
+/*
+ * There is no event-code assigned to the TopDown events.
+ *
+ * For the slots event, use the pseudo code of the fixed counter 3.
+ *
+ * For the metric events, the pseudo event-code is 0x00.
+ * The pseudo umask-code starts from the middle of the pseudo event
+ * space, 0x80.
+ */
+#define INTEL_TD_SLOTS 0x0400 /* TOPDOWN.SLOTS */
+/* Level 1 metrics */
+#define INTEL_TD_METRIC_RETIRING 0x8000 /* Retiring metric */
+#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */
+#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */
+#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */
+#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_BE_BOUND
+#define INTEL_TD_METRIC_NUM 4
+
+static inline bool is_metric_idx(int idx)
+{
+ return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
+}
+
+static inline bool is_topdown_idx(int idx)
+{
+ return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
+}
+#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit) \
+ (~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)
+
+#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
+#define GLOBAL_STATUS_BUFFER_OVF_BIT 62
+#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
+#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
+#define GLOBAL_STATUS_ASIF BIT_ULL(60)
+#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
+#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58
+#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
+#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55
+#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
+#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48
+
+#define GLOBAL_CTRL_EN_PERF_METRICS 48
/*
* We model guest LBR event tracing as another fixed-mode PMC like BTS.
*
@@ -334,6 +405,7 @@ struct pebs_xmm {
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
+#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */
#define IBS_RIP_INVALID (1ULL<<38)
#ifdef CONFIG_X86_LOCAL_APIC
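A small sketch of the pseudo-encoding rule described in the comments above (event = 0x00, umask = fixed-counter index + 1); the helper name is hypothetical and only illustrates the arithmetic:

    static u64 hypothetical_fixed_ctr_pseudo_encoding(int fixed_idx)
    {
            /* fixed counter 2 -> 0x0300, fixed counter 3 -> 0x0400 (INTEL_TD_SLOTS) */
            return (u64)(fixed_idx + 1) << 8;
    }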
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 80fbb4a9ed87..56baf43befb4 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -20,12 +20,7 @@ typedef union {
} pte_t;
#endif /* !__ASSEMBLY__ */
-#ifdef CONFIG_PARAVIRT_XXL
-#define SHARED_KERNEL_PMD ((!static_cpu_has(X86_FEATURE_PTI) && \
- (pv_info.shared_kernel_pmd)))
-#else
#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
-#endif
#define ARCH_PAGE_TABLE_SYNC_MASK (SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b836138ce852..a02c67291cfc 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -28,7 +28,7 @@
#include <asm-generic/pgtable_uffd.h>
extern pgd_t early_top_pgt[PTRS_PER_PGD];
-int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
+bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
@@ -63,7 +63,6 @@ extern pmdval_t early_pmd_flags;
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte) \
native_set_pte_atomic(ptep, pte)
@@ -1033,10 +1032,10 @@ static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
return res;
}
-static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep , pte_t pte)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
{
- native_set_pte(ptep, pte);
+ set_pte(ptep, pte);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 8f63efb2a2cc..52e5f5f2240d 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -159,6 +159,4 @@ extern unsigned int ptrs_per_p4d;
#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
-#define ARCH_PAGE_TABLE_SYNC_MASK (pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
-
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 97143d87994c..5ac507586769 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -517,7 +517,7 @@ struct thread_struct {
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
- unsigned long debugreg6;
+ unsigned long virtual_dr6;
/* Keep track of the exact dr7 value set by the user */
unsigned long ptrace_dr7;
/* Fault info: */
@@ -696,6 +696,7 @@ extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
+extern void cpu_init_exception_handling(void);
extern void cr4_init(void);
static inline unsigned long get_debugctlmsr(void)
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 28996fe19301..2c35f1c01a2d 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -10,6 +10,7 @@ void syscall_init(void);
#ifdef CONFIG_X86_64
void entry_SYSCALL_64(void);
+void entry_SYSCALL_64_safe_stack(void);
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
#endif
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 40aa69d04862..d8324a236696 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -327,8 +327,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
static const unsigned int argument_offs[] = {
#ifdef __i386__
offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, cx),
#define NR_REG_ARGUMENTS 3
#else
offsetof(struct pt_regs, di),
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index b35030eeec36..5db5d083c873 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -21,6 +21,9 @@ struct real_mode_header {
/* SMP trampoline */
u32 trampoline_start;
u32 trampoline_header;
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ u32 sev_es_trampoline_start;
+#endif
#ifdef CONFIG_X86_64
u32 trampoline_pgd;
#endif
@@ -57,6 +60,9 @@ extern unsigned char real_mode_blob_end[];
extern unsigned long initial_code;
extern unsigned long initial_gs;
extern unsigned long initial_stack;
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+extern unsigned long initial_vc_handler;
+#endif
extern unsigned char real_mode_blob[];
extern unsigned char real_mode_relocs[];
@@ -66,6 +72,7 @@ extern unsigned char startup_32_smp[];
extern unsigned char boot_gdt[];
#else
extern unsigned char secondary_startup_64[];
+extern unsigned char secondary_startup_64_no_verify[];
#endif
static inline size_t real_mode_size_needed(void)
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 6847d85400a8..3ff0d48469f2 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -54,7 +54,7 @@
#endif
#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 9646c300f128..7fdd4facfce7 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -222,15 +222,11 @@
#endif
-#ifndef CONFIG_PARAVIRT_XXL
-# define get_kernel_rpl() 0
-#endif
-
#define IDT_ENTRIES 256
#define NUM_EXCEPTION_VECTORS 32
/* Bitmask of exception vectors which push an error code on the stack: */
-#define EXCEPTION_ERRCODE_MASK 0x00027d00
+#define EXCEPTION_ERRCODE_MASK 0x20027d00
#define GDT_SIZE (GDT_ENTRIES*8)
#define GDT_ENTRY_TLS_ENTRIES 3
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 84b645cc8bc9..7d7a064af6ff 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -39,6 +39,8 @@ void vsmp_init(void);
static inline void vsmp_init(void) { }
#endif
+struct pt_regs;
+
void setup_bios_corruption_check(void);
void early_platform_quirks(void);
@@ -48,7 +50,9 @@ extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
extern unsigned long __startup_secondary_64(void);
-extern int early_make_pgtable(unsigned long address);
+extern void startup_64_setup_env(unsigned long physbase);
+extern void early_setup_idt(void);
+extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
#ifdef CONFIG_X86_INTEL_MID
extern void x86_intel_mid_early_setup(void);
diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h
new file mode 100644
index 000000000000..cf1d957c7091
--- /dev/null
+++ b/arch/x86/include/asm/sev-es.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Encrypted Register State Support
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#ifndef __ASM_ENCRYPTED_STATE_H
+#define __ASM_ENCRYPTED_STATE_H
+
+#include <linux/types.h>
+#include <asm/insn.h>
+
+#define GHCB_SEV_INFO 0x001UL
+#define GHCB_SEV_INFO_REQ 0x002UL
+#define GHCB_INFO(v) ((v) & 0xfffUL)
+#define GHCB_PROTO_MAX(v) (((v) >> 48) & 0xffffUL)
+#define GHCB_PROTO_MIN(v) (((v) >> 32) & 0xffffUL)
+#define GHCB_PROTO_OUR 0x0001UL
+#define GHCB_SEV_CPUID_REQ 0x004UL
+#define GHCB_CPUID_REQ_EAX 0
+#define GHCB_CPUID_REQ_EBX 1
+#define GHCB_CPUID_REQ_ECX 2
+#define GHCB_CPUID_REQ_EDX 3
+#define GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
+ (((unsigned long)reg & 3) << 30) | \
+ (((unsigned long)fn) << 32))
+
+#define GHCB_PROTOCOL_MAX 0x0001UL
+#define GHCB_DEFAULT_USAGE 0x0000UL
+
+#define GHCB_SEV_CPUID_RESP 0x005UL
+#define GHCB_SEV_TERMINATE 0x100UL
+#define GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \
+ (((((u64)reason_set) & 0x7) << 12) | \
+ ((((u64)reason_val) & 0xff) << 16))
+#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
+#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
+
+#define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff)
+#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
+
+enum es_result {
+ ES_OK, /* All good */
+ ES_UNSUPPORTED, /* Requested operation not supported */
+ ES_VMM_ERROR, /* Unexpected state from the VMM */
+ ES_DECODE_FAILED, /* Instruction decoding failed */
+ ES_EXCEPTION, /* Instruction caused exception */
+ ES_RETRY, /* Retry instruction emulation */
+};
+
+struct es_fault_info {
+ unsigned long vector;
+ unsigned long error_code;
+ unsigned long cr2;
+};
+
+struct pt_regs;
+
+/* ES instruction emulation context */
+struct es_em_ctxt {
+ struct pt_regs *regs;
+ struct insn insn;
+ struct es_fault_info fi;
+};
+
+void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);
+
+static inline u64 lower_bits(u64 val, unsigned int bits)
+{
+ u64 mask = (1ULL << bits) - 1;
+
+ return (val & mask);
+}
+
+struct real_mode_header;
+enum stack_type;
+
+/* Early IDT entry points for #VC handler */
+extern void vc_no_ghcb(void);
+extern void vc_boot_ghcb(void);
+extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+extern struct static_key_false sev_es_enable_key;
+extern void __sev_es_ist_enter(struct pt_regs *regs);
+extern void __sev_es_ist_exit(void);
+static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
+{
+ if (static_branch_unlikely(&sev_es_enable_key))
+ __sev_es_ist_enter(regs);
+}
+static __always_inline void sev_es_ist_exit(void)
+{
+ if (static_branch_unlikely(&sev_es_enable_key))
+ __sev_es_ist_exit();
+}
+extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
+extern void __sev_es_nmi_complete(void);
+static __always_inline void sev_es_nmi_complete(void)
+{
+ if (static_branch_unlikely(&sev_es_enable_key))
+ __sev_es_nmi_complete();
+}
+extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
+#else
+static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+static inline void sev_es_ist_exit(void) { }
+static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
+static inline void sev_es_nmi_complete(void) { }
+static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
+#endif
+
+#endif
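As an illustration of the GHCB MSR protocol constants introduced above, a request for one CPUID register could be encoded as in the sketch below (hypothetical helper, assuming only the macros from this header):

    static u64 hypothetical_cpuid_req_val(unsigned int fn)
    {
            /* ask the hypervisor for EAX of CPUID leaf 'fn' via the GHCB MSR protocol */
            return GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX);
    }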
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 59a3e13204c3..cc177b4431ae 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -11,45 +11,47 @@
#include <linux/jump_label.h>
/*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialization
+ * The compiler should not reorder volatile asm statements with respect to each
+ * other: they should execute in program order. However GCC 4.9.x and 5.x have
+ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
+ * volatile asm. The write functions are not affected since they have memory
+ * clobbers preventing reordering. To prevent reads from being reordered with
+ * respect to writes, use a dummy memory operand.
*/
-extern unsigned long __force_order;
+
+#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
void native_write_cr0(unsigned long val);
static inline unsigned long native_read_cr0(void)
{
unsigned long val;
- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
return val;
}
static __always_inline unsigned long native_read_cr2(void)
{
unsigned long val;
- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
return val;
}
static __always_inline void native_write_cr2(unsigned long val)
{
- asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
+ asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}
static inline unsigned long __native_read_cr3(void)
{
unsigned long val;
- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
return val;
}
static inline void native_write_cr3(unsigned long val)
{
- asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
+ asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}
static inline unsigned long native_read_cr4(void)
@@ -64,10 +66,10 @@ static inline unsigned long native_read_cr4(void)
asm volatile("1: mov %%cr4, %0\n"
"2:\n"
_ASM_EXTABLE(1b, 2b)
- : "=r" (val), "=m" (__force_order) : "0" (0));
+ : "=r" (val) : "0" (0), __FORCE_ORDER);
#else
/* CR4 always exists on x86_64. */
- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
#endif
return val;
}
@@ -234,6 +236,76 @@ static inline void clwb(volatile void *__p)
#define nop() asm volatile ("nop")
+static inline void serialize(void)
+{
+ /* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
+ asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
+}
+
+/* The dst parameter must be 64-bytes aligned */
+static inline void movdir64b(void *dst, const void *src)
+{
+ const struct { char _[64]; } *__src = src;
+ struct { char _[64]; } *__dst = dst;
+
+ /*
+ * MOVDIR64B %(rdx), rax.
+ *
+ * Both __src and __dst must be memory constraints in order to tell the
+ * compiler that no other memory accesses should be reordered around
+ * this one.
+ *
+	 * Also, both must be supplied as lvalues so that the compiler knows the
+	 * size of the object the instruction accesses, i.e. not the pointers
+	 * themselves but what they point to, hence the dereferencing '*'.
+ */
+ asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+ : "+m" (*__dst)
+ : "m" (*__src), "a" (__dst), "d" (__src));
+}
+
+/**
+ * enqcmds - Enqueue a command in supervisor (CPL0) mode
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
+ * @src: 512 bits memory operand
+ *
+ * The ENQCMDS instruction allows software to write a 512-bit command to
+ * a 512-bit-aligned special MMIO region that supports the instruction.
+ * A return status is loaded into the ZF flag in the RFLAGS register.
+ * ZF = 0 equates to success, and ZF = 1 indicates retry or error.
+ *
+ * This function issues the ENQCMDS instruction to submit data from
+ * kernel space to MMIO space, in a unit of 512 bits. Order of data access
+ * is not guaranteed, nor is a memory barrier performed afterwards. It
+ * returns 0 on success and -EAGAIN on failure.
+ *
+ * Warning: Do not use this helper unless your driver has checked that the
+ * ENQCMDS instruction is supported on the platform and the device accepts
+ * ENQCMDS.
+ */
+static inline int enqcmds(void __iomem *dst, const void *src)
+{
+ const struct { char _[64]; } *__src = src;
+ struct { char _[64]; } *__dst = dst;
+ int zf;
+
+ /*
+ * ENQCMDS %(rdx), rax
+ *
+ * See movdir64b()'s comment on operand specification.
+ */
+ asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
+ CC_SET(z)
+ : CC_OUT(z) (zf), "+m" (*__dst)
+ : "m" (*__src), "a" (__dst), "d" (__src));
+
+ /* Submission failure is indicated via EFLAGS.ZF=1 */
+ if (zf)
+ return -EAGAIN;
+
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_SPECIAL_INSNS_H */
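A minimal usage sketch for enqcmds(), assuming a hypothetical driver with an MMIO work-queue portal and a 64-byte-aligned descriptor; it only shows the -EAGAIN retry contract documented above:

    static int hypothetical_submit_desc(void __iomem *wq_portal, const void *desc)
    {
            int retries = 5;
            int ret;

            do {
                    ret = enqcmds(wq_portal, desc); /* desc is 512 bits, suitably aligned */
            } while (ret == -EAGAIN && --retries);

            return ret;
    }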
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 5ae5a68e469d..49600643faba 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -35,6 +35,8 @@ bool in_entry_stack(unsigned long *stack, struct stack_info *info);
int get_stack_info(unsigned long *stack, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask);
+bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
+ struct stack_info *info);
const char *stack_type_name(enum stack_type type);
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
new file mode 100644
index 000000000000..c37f11999d0c
--- /dev/null
+++ b/arch/x86/include/asm/static_call.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STATIC_CALL_H
+#define _ASM_STATIC_CALL_H
+
+#include <asm/text-patching.h>
+
+/*
+ * For CONFIG_HAVE_STATIC_CALL_INLINE, this is a temporary trampoline which
+ * uses the current value of the key->func pointer to do an indirect jump to
+ * the function. This trampoline is only used during boot, before the call
+ * sites get patched by static_call_update(). The name of this trampoline has
+ * a magical aspect: objtool uses it to find static call sites so it can create
+ * the .static_call_sites section.
+ *
+ * For CONFIG_HAVE_STATIC_CALL, this is a permanent trampoline which
+ * does a direct jump to the function. The direct jump gets patched by
+ * static_call_update().
+ *
+ * Having the trampoline in a special section forces GCC to emit a JMP.d32 when
+ * it does tail-call optimization on the call, since the relative displacement
+ * across sections cannot be computed.
+ */
+
+#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns) \
+ asm(".pushsection .static_call.text, \"ax\" \n" \
+ ".align 4 \n" \
+ ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ insns " \n" \
+ ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
+ ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
+ ".popsection \n")
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
+
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
+
+#endif /* _ASM_STATIC_CALL_H */
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 75314c3dbe47..6e450827f677 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -82,38 +82,6 @@ int strcmp(const char *cs, const char *ct);
#endif
-#define __HAVE_ARCH_MEMCPY_MCSAFE 1
-__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
- size_t cnt);
-DECLARE_STATIC_KEY_FALSE(mcsafe_key);
-
-/**
- * memcpy_mcsafe - copy memory with indication if a machine check happened
- *
- * @dst: destination address
- * @src: source address
- * @cnt: number of bytes to copy
- *
- * Low level memory copy function that catches machine checks
- * We only call into the "safe" function on systems that can
- * actually do machine check recovery. Everyone else can just
- * use memcpy().
- *
- * Return 0 for success, or number of bytes not copied if there was an
- * exception.
- */
-static __always_inline __must_check unsigned long
-memcpy_mcsafe(void *dst, const void *src, size_t cnt)
-{
-#ifdef CONFIG_X86_MCE
- if (static_branch_unlikely(&mcsafe_key))
- return __memcpy_mcsafe(dst, src, cnt);
- else
-#endif
- memcpy(dst, src, cnt);
- return 0;
-}
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 8a1f5382a4ea..cf13f9e78585 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -150,14 +150,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
-struct __attribute__ ((__packed__)) vmcb_seg {
+struct vmcb_seg {
u16 selector;
u16 attrib;
u32 limit;
u64 base;
-};
+} __packed;
-struct __attribute__ ((__packed__)) vmcb_save_area {
+struct vmcb_save_area {
struct vmcb_seg es;
struct vmcb_seg cs;
struct vmcb_seg ss;
@@ -200,20 +200,67 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
u64 br_to;
u64 last_excp_from;
u64 last_excp_to;
-};
+ /*
+ * The following part of the save area is valid only for
+ * SEV-ES guests when referenced through the GHCB.
+ */
+ u8 reserved_7[104];
+ u64 reserved_8; /* rax already available at 0x01f8 */
+ u64 rcx;
+ u64 rdx;
+ u64 rbx;
+ u64 reserved_9; /* rsp already available at 0x01d8 */
+ u64 rbp;
+ u64 rsi;
+ u64 rdi;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+ u8 reserved_10[16];
+ u64 sw_exit_code;
+ u64 sw_exit_info_1;
+ u64 sw_exit_info_2;
+ u64 sw_scratch;
+ u8 reserved_11[56];
+ u64 xcr0;
+ u8 valid_bitmap[16];
+ u64 x87_state_gpa;
+} __packed;
+
+struct ghcb {
+ struct vmcb_save_area save;
+ u8 reserved_save[2048 - sizeof(struct vmcb_save_area)];
+
+ u8 shared_buffer[2032];
+
+ u8 reserved_1[10];
+ u16 protocol_version; /* negotiated SEV-ES/GHCB protocol version */
+ u32 ghcb_usage;
+} __packed;
+
+
+#define EXPECTED_VMCB_SAVE_AREA_SIZE 1032
+#define EXPECTED_VMCB_CONTROL_AREA_SIZE 256
+#define EXPECTED_GHCB_SIZE PAGE_SIZE
static inline void __unused_size_checks(void)
{
- BUILD_BUG_ON(sizeof(struct vmcb_save_area) != 0x298);
- BUILD_BUG_ON(sizeof(struct vmcb_control_area) != 256);
+ BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE);
+ BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
+ BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
}
-struct __attribute__ ((__packed__)) vmcb {
+struct vmcb {
struct vmcb_control_area control;
u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
struct vmcb_save_area save;
-};
+} __packed;
#define SVM_CPUID_FUNC 0x8000000a
@@ -298,4 +345,47 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
+/* GHCB Accessor functions */
+
+#define GHCB_BITMAP_IDX(field) \
+ (offsetof(struct vmcb_save_area, field) / sizeof(u64))
+
+#define DEFINE_GHCB_ACCESSORS(field) \
+ static inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \
+ { \
+ return test_bit(GHCB_BITMAP_IDX(field), \
+ (unsigned long *)&ghcb->save.valid_bitmap); \
+ } \
+ \
+ static inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \
+ { \
+ __set_bit(GHCB_BITMAP_IDX(field), \
+ (unsigned long *)&ghcb->save.valid_bitmap); \
+ ghcb->save.field = value; \
+ }
+
+DEFINE_GHCB_ACCESSORS(cpl)
+DEFINE_GHCB_ACCESSORS(rip)
+DEFINE_GHCB_ACCESSORS(rsp)
+DEFINE_GHCB_ACCESSORS(rax)
+DEFINE_GHCB_ACCESSORS(rcx)
+DEFINE_GHCB_ACCESSORS(rdx)
+DEFINE_GHCB_ACCESSORS(rbx)
+DEFINE_GHCB_ACCESSORS(rbp)
+DEFINE_GHCB_ACCESSORS(rsi)
+DEFINE_GHCB_ACCESSORS(rdi)
+DEFINE_GHCB_ACCESSORS(r8)
+DEFINE_GHCB_ACCESSORS(r9)
+DEFINE_GHCB_ACCESSORS(r10)
+DEFINE_GHCB_ACCESSORS(r11)
+DEFINE_GHCB_ACCESSORS(r12)
+DEFINE_GHCB_ACCESSORS(r13)
+DEFINE_GHCB_ACCESSORS(r14)
+DEFINE_GHCB_ACCESSORS(r15)
+DEFINE_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_GHCB_ACCESSORS(sw_scratch)
+DEFINE_GHCB_ACCESSORS(xcr0)
+
#endif
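To show how the generated GHCB accessors are intended to be used, here is a sketch of filling the exit information before a VMGEXIT; the function name is hypothetical, and 'ghcb' is assumed to point at a per-CPU GHCB page:

    static void hypothetical_ghcb_prepare(struct ghcb *ghcb, u64 exit_code,
                                          u64 exit_info_1, u64 exit_info_2)
    {
            /* each setter also marks its field in save.valid_bitmap */
            ghcb_set_sw_exit_code(ghcb, exit_code);
            ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
            ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
    }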
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index fdb5b356e59b..0fd4a9dfb29c 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -5,6 +5,7 @@
#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#include <asm/special_insns.h>
#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
@@ -46,22 +47,34 @@ static inline void iret_to_self(void)
*
* b) Text was modified on a different CPU, may subsequently be
* executed on this CPU, and you want to make sure the new version
- * gets executed. This generally means you're calling this in a IPI.
+ * gets executed. This generally means you're calling this in an IPI.
*
* If you're calling this for a different reason, you're probably doing
* it wrong.
+ *
+ * Like all of Linux's memory ordering operations, this is a
+ * compiler barrier as well.
*/
static inline void sync_core(void)
{
/*
- * There are quite a few ways to do this. IRET-to-self is nice
- * because it works on every CPU, at any CPL (so it's compatible
- * with paravirtualization), and it never exits to a hypervisor.
- * The only down sides are that it's a bit slow (it seems to be
- * a bit more than 2x slower than the fastest options) and that
- * it unmasks NMIs. The "push %cs" is needed because, in
- * paravirtual environments, __KERNEL_CS may not be a valid CS
- * value when we do IRET directly.
+ * The SERIALIZE instruction is the most straightforward way to
+ * do this, but it is not universally available.
+ */
+ if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
+ serialize();
+ return;
+ }
+
+ /*
+ * For all other processors, there are quite a few ways to do this.
+ * IRET-to-self is nice because it works on every CPU, at any CPL
+ * (so it's compatible with paravirtualization), and it never exits
+ * to a hypervisor. The only downsides are that it's a bit slow
+ * (it seems to be a bit more than 2x slower than the fastest
+ * options) and that it unmasks NMIs. The "push %cs" is needed,
+ * because in paravirtual environments __KERNEL_CS may not be a
+ * valid CS value when we do IRET directly.
*
* In case NMI unmasking or performance ever becomes a problem,
* the next best option appears to be MOV-to-CR2 and an
@@ -71,9 +84,6 @@ static inline void sync_core(void)
* CPUID is the conventional way, but it's nasty: it doesn't
* exist on some 486-like CPUs, and it usually exits to a
* hypervisor.
- *
- * Like all of Linux's memory ordering operations, this is a
- * compiler barrier as well.
*/
iret_to_self();
}
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 6593b42cb379..b7421780e4e9 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -53,6 +53,9 @@ extern void text_poke_finish(void);
#define INT3_INSN_SIZE 1
#define INT3_INSN_OPCODE 0xCC
+#define RET_INSN_SIZE 1
+#define RET_INSN_OPCODE 0xC3
+
#define CALL_INSN_SIZE 5
#define CALL_INSN_OPCODE 0xE8
@@ -73,6 +76,7 @@ static __always_inline int text_opcode_size(u8 opcode)
switch(opcode) {
__CASE(INT3);
+ __CASE(RET);
__CASE(CALL);
__CASE(JMP32);
__CASE(JMP8);
@@ -141,11 +145,26 @@ void int3_emulate_push(struct pt_regs *regs, unsigned long val)
}
static __always_inline
+unsigned long int3_emulate_pop(struct pt_regs *regs)
+{
+ unsigned long val = *(unsigned long *)regs->sp;
+ regs->sp += sizeof(unsigned long);
+ return val;
+}
+
+static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
int3_emulate_jmp(regs, func);
}
+
+static __always_inline
+void int3_emulate_ret(struct pt_regs *regs)
+{
+ unsigned long ip = int3_emulate_pop(regs);
+ int3_emulate_jmp(regs, ip);
+}
#endif /* !CONFIG_UML_X86 */
#endif /* _ASM_X86_TEXT_PATCHING_H */
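A sketch of how the new RET opcode and int3_emulate_ret() slot into an int3 dispatch alongside the existing CALL/JMP helpers (the dispatcher name is hypothetical):

    static void hypothetical_int3_dispatch(struct pt_regs *regs, u8 opcode,
                                           unsigned long target)
    {
            switch (opcode) {
            case RET_INSN_OPCODE:           /* 0xC3 */
                    int3_emulate_ret(regs);
                    break;
            case CALL_INSN_OPCODE:          /* 0xE8 */
                    int3_emulate_call(regs, target);
                    break;
            case JMP32_INSN_OPCODE:         /* 0xE9 */
                    int3_emulate_jmp(regs, target);
                    break;
            }
    }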
diff --git a/arch/x86/include/asm/trap_pf.h b/arch/x86/include/asm/trap_pf.h
new file mode 100644
index 000000000000..305bc1214aef
--- /dev/null
+++ b/arch/x86/include/asm/trap_pf.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_TRAP_PF_H
+#define _ASM_X86_TRAP_PF_H
+
+/*
+ * Page fault error code bits:
+ *
+ * bit 0 == 0: no page found 1: protection fault
+ * bit 1 == 0: read access 1: write access
+ * bit 2 == 0: kernel-mode access 1: user-mode access
+ * bit 3 == 1: use of reserved bit detected
+ * bit 4 == 1: fault was an instruction fetch
+ * bit 5 == 1: protection keys block access
+ */
+enum x86_pf_error_code {
+ X86_PF_PROT = 1 << 0,
+ X86_PF_WRITE = 1 << 1,
+ X86_PF_USER = 1 << 2,
+ X86_PF_RSVD = 1 << 3,
+ X86_PF_INSTR = 1 << 4,
+ X86_PF_PK = 1 << 5,
+};
+
+#endif /* _ASM_X86_TRAP_PF_H */
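A small example of decoding a #PF error code with the relocated bits (hypothetical helper, purely illustrative):

    static bool hypothetical_is_user_write_fault(unsigned long error_code)
    {
            return (error_code & (X86_PF_USER | X86_PF_WRITE)) ==
                   (X86_PF_USER | X86_PF_WRITE);
    }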
diff --git a/arch/x86/include/asm/trapnr.h b/arch/x86/include/asm/trapnr.h
index 082f45631fa9..f5d2325aa0b7 100644
--- a/arch/x86/include/asm/trapnr.h
+++ b/arch/x86/include/asm/trapnr.h
@@ -26,6 +26,7 @@
#define X86_TRAP_XF 19 /* SIMD Floating-Point Exception */
#define X86_TRAP_VE 20 /* Virtualization Exception */
#define X86_TRAP_CP 21 /* Control Protection Exception */
+#define X86_TRAP_VC 29 /* VMM Communication Exception */
#define X86_TRAP_IRET 32 /* IRET Exception */
#endif
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 714b1a30e7b0..7f7200021bd1 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -8,12 +8,14 @@
#include <asm/debugreg.h>
#include <asm/idtentry.h>
#include <asm/siginfo.h> /* TRAP_TRACE, ... */
+#include <asm/trap_pf.h>
#ifdef CONFIG_X86_64
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
void __init trap_init(void);
+asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs);
#endif
#ifdef CONFIG_X86_F00F_BUG
@@ -35,28 +37,12 @@ extern int panic_on_unrecovered_nmi;
void math_emulate(struct math_emu_info *);
+bool fault_in_kernel_space(unsigned long address);
+
#ifdef CONFIG_VMAP_STACK
void __noreturn handle_stack_overflow(const char *message,
struct pt_regs *regs,
unsigned long fault_address);
#endif
-/*
- * Page fault error code bits:
- *
- * bit 0 == 0: no page found 1: protection fault
- * bit 1 == 0: read access 1: write access
- * bit 2 == 0: kernel-mode access 1: user-mode access
- * bit 3 == 1: use of reserved bit detected
- * bit 4 == 1: fault was an instruction fetch
- * bit 5 == 1: protection keys block access
- */
-enum x86_pf_error_code {
- X86_PF_PROT = 1 << 0,
- X86_PF_WRITE = 1 << 1,
- X86_PF_USER = 1 << 2,
- X86_PF_RSVD = 1 << 3,
- X86_PF_INSTR = 1 << 4,
- X86_PF_PK = 1 << 5,
-};
#endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index ecefaffd15d4..477c503f2753 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -96,25 +96,14 @@ static inline bool pagefault_disabled(void);
likely(!__range_not_ok(addr, size, user_addr_max())); \
})
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
+extern int __get_user_nocheck_1(void);
+extern int __get_user_nocheck_2(void);
+extern int __get_user_nocheck_4(void);
+extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);
#define __uaccess_begin() stac()
@@ -138,25 +127,12 @@ extern int __get_user_bad(void);
#define __typefits(x,type,not) \
__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
-/**
- * get_user - Get a simple variable from user space.
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Return: zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
/*
+ * This is used for both get_user() and __get_user() to expand to
+ * the proper special function call that has odd calling conventions
+ * due to returning both a value and an error, and that depends on
+ * the size of the pointer passed in.
+ *
* Careful: we have to cast the result to the type of the pointer
* for sign reasons.
*
@@ -169,13 +145,12 @@ extern int __get_user_bad(void);
* Clang/LLVM cares about the size of the register, but still wants
* the base register for something that ends up being a pair.
*/
-#define get_user(x, ptr) \
+#define do_get_user_call(fn,x,ptr) \
({ \
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
- might_fault(); \
- asm volatile("call __get_user_%P4" \
+ asm volatile("call __" #fn "_%P4" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
@@ -183,10 +158,48 @@ extern int __get_user_bad(void);
__builtin_expect(__ret_gu, 0); \
})
-#define __put_user_x(size, x, ptr, __ret_pu) \
- asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
- : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+/**
+ * get_user - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Return: zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
+/**
+ * __get_user - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Return: zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
#ifdef CONFIG_X86_32
@@ -199,25 +212,41 @@ extern int __get_user_bad(void);
: : "A" (x), "r" (addr) \
: : label)
-#define __put_user_x8(x, ptr, __ret_pu) \
- asm volatile("call __put_user_8" : "=a" (__ret_pu) \
- : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
__put_user_goto(x, ptr, "q", "er", label)
-#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
extern void __put_user_bad(void);
/*
* Strange magic calling convention: pointer in %ecx,
- * value in %eax(:%edx), return value in %eax. clobbers %rbx
+ * value in %eax(:%edx), return value in %ecx. clobbers %rbx
*/
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
+extern void __put_user_nocheck_1(void);
+extern void __put_user_nocheck_2(void);
+extern void __put_user_nocheck_4(void);
+extern void __put_user_nocheck_8(void);
+
+#define do_put_user_call(fn,x,ptr) \
+({ \
+ int __ret_pu; \
+ register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX); \
+ __chk_user_ptr(ptr); \
+ __val_pu = (x); \
+ asm volatile("call __" #fn "_%P[size]" \
+ : "=c" (__ret_pu), \
+ ASM_CALL_CONSTRAINT \
+ : "0" (ptr), \
+ "r" (__val_pu), \
+ [size] "i" (sizeof(*(ptr))) \
+ :"ebx"); \
+ __builtin_expect(__ret_pu, 0); \
+})
/**
* put_user - Write a simple value into user space.
@@ -236,32 +265,29 @@ extern void __put_user_8(void);
*
* Return: zero on success, or -EFAULT on error.
*/
-#define put_user(x, ptr) \
-({ \
- int __ret_pu; \
- __typeof__(*(ptr)) __pu_val; \
- __chk_user_ptr(ptr); \
- might_fault(); \
- __pu_val = x; \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __put_user_x(1, __pu_val, ptr, __ret_pu); \
- break; \
- case 2: \
- __put_user_x(2, __pu_val, ptr, __ret_pu); \
- break; \
- case 4: \
- __put_user_x(4, __pu_val, ptr, __ret_pu); \
- break; \
- case 8: \
- __put_user_x8(__pu_val, ptr, __ret_pu); \
- break; \
- default: \
- __put_user_x(X, __pu_val, ptr, __ret_pu); \
- break; \
- } \
- __builtin_expect(__ret_pu, 0); \
-})
+#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
+
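Correspondingly, a minimal hypothetical sketch of put_user() returning a result to user space; only put_user() and -EFAULT are taken from the interface above.

/* Hypothetical: report a driver's current count back to user space. */
static int report_count(unsigned int count, unsigned int __user *uptr)
{
	/* put_user() checks the pointer itself and may sleep on a fault. */
	return put_user(count, uptr) ? -EFAULT : 0;
}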
+/**
+ * __put_user - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Return: zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
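And the matching __put_user() pattern after a single access_ok(); the structure layout and function are made up for illustration.

/* Hypothetical: fill a small user-supplied result structure. */
struct result_u {
	u32 status;
	u32 value;
};

static int write_result(struct result_u __user *ures, u32 status, u32 value)
{
	if (!access_ok(ures, sizeof(*ures)))
		return -EFAULT;

	if (__put_user(status, &ures->status) ||
	    __put_user(value, &ures->value))
		return -EFAULT;

	return 0;
}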
#define __put_user_size(x, ptr, size, label) \
do { \
@@ -284,6 +310,55 @@ do { \
} \
} while (0)
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#ifdef CONFIG_X86_32
+#define __get_user_asm_u64(x, ptr, label) do { \
+ unsigned int __gu_low, __gu_high; \
+ const unsigned int __user *__gu_ptr; \
+ __gu_ptr = (const void __user *)(ptr); \
+ __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
+ __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
+ (x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
+} while (0)
+#else
+#define __get_user_asm_u64(x, ptr, label) \
+ __get_user_asm(x, ptr, "q", "=r", label)
+#endif
+
+#define __get_user_size(x, ptr, size, label) \
+do { \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ unsigned char x_u8__; \
+ case 1: \
+ __get_user_asm(x_u8__, ptr, "b", "=q", label); \
+ (x) = x_u8__; \
+ break; \
+ case 2: \
+ __get_user_asm(x, ptr, "w", "=r", label); \
+ break; \
+ case 4: \
+ __get_user_asm(x, ptr, "l", "=r", label); \
+ break; \
+ case 8: \
+ __get_user_asm_u64(x, ptr, label); \
+ break; \
+ default: \
+ (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm(x, addr, itype, ltype, label) \
+ asm_volatile_goto("\n" \
+ "1: mov"itype" %[umem],%[output]\n" \
+ _ASM_EXTABLE_UA(1b, %l2) \
+ : [output] ltype(x) \
+ : [umem] "m" (__m(addr)) \
+ : : label)
+
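The __get_user_asm() form above relies on "asm goto" with output operands, hence the CONFIG_CC_HAS_ASM_GOTO_OUTPUT guard: the compiler is told the asm may branch to a C label while still producing a register output. Below is a stripped-down, hypothetical sketch of the compiler feature itself; in the kernel the error label is reached through the _ASM_EXTABLE_UA fixup at fault time, not by a branch written in the template, and the feature needs a suitably new compiler (e.g. GCC 11+ or recent Clang).

/* Hypothetical sketch of an asm-goto-with-outputs statement. */
static inline int load_u32(const unsigned int *src, unsigned int *dst)
{
	asm goto("movl %[in], %[out]\n"
		 : [out] "=r" (*dst)		/* output operand */
		 : [in] "m" (*src)		/* memory input */
		 : /* no clobbers */
		 : fault);			/* possible branch target */
	return 0;

fault:
	/* Reached only if something redirects execution here (e.g. a fixup). */
	return -1;
}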
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval) \
({ \
@@ -343,7 +418,7 @@ do { \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %[efault],%[errout]\n" \
- " xor"itype" %[output],%[output]\n" \
+ " xorl %k[output],%k[output]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_UA(1b, 3b) \
@@ -352,33 +427,7 @@ do { \
: [umem] "m" (__m(addr)), \
[efault] "i" (-EFAULT), "0" (err))
-#define __put_user_nocheck(x, ptr, size) \
-({ \
- __label__ __pu_label; \
- int __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __pu_val = (x); \
- __typeof__(ptr) __pu_ptr = (ptr); \
- __typeof__(size) __pu_size = (size); \
- __uaccess_begin(); \
- __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
- __pu_err = 0; \
-__pu_label: \
- __uaccess_end(); \
- __builtin_expect(__pu_err, 0); \
-})
-
-#define __get_user_nocheck(x, ptr, size) \
-({ \
- int __gu_err; \
- __inttype(*(ptr)) __gu_val; \
- __typeof__(ptr) __gu_ptr = (ptr); \
- __typeof__(size) __gu_size = (size); \
- __uaccess_begin_nospec(); \
- __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err); \
- __uaccess_end(); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
- __builtin_expect(__gu_err, 0); \
-})
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
@@ -396,55 +445,6 @@ struct __large_struct { unsigned long buf[100]; };
: : ltype(x), "m" (__m(addr)) \
: : label)
-/**
- * __get_user - Get a simple variable from user space, with less checking.
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Return: zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-
-#define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-
-/**
- * __put_user - Write a simple value into user space, with less checking.
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Return: zero on success, or -EFAULT on error.
- */
-
-#define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
@@ -455,6 +455,15 @@ extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned len);
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+unsigned long __must_check
+copy_mc_to_user(void *to, const void *from, unsigned len);
+#endif
+
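copy_mc_to_kernel() is the machine-check-aware copy used when the source (for example persistent memory) may contain poisoned lines. A hedged sketch of a caller follows; the function and buffer names are made up, and the assumption, by analogy with copy_from_user(), is that the return value is the number of bytes left uncopied.

/* Hypothetical: copy a block out of pmem, tolerating a poisoned line. */
static int read_pmem_block(void *dst, const void *pmem_src, size_t len)
{
	unsigned long rem;

	/* Assumed: 0 on success, otherwise bytes NOT copied (see lead-in). */
	rem = copy_mc_to_kernel(dst, pmem_src, len);
	if (rem)
		return -EIO;	/* a machine check consumed part of the source */

	return 0;
}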
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
@@ -494,6 +503,14 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
#define unsafe_put_user(x, ptr, label) \
__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define unsafe_get_user(x, ptr, err_label) \
+do { \
+ __inttype(*(ptr)) __gu_val; \
+ __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+} while (0)
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label) \
do { \
int __gu_err; \
@@ -502,6 +519,7 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
if (unlikely(__gu_err)) goto err_label; \
} while (0)
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
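The unsafe_*_user() accessors are meant to run inside a user_access_begin()/user_access_end() window and jump to a local error label on a fault. A minimal hypothetical sketch of that pattern:

/* Hypothetical: read a user counter and write back its successor. */
static int bump_user_counter(unsigned int __user *uptr)
{
	unsigned int val;

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;

	unsafe_get_user(val, uptr, efault);
	unsafe_put_user(val + 1, uptr, efault);

	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}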
/*
* We want the unsafe accessors to always be inlined and use
@@ -528,6 +546,11 @@ do { \
#define HAVE_GET_KERNEL_NOFAULT
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_kernel_nofault(dst, src, type, err_label) \
+ __get_user_size(*((type *)(dst)), (__force type __user *)(src), \
+ sizeof(type), err_label)
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
int __kr_err; \
@@ -537,6 +560,7 @@ do { \
if (unlikely(__kr_err)) \
goto err_label; \
} while (0)
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __put_kernel_nofault(dst, src, type, err_label) \
__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
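__get_kernel_nofault() above is the arch hook behind the generic nofault helpers in mm/maccess.c; ordinary code goes through those wrappers rather than calling it directly. A hypothetical sketch using copy_from_kernel_nofault(), which fails gracefully instead of oopsing when the kernel address turns out to be bad:

/* Hypothetical: peek at a possibly-unmapped kernel address (e.g. from a
 * tracer or debugger) without risking a fault-induced oops. */
static int peek_kernel_long(const void *addr, unsigned long *out)
{
	unsigned long val;

	if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
		return -EFAULT;

	*out = val;
	return 0;
}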
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index bc10e3dc64fe..e7265a552f4f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -47,22 +47,6 @@ copy_user_generic(void *to, const void *from, unsigned len)
}
static __always_inline __must_check unsigned long
-copy_to_user_mcsafe(void *to, const void *from, unsigned len)
-{
- unsigned long ret;
-
- __uaccess_begin();
- /*
- * Note, __memcpy_mcsafe() is explicitly used since it can
- * handle exceptions / faults. memcpy_mcsafe() may fall back to
- * memcpy() which lacks this handling.
- */
- ret = __memcpy_mcsafe(to, from, len);
- __uaccess_end();
- return ret;
-}
-
-static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
return copy_user_generic(dst, (__force void *)src, size);
@@ -102,8 +86,4 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
kasan_check_write(dst, size);
return __copy_user_flushcache(dst, src, size);
}
-
-unsigned long
-mcsafe_handle_tail(char *to, char *from, unsigned len);
-
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 7d903fdb3f43..664d4610d700 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -1,51 +1,17 @@
#ifndef _ASM_X86_UNWIND_HINTS_H
#define _ASM_X86_UNWIND_HINTS_H
+#include <linux/objtool.h>
+
#include "orc_types.h"
#ifdef __ASSEMBLY__
-/*
- * In asm, there are two kinds of code: normal C-type callable functions and
- * the rest. The normal callable functions can be called by other code, and
- * don't do anything unusual with the stack. Such normal callable functions
- * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
- * category. In this case, no special debugging annotations are needed because
- * objtool can automatically generate the ORC data for the ORC unwinder to read
- * at runtime.
- *
- * Anything which doesn't fall into the above category, such as syscall and
- * interrupt handlers, tends to not be called directly by other functions, and
- * often does unusual non-C-function-type things with the stack pointer. Such
- * code needs to be annotated such that objtool can understand it. The
- * following CFI hint macros are for this type of code.
- *
- * These macros provide hints to objtool about the state of the stack at each
- * instruction. Objtool starts from the hints and follows the code flow,
- * making automatic CFI adjustments when it sees pushes and pops, filling out
- * the debuginfo as necessary. It will also warn if it sees any
- * inconsistencies.
- */
-.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL end=0
-#ifdef CONFIG_STACK_VALIDATION
-.Lunwind_hint_ip_\@:
- .pushsection .discard.unwind_hints
- /* struct unwind_hint */
- .long .Lunwind_hint_ip_\@ - .
- .short \sp_offset
- .byte \sp_reg
- .byte \type
- .byte \end
- .balign 4
- .popsection
-#endif
-.endm
-
.macro UNWIND_HINT_EMPTY
- UNWIND_HINT sp_reg=ORC_REG_UNDEFINED end=1
+ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1
.endm
-.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
+.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0
.if \base == %rsp
.if \indirect
.set sp_reg, ORC_REG_SP_INDIRECT
@@ -66,24 +32,24 @@
.set sp_offset, \offset
- .if \iret
- .set type, ORC_TYPE_REGS_IRET
+ .if \partial
+ .set type, UNWIND_HINT_TYPE_REGS_PARTIAL
.elseif \extra == 0
- .set type, ORC_TYPE_REGS_IRET
+ .set type, UNWIND_HINT_TYPE_REGS_PARTIAL
.set sp_offset, \offset + (16*8)
.else
- .set type, ORC_TYPE_REGS
+ .set type, UNWIND_HINT_TYPE_REGS
.endif
UNWIND_HINT sp_reg=sp_reg sp_offset=sp_offset type=type
.endm
.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0
- UNWIND_HINT_REGS base=\base offset=\offset iret=1
+ UNWIND_HINT_REGS base=\base offset=\offset partial=1
.endm
.macro UNWIND_HINT_FUNC sp_offset=8
- UNWIND_HINT sp_offset=\sp_offset
+ UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=\sp_offset type=UNWIND_HINT_TYPE_CALL
.endm
/*
@@ -92,7 +58,7 @@
* initial_func_cfi.
*/
.macro UNWIND_HINT_RET_OFFSET sp_offset=8
- UNWIND_HINT type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset
+ UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 70050d0136c3..08b3d810dfba 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -5,8 +5,9 @@
/*
* UV BIOS layer definitions.
*
- * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Russ Anderson <rja@sgi.com>
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
*/
#include <linux/rtc.h>
@@ -71,6 +72,11 @@ struct uv_gam_range_entry {
u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
};
+#define UV_AT_SIZE 8 /* 7 character arch type + NULL char */
+struct uv_arch_type_entry {
+ char archtype[UV_AT_SIZE];
+};
+
#define UV_SYSTAB_SIG "UVST"
#define UV_SYSTAB_VERSION_1 1 /* UV2/3 BIOS version */
#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */
@@ -79,10 +85,14 @@ struct uv_gam_range_entry {
#define UV_SYSTAB_VERSION_UV4_3 0x403 /* - GAM Range PXM Value */
#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_3
+#define UV_SYSTAB_VERSION_UV5 0x500 /* UV5 GAM base version */
+#define UV_SYSTAB_VERSION_UV5_LATEST UV_SYSTAB_VERSION_UV5
+
#define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */
#define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */
#define UV_SYSTAB_TYPE_GAM_RNG_TBL 2 /* GAM entry table */
-#define UV_SYSTAB_TYPE_MAX 3
+#define UV_SYSTAB_TYPE_ARCH_TYPE 3 /* UV arch type */
+#define UV_SYSTAB_TYPE_MAX 4
/*
* The UV system table describes specific firmware
@@ -133,6 +143,7 @@ extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus);
extern int uv_bios_init(void);
+extern unsigned long get_uv_systab_phys(bool msg);
extern unsigned long sn_rtc_cycles_per_second;
extern int uv_type;
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index e48aea9ba47d..172d3e4a9e4b 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -35,10 +35,8 @@ extern int is_uv_hubbed(int uvtype);
extern void uv_cpu_init(void);
extern void uv_nmi_init(void);
extern void uv_system_init(void);
-extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info);
-#else /* X86_UV */
+#else /* !X86_UV */
static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline bool is_early_uv_system(void) { return 0; }
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
deleted file mode 100644
index cd24804955d7..000000000000
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * SGI UV Broadcast Assist Unit definitions
- *
- * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_X86_UV_UV_BAU_H
-#define _ASM_X86_UV_UV_BAU_H
-
-#include <linux/bitmap.h>
-#include <asm/idtentry.h>
-
-#define BITSPERBYTE 8
-
-/*
- * Broadcast Assist Unit messaging structures
- *
- * Selective Broadcast activations are induced by software action
- * specifying a particular 8-descriptor "set" via a 6-bit index written
- * to an MMR.
- * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
- * each 6-bit index value. These descriptor sets are mapped in sequence
- * starting with set 0 located at the address specified in the
- * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
- * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
- *
- * We will use one set for sending BAU messages from each of the
- * cpu's on the uvhub.
- *
- * TLB shootdown will use the first of the 8 descriptors of each set.
- * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
- */
-
-#define MAX_CPUS_PER_UVHUB 128
-#define MAX_CPUS_PER_SOCKET 64
-#define ADP_SZ 64 /* hardware-provided max. */
-#define UV_CPUS_PER_AS 32 /* hardware-provided max. */
-#define ITEMS_PER_DESC 8
-/* the 'throttle' to prevent the hardware stay-busy bug */
-#define MAX_BAU_CONCURRENT 3
-#define UV_ACT_STATUS_MASK 0x3
-#define UV_ACT_STATUS_SIZE 2
-#define UV_DISTRIBUTION_SIZE 256
-#define UV_SW_ACK_NPENDING 8
-#define UV_NET_ENDPOINT_INTD 0x28
-#define UV_PAYLOADQ_GNODE_SHIFT 49
-#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
-#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
-#define UV_BAU_TUNABLES_DIR "sgi_uv"
-#define UV_BAU_TUNABLES_FILE "bau_tunables"
-#define WHITESPACE " \t\n"
-#define cpubit_isset(cpu, bau_local_cpumask) \
- test_bit((cpu), (bau_local_cpumask).bits)
-
-/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
-/*
- * UV2: Bit 19 selects between
- * (0): 10 microsecond timebase and
- * (1): 80 microseconds
- * we're using 560us
- */
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
-/* assuming UV3 is the same */
-
-#define BAU_MISC_CONTROL_MULT_MASK 3
-
-#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
-/* [30:28] URGENCY_7 an index into a table of times */
-#define BAU_URGENCY_7_SHIFT 28
-#define BAU_URGENCY_7_MASK 7
-
-#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
-/* [45:40] BAU - BAU transaction timeout select - a multiplier */
-#define BAU_TRANS_SHIFT 40
-#define BAU_TRANS_MASK 0x3f
-
-/*
- * shorten some awkward names
- */
-#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
-#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
-#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
-#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
-#define PREFETCH_HINT_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT
-#define SB_STATUS_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
-#define write_gmmr uv_write_global_mmr64
-#define write_lmmr uv_write_local_mmr
-#define read_lmmr uv_read_local_mmr
-#define read_gmmr uv_read_global_mmr64
-
-/*
- * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
- */
-#define DS_IDLE 0
-#define DS_ACTIVE 1
-#define DS_DESTINATION_TIMEOUT 2
-#define DS_SOURCE_TIMEOUT 3
-/*
- * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
- * values 1 and 3 will not occur
- * Decoded meaning ERROR BUSY AUX ERR
- * ------------------------------- ---- ----- -------
- * IDLE 0 0 0
- * BUSY (active) 0 1 0
- * SW Ack Timeout (destination) 1 0 0
- * SW Ack INTD rejected (strong NACK) 1 0 1
- * Source Side Time Out Detected 1 1 0
- * Destination Side PUT Failed 1 1 1
- */
-#define UV2H_DESC_IDLE 0
-#define UV2H_DESC_BUSY 2
-#define UV2H_DESC_DEST_TIMEOUT 4
-#define UV2H_DESC_DEST_STRONG_NACK 5
-#define UV2H_DESC_SOURCE_TIMEOUT 6
-#define UV2H_DESC_DEST_PUT_ERR 7
-
-/*
- * delay for 'plugged' timeout retries, in microseconds
- */
-#define PLUGGED_DELAY 10
-
-/*
- * thresholds at which to use IPI to free resources
- */
-/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
-#define PLUGSB4RESET 100
-/* after this many consecutive timeouts, use IPI to release resources */
-#define TIMEOUTSB4RESET 1
-/* after this many uses of IPI to release resources, give up the request */
-#define IPI_RESET_LIMIT 1
-/* after this # consecutive successes, bump up the throttle if it was lowered */
-#define COMPLETE_THRESHOLD 5
-/* after this # of giveups (fall back to kernel IPI's) disable the use of
- the BAU for a period of time */
-#define GIVEUP_LIMIT 100
-
-#define UV_LB_SUBNODEID 0x10
-
-#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
-#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
-/* 4 bits of software ack period */
-#define UV2_ACK_MASK 0x7UL
-#define UV2_ACK_UNITS_SHFT 3
-#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
-
-/*
- * number of entries in the destination side payload queue
- */
-#define DEST_Q_SIZE 20
-/*
- * number of destination side software ack resources
- */
-#define DEST_NUM_RESOURCES 8
-/*
- * completion statuses for sending a TLB flush message
- */
-#define FLUSH_RETRY_PLUGGED 1
-#define FLUSH_RETRY_TIMEOUT 2
-#define FLUSH_GIVEUP 3
-#define FLUSH_COMPLETE 4
-
-/*
- * tuning the action when the numalink network is extremely delayed
- */
-#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in
- microseconds */
-#define CONGESTED_REPS 10 /* long delays averaged over
- this many broadcasts */
-#define DISABLED_PERIOD 10 /* time for the bau to be
- disabled, in seconds */
-/* see msg_type: */
-#define MSG_NOOP 0
-#define MSG_REGULAR 1
-#define MSG_RETRY 2
-
-#define BAU_DESC_QUALIFIER 0x534749
-
-enum uv_bau_version {
- UV_BAU_V2 = 2,
- UV_BAU_V3,
- UV_BAU_V4,
-};
-
-/*
- * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
- * If the 'multilevel' flag in the header portion of the descriptor
- * has been set to 0, then endpoint multi-unicast mode is selected.
- * The distribution specification (32 bytes) is interpreted as a 256-bit
- * distribution vector. Adjacent bits correspond to consecutive even numbered
- * nodeIDs. The result of adding the index of a given bit to the 15-bit
- * 'base_dest_nasid' field of the header corresponds to the
- * destination nodeID associated with that specified bit.
- */
-struct pnmask {
- unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
-};
-
-/*
- * mask of cpu's on a uvhub
- * (during initialization we need to check that unsigned long has
- * enough bits for max. cpu's per uvhub)
- */
-struct bau_local_cpumask {
- unsigned long bits;
-};
-
-/*
- * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
- * only 12 bytes (96 bits) of the payload area are usable.
- * An additional 3 bytes (bits 27:4) of the header address are carried
- * to the next bytes of the destination payload queue.
- * And an additional 2 bytes of the header Suppl_A field are also
- * carried to the destination payload queue.
- * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
- * of the destination payload queue, which is written by the hardware
- * with the s/w ack resource bit vector.
- * [ effective message contents (16 bytes (128 bits) maximum), not counting
- * the s/w ack bit vector ]
- */
-
-/**
- * struct uv2_3_bau_msg_payload - defines payload for INTD transactions
- * @address: Signifies a page or all TLB's of the cpu
- * @sending_cpu: CPU from which the message originates
- * @acknowledge_count: CPUs on the destination Hub that received the interrupt
- */
-struct uv2_3_bau_msg_payload {
- u64 address;
- u16 sending_cpu;
- u16 acknowledge_count;
-};
-
-/**
- * struct uv4_bau_msg_payload - defines payload for INTD transactions
- * @address: Signifies a page or all TLB's of the cpu
- * @sending_cpu: CPU from which the message originates
- * @acknowledge_count: CPUs on the destination Hub that received the interrupt
- * @qualifier: Set by source to verify origin of INTD broadcast
- */
-struct uv4_bau_msg_payload {
- u64 address;
- u16 sending_cpu;
- u16 acknowledge_count;
- u32 reserved:8;
- u32 qualifier:24;
-};
-
-/*
- * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
- * see figure 9-2 of harp_sys.pdf
- * assuming UV3 is the same
- */
-struct uv2_3_bau_msg_header {
- unsigned int base_dest_nasid:15; /* nasid of the first bit */
- /* bits 14:0 */ /* in uvhub map */
- unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
- /* bits 19:15 */
- unsigned int rsvd_1:1; /* must be zero */
- /* bit 20 */
- /* Address bits 59:21 */
- /* bits 25:2 of address (44:21) are payload */
- /* these next 24 bits become bytes 12-14 of msg */
- /* bits 28:21 land in byte 12 */
- unsigned int replied_to:1; /* sent as 0 by the source to
- byte 12 */
- /* bit 21 */
- unsigned int msg_type:3; /* software type of the
- message */
- /* bits 24:22 */
- unsigned int canceled:1; /* message canceled, resource
- is to be freed*/
- /* bit 25 */
- unsigned int payload_1:3; /* not currently used */
- /* bits 28:26 */
-
- /* bits 36:29 land in byte 13 */
- unsigned int payload_2a:3; /* not currently used */
- unsigned int payload_2b:5; /* not currently used */
- /* bits 36:29 */
-
- /* bits 44:37 land in byte 14 */
- unsigned int payload_3:8; /* not currently used */
- /* bits 44:37 */
-
- unsigned int rsvd_2:7; /* reserved */
- /* bits 51:45 */
- unsigned int swack_flag:1; /* software acknowledge flag */
- /* bit 52 */
- unsigned int rsvd_3a:3; /* must be zero */
- unsigned int rsvd_3b:8; /* must be zero */
- unsigned int rsvd_3c:8; /* must be zero */
- unsigned int rsvd_3d:3; /* must be zero */
- /* bits 74:53 */
- unsigned int fairness:3; /* usually zero */
- /* bits 77:75 */
-
- unsigned int sequence:16; /* message sequence number */
- /* bits 93:78 Suppl_A */
- unsigned int chaining:1; /* next descriptor is part of
- this activation*/
- /* bit 94 */
- unsigned int multilevel:1; /* multi-level multicast
- format */
- /* bit 95 */
- unsigned int rsvd_4:24; /* ordered / source node /
- source subnode / aging
- must be zero */
- /* bits 119:96 */
- unsigned int command:8; /* message type */
- /* bits 127:120 */
-};
-
-/*
- * The activation descriptor:
- * The format of the message to send, plus all accompanying control
- * Should be 64 bytes
- */
-struct bau_desc {
- struct pnmask distribution;
- /*
- * message template, consisting of header and payload:
- */
- union bau_msg_header {
- struct uv2_3_bau_msg_header uv2_3_hdr;
- } header;
-
- union bau_payload_header {
- struct uv2_3_bau_msg_payload uv2_3;
- struct uv4_bau_msg_payload uv4;
- } payload;
-};
-/* UV2:
- * -payload-- ---------header------
- * bytes 0-11 bits 70-78 bits 21-44
- * A B (2) C (3)
- *
- * A/B/C are moved to:
- * A C B
- * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
- * ------------payload queue-----------
- */
-
-/*
- * The payload queue on the destination side is an array of these.
- * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
- * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
- * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
- * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
- * swack_vec and payload_2)
- * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
- * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
- * operation."
- */
-struct bau_pq_entry {
- unsigned long address; /* signifies a page or all TLB's
- of the cpu */
- /* 64 bits, bytes 0-7 */
- unsigned short sending_cpu; /* cpu that sent the message */
- /* 16 bits, bytes 8-9 */
- unsigned short acknowledge_count; /* filled in by destination */
- /* 16 bits, bytes 10-11 */
- /* these next 3 bytes come from bits 58-81 of the message header */
- unsigned short replied_to:1; /* sent as 0 by the source */
- unsigned short msg_type:3; /* software message type */
- unsigned short canceled:1; /* sent as 0 by the source */
- unsigned short unused1:3; /* not currently using */
- /* byte 12 */
- unsigned char unused2a; /* not currently using */
- /* byte 13 */
- unsigned char unused2; /* not currently using */
- /* byte 14 */
- unsigned char swack_vec; /* filled in by the hardware */
- /* byte 15 (bits 127:120) */
- unsigned short sequence; /* message sequence number */
- /* bytes 16-17 */
- unsigned char unused4[2]; /* not currently using bytes 18-19 */
- /* bytes 18-19 */
- int number_of_cpus; /* filled in at destination */
- /* 32 bits, bytes 20-23 (aligned) */
- unsigned char unused5[8]; /* not using */
- /* bytes 24-31 */
-};
-
-struct msg_desc {
- struct bau_pq_entry *msg;
- int msg_slot;
- struct bau_pq_entry *queue_first;
- struct bau_pq_entry *queue_last;
-};
-
-struct reset_args {
- int sender;
-};
-
-/*
- * This structure is allocated per_cpu for UV TLB shootdown statistics.
- */
-struct ptc_stats {
- /* sender statistics */
- unsigned long s_giveup; /* number of fall backs to
- IPI-style flushes */
- unsigned long s_requestor; /* number of shootdown
- requests */
- unsigned long s_stimeout; /* source side timeouts */
- unsigned long s_dtimeout; /* destination side timeouts */
- unsigned long s_strongnacks; /* number of strong nack's */
- unsigned long s_time; /* time spent in sending side */
- unsigned long s_retriesok; /* successful retries */
- unsigned long s_ntargcpu; /* total number of cpu's
- targeted */
- unsigned long s_ntargself; /* times the sending cpu was
- targeted */
- unsigned long s_ntarglocals; /* targets of cpus on the local
- blade */
- unsigned long s_ntargremotes; /* targets of cpus on remote
- blades */
- unsigned long s_ntarglocaluvhub; /* targets of the local hub */
- unsigned long s_ntargremoteuvhub; /* remote hubs targeted */
- unsigned long s_ntarguvhub; /* total number of uvhubs
- targeted */
- unsigned long s_ntarguvhub16; /* number of times target
- hubs >= 16*/
- unsigned long s_ntarguvhub8; /* number of times target
- hubs >= 8 */
- unsigned long s_ntarguvhub4; /* number of times target
- hubs >= 4 */
- unsigned long s_ntarguvhub2; /* number of times target
- hubs >= 2 */
- unsigned long s_ntarguvhub1; /* number of times target
- hubs == 1 */
- unsigned long s_resets_plug; /* ipi-style resets from plug
- state */
- unsigned long s_resets_timeout; /* ipi-style resets from
- timeouts */
- unsigned long s_busy; /* status stayed busy past
- s/w timer */
- unsigned long s_throttles; /* waits in throttle */
- unsigned long s_retry_messages; /* retry broadcasts */
- unsigned long s_bau_reenabled; /* for bau enable/disable */
- unsigned long s_bau_disabled; /* for bau enable/disable */
- unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
- unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
- unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
- unsigned long s_overipilimit; /* over the ipi reset limit */
- unsigned long s_giveuplimit; /* disables, over giveup limit*/
- unsigned long s_enters; /* entries to the driver */
- unsigned long s_ipifordisabled; /* fall back to IPI; disabled */
- unsigned long s_plugged; /* plugged by h/w bug*/
- unsigned long s_congested; /* giveup on long wait */
- /* destination statistics */
- unsigned long d_alltlb; /* times all tlb's on this
- cpu were flushed */
- unsigned long d_onetlb; /* times just one tlb on this
- cpu was flushed */
- unsigned long d_multmsg; /* interrupts with multiple
- messages */
- unsigned long d_nomsg; /* interrupts with no message */
- unsigned long d_time; /* time spent on destination
- side */
- unsigned long d_requestee; /* number of messages
- processed */
- unsigned long d_retries; /* number of retry messages
- processed */
- unsigned long d_canceled; /* number of messages canceled
- by retries */
- unsigned long d_nocanceled; /* retries that found nothing
- to cancel */
- unsigned long d_resets; /* number of ipi-style requests
- processed */
- unsigned long d_rcanceled; /* number of messages canceled
- by resets */
-};
-
-struct tunables {
- int *tunp;
- int deflt;
-};
-
-struct hub_and_pnode {
- short uvhub;
- short pnode;
-};
-
-struct socket_desc {
- short num_cpus;
- short cpu_number[MAX_CPUS_PER_SOCKET];
-};
-
-struct uvhub_desc {
- unsigned short socket_mask;
- short num_cpus;
- short uvhub;
- short pnode;
- struct socket_desc socket[2];
-};
-
-/**
- * struct bau_control
- * @status_mmr: location of status mmr, determined by uvhub_cpu
- * @status_index: index of ERR|BUSY bits in status mmr, determined by uvhub_cpu
- *
- * Per-cpu control struct containing CPU topology information and BAU tuneables.
- */
-struct bau_control {
- struct bau_desc *descriptor_base;
- struct bau_pq_entry *queue_first;
- struct bau_pq_entry *queue_last;
- struct bau_pq_entry *bau_msg_head;
- struct bau_control *uvhub_master;
- struct bau_control *socket_master;
- struct ptc_stats *statp;
- cpumask_t *cpumask;
- unsigned long timeout_interval;
- unsigned long set_bau_on_time;
- atomic_t active_descriptor_count;
- int plugged_tries;
- int timeout_tries;
- int ipi_attempts;
- int conseccompletes;
- u64 status_mmr;
- int status_index;
- bool nobau;
- short baudisabled;
- short cpu;
- short osnode;
- short uvhub_cpu;
- short uvhub;
- short uvhub_version;
- short cpus_in_socket;
- short cpus_in_uvhub;
- short partition_base_pnode;
- short busy; /* all were busy (war) */
- unsigned short message_number;
- unsigned short uvhub_quiesce;
- short socket_acknowledge_count[DEST_Q_SIZE];
- cycles_t send_message;
- cycles_t period_end;
- cycles_t period_time;
- spinlock_t uvhub_lock;
- spinlock_t queue_lock;
- spinlock_t disable_lock;
- /* tunables */
- int max_concurr;
- int max_concurr_const;
- int plugged_delay;
- int plugsb4reset;
- int timeoutsb4reset;
- int ipi_reset_limit;
- int complete_threshold;
- int cong_response_us;
- int cong_reps;
- cycles_t disabled_period;
- int period_giveups;
- int giveup_limit;
- long period_requests;
- struct hub_and_pnode *thp;
-};
-
-/* Abstracted BAU functions */
-struct bau_operations {
- unsigned long (*read_l_sw_ack)(void);
- unsigned long (*read_g_sw_ack)(int pnode);
- unsigned long (*bau_gpa_to_offset)(unsigned long vaddr);
- void (*write_l_sw_ack)(unsigned long mmr);
- void (*write_g_sw_ack)(int pnode, unsigned long mmr);
- void (*write_payload_first)(int pnode, unsigned long mmr);
- void (*write_payload_last)(int pnode, unsigned long mmr);
- int (*wait_completion)(struct bau_desc*,
- struct bau_control*, long try);
-};
-
-static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
-}
-
-static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
-}
-
-static inline void write_mmr_activation(unsigned long index)
-{
- write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
-}
-
-static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
-}
-
-static inline void write_mmr_proc_payload_first(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_FIRST, mmr_image);
-}
-
-static inline void write_mmr_proc_payload_last(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_LAST, mmr_image);
-}
-
-static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
-}
-
-static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
-}
-
-static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
-}
-
-static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
-}
-
-static inline unsigned long read_mmr_misc_control(int pnode)
-{
- return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
-}
-
-static inline void write_mmr_sw_ack(unsigned long mr)
-{
- uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
-}
-
-static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
-}
-
-static inline unsigned long read_mmr_sw_ack(void)
-{
- return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
-}
-
-static inline unsigned long read_gmmr_sw_ack(int pnode)
-{
- return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
-}
-
-static inline void write_mmr_proc_sw_ack(unsigned long mr)
-{
- uv_write_local_mmr(UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
-}
-
-static inline void write_gmmr_proc_sw_ack(int pnode, unsigned long mr)
-{
- write_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
-}
-
-static inline unsigned long read_mmr_proc_sw_ack(void)
-{
- return read_lmmr(UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
-}
-
-static inline unsigned long read_gmmr_proc_sw_ack(int pnode)
-{
- return read_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
-}
-
-static inline void write_mmr_data_config(int pnode, unsigned long mr)
-{
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
-}
-
-static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp)
-{
- return constant_test_bit(uvhub, &dstp->bits[0]);
-}
-static inline void bau_uvhub_set(int pnode, struct pnmask *dstp)
-{
- __set_bit(pnode, &dstp->bits[0]);
-}
-static inline void bau_uvhubs_clear(struct pnmask *dstp,
- int nbits)
-{
- bitmap_zero(&dstp->bits[0], nbits);
-}
-static inline int bau_uvhub_weight(struct pnmask *dstp)
-{
- return bitmap_weight((unsigned long *)&dstp->bits[0],
- UV_DISTRIBUTION_SIZE);
-}
-
-static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
-{
- bitmap_zero(&dstp->bits, nbits);
-}
-
-struct atomic_short {
- short counter;
-};
-
-/*
- * atomic_read_short - read a short atomic variable
- * @v: pointer of type atomic_short
- *
- * Atomically reads the value of @v.
- */
-static inline int atomic_read_short(const struct atomic_short *v)
-{
- return v->counter;
-}
-
-/*
- * atom_asr - add and return a short int
- * @i: short value to add
- * @v: pointer of type atomic_short
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline int atom_asr(short i, struct atomic_short *v)
-{
- short __i = i;
- asm volatile(LOCK_PREFIX "xaddw %0, %1"
- : "+r" (i), "+m" (v->counter)
- : : "memory");
- return i + __i;
-}
-
-/*
- * conditionally add 1 to *v, unless *v is >= u
- * return 0 if we cannot add 1 to *v because it is >= u
- * return 1 if we can add 1 to *v because it is < u
- * the add is atomic
- *
- * This is close to atomic_add_unless(), but this allows the 'u' value
- * to be lowered below the current 'v'. atomic_add_unless can only stop
- * on equal.
- */
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
-{
- spin_lock(lock);
- if (atomic_read(v) >= u) {
- spin_unlock(lock);
- return 0;
- }
- atomic_inc(v);
- spin_unlock(lock);
- return 1;
-}
-
-void uv_bau_message_interrupt(struct pt_regs *regs);
-
-#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 100d66806503..5002f52be332 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,6 +5,7 @@
*
* SGI UV architectural definitions
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
*/
@@ -129,17 +130,6 @@
*/
#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2)
-/* System Controller Interface Reg info */
-struct uv_scir_s {
- struct timer_list timer;
- unsigned long offset;
- unsigned long last;
- unsigned long idle_on;
- unsigned long idle_off;
- unsigned char state;
- unsigned char enabled;
-};
-
/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
u32 limit; /* PA bits 56:26 (GAM_RANGE_SHFT) */
@@ -155,6 +145,8 @@ struct uv_gam_range_s {
* available in the L3 cache on the cpu socket for the node.
*/
struct uv_hub_info_s {
+ unsigned int hub_type;
+ unsigned char hub_revision;
unsigned long global_mmr_base;
unsigned long global_mmr_shift;
unsigned long gpa_mask;
@@ -167,9 +159,9 @@ struct uv_hub_info_s {
unsigned char m_val;
unsigned char n_val;
unsigned char gr_table_len;
- unsigned char hub_revision;
unsigned char apic_pnode_shift;
unsigned char gpa_shift;
+ unsigned char nasid_shift;
unsigned char m_shift;
unsigned char n_lshift;
unsigned int gnode_extra;
@@ -191,16 +183,13 @@ struct uv_hub_info_s {
struct uv_cpu_info_s {
void *p_uv_hub_info;
unsigned char blade_cpu_id;
- struct uv_scir_s scir;
+ void *reserved;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
#define uv_cpu_info this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu) (&per_cpu(__uv_cpu_info, cpu))
-#define uv_scir_info (&uv_cpu_info->scir)
-#define uv_cpu_scir_info(cpu) (&uv_cpu_info_per(cpu)->scir)
-
/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
@@ -219,6 +208,17 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}
+static inline int uv_hub_type(void)
+{
+ return uv_hub_info->hub_type;
+}
+
+static inline __init void uv_hub_type_set(int uvmask)
+{
+ uv_hub_info->hub_type = uvmask;
+}
+
+
/*
* HUB revision ranges for each UV HUB architecture.
* This is a software convention - NOT the hardware revision numbers in
@@ -228,39 +228,31 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7
#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
+#define UV5_HUB_REVISION_BASE 9
-static inline int is_uv2_hub(void)
-{
- return is_uv_hubbed(uv(2));
-}
-
-static inline int is_uv3_hub(void)
-{
- return is_uv_hubbed(uv(3));
-}
+static inline int is_uv(int uvmask) { return uv_hub_type() & uvmask; }
+static inline int is_uv1_hub(void) { return 0; }
+static inline int is_uv2_hub(void) { return is_uv(UV2); }
+static inline int is_uv3_hub(void) { return is_uv(UV3); }
+static inline int is_uv4a_hub(void) { return is_uv(UV4A); }
+static inline int is_uv4_hub(void) { return is_uv(UV4); }
+static inline int is_uv5_hub(void) { return is_uv(UV5); }
-/* First test "is UV4A", then "is UV4" */
-static inline int is_uv4a_hub(void)
-{
- if (is_uv_hubbed(uv(4)))
- return (uv_hub_info->hub_revision == UV4A_HUB_REVISION_BASE);
- return 0;
-}
+/*
+ * UV4A is a revision of UV4. So on UV4A, both is_uv4_hub() and
+ * is_uv4a_hub() return true, while on UV4 only is_uv4_hub()
+ * returns true. To distinguish them, test is_uv4a_hub() first,
+ * then is_uv4_hub().
+ */
-static inline int is_uv4_hub(void)
-{
- return is_uv_hubbed(uv(4));
-}
+/* UVX class: UV2,3,4 */
+static inline int is_uvx_hub(void) { return is_uv(UVX); }
-static inline int is_uvx_hub(void)
-{
- return (is_uv_hubbed(-2) >= uv(2));
-}
+/* UVY class: UV5,..? */
+static inline int is_uvy_hub(void) { return is_uv(UVY); }
-static inline int is_uv_hub(void)
-{
- return is_uvx_hub();
-}
+/* Any UV Hubbed System */
+static inline int is_uv_hub(void) { return is_uv(UV_ANY); }
union uvh_apicid {
unsigned long v;
@@ -282,9 +274,11 @@ union uvh_apicid {
* g - GNODE (full 15-bit global nasid, right shifted 1)
* p - PNODE (local part of nsids, right shifted 1)
*/
-#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
+#define UV_NASID_TO_PNODE(n) \
+ (((n) >> uv_hub_info->nasid_shift) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
-#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
+#define UV_PNODE_TO_NASID(p) \
+ (UV_PNODE_TO_GNODE(p) << uv_hub_info->nasid_shift)
#define UV2_LOCAL_MMR_BASE 0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
@@ -297,29 +291,42 @@ union uvh_apicid {
#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
#define UV4_LOCAL_MMR_BASE 0xfa000000UL
-#define UV4_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV4_GLOBAL_MMR32_BASE 0
#define UV4_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
-#define UV4_GLOBAL_MMR32_SIZE (16UL * 1024 * 1024)
+#define UV4_GLOBAL_MMR32_SIZE 0
+
+#define UV5_LOCAL_MMR_BASE 0xfa000000UL
+#define UV5_GLOBAL_MMR32_BASE 0
+#define UV5_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV5_GLOBAL_MMR32_SIZE 0
#define UV_LOCAL_MMR_BASE ( \
- is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
- is_uv3_hub() ? UV3_LOCAL_MMR_BASE : \
- /*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)
+ is_uv(UV2) ? UV2_LOCAL_MMR_BASE : \
+ is_uv(UV3) ? UV3_LOCAL_MMR_BASE : \
+ is_uv(UV4) ? UV4_LOCAL_MMR_BASE : \
+ is_uv(UV5) ? UV5_LOCAL_MMR_BASE : \
+ 0)
#define UV_GLOBAL_MMR32_BASE ( \
- is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE : \
- is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE : \
- /*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)
+ is_uv(UV2) ? UV2_GLOBAL_MMR32_BASE : \
+ is_uv(UV3) ? UV3_GLOBAL_MMR32_BASE : \
+ is_uv(UV4) ? UV4_GLOBAL_MMR32_BASE : \
+ is_uv(UV5) ? UV5_GLOBAL_MMR32_BASE : \
+ 0)
#define UV_LOCAL_MMR_SIZE ( \
- is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
- is_uv3_hub() ? UV3_LOCAL_MMR_SIZE : \
- /*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)
+ is_uv(UV2) ? UV2_LOCAL_MMR_SIZE : \
+ is_uv(UV3) ? UV3_LOCAL_MMR_SIZE : \
+ is_uv(UV4) ? UV4_LOCAL_MMR_SIZE : \
+ is_uv(UV5) ? UV5_LOCAL_MMR_SIZE : \
+ 0)
#define UV_GLOBAL_MMR32_SIZE ( \
- is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE : \
- is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE : \
- /*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)
+ is_uv(UV2) ? UV2_GLOBAL_MMR32_SIZE : \
+ is_uv(UV3) ? UV3_GLOBAL_MMR32_SIZE : \
+ is_uv(UV4) ? UV4_GLOBAL_MMR32_SIZE : \
+ is_uv(UV5) ? UV5_GLOBAL_MMR32_SIZE : \
+ 0)
#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
@@ -720,7 +727,7 @@ extern void uv_nmi_setup_hubless(void);
#define UVH_TSC_SYNC_SHIFT_UV2K 16 /* UV2/3k have different bits */
#define UVH_TSC_SYNC_MASK 3 /* 0011 */
#define UVH_TSC_SYNC_VALID 3 /* 0011 */
-#define UVH_TSC_SYNC_INVALID 2 /* 0010 */
+#define UVH_TSC_SYNC_UNKNOWN 0 /* 0000 */
/* BMC sets a bit this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR UVH_BIOS_KERNEL_MMR
@@ -728,19 +735,6 @@ extern void uv_nmi_setup_hubless(void);
#define UVH_NMI_MMR_SHIFT 63
#define UVH_NMI_MMR_TYPE "SCRATCH5"
-/* Newer SMM NMI handler, not present in all systems */
-#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
-#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
-#define UVH_NMI_MMRX_SHIFT UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
-#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
-
-/* Non-zero indicates newer SMM NMI handler present */
-#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST
-
-/* Indicates to BIOS that we want to use the newer SMM NMI handler */
-#define UVH_NMI_MMRX_REQ UVH_BIOS_KERNEL_MMR_ALIAS_2
-#define UVH_NMI_MMRX_REQ_SHIFT 62
-
struct uv_hub_nmi_s {
raw_spinlock_t nmi_lock;
atomic_t in_nmi; /* flag this node in UV NMI IRQ */
@@ -772,29 +766,6 @@ DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
#define UV_NMI_STATE_DUMP 2
#define UV_NMI_STATE_DUMP_DONE 3
-/* Update SCIR state */
-static inline void uv_set_scir_bits(unsigned char value)
-{
- if (uv_scir_info->state != value) {
- uv_scir_info->state = value;
- uv_write_local_mmr8(uv_scir_info->offset, value);
- }
-}
-
-static inline unsigned long uv_scir_offset(int apicid)
-{
- return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
-}
-
-static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
-{
- if (uv_cpu_scir_info(cpu)->state != value) {
- uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
- uv_cpu_scir_info(cpu)->offset, value);
- uv_cpu_scir_info(cpu)->state = value;
- }
-}
-
/*
* Get the minimum revision number of the hub chips within the partition.
* (See UVx_HUB_REVISION_BASE above for specific values.)
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 775bf143a072..57fa67373262 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -3,8 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * SGI UV MMR definitions
+ * HPE UV MMR definitions
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 2007-2016 Silicon Graphics, Inc. All rights reserved.
*/
@@ -18,42 +19,43 @@
* grouped by architecture types.
*
* UVH - definitions common to all UV hub types.
- * UVXH - definitions common to all UV eXtended hub types (currently 2, 3, 4).
- * UV2H - definitions specific to UV type 2 hub.
- * UV3H - definitions specific to UV type 3 hub.
+ * UVXH - definitions common to UVX class (2, 3, 4).
+ * UVYH - definitions common to UVY class (5).
+ * UV5H - definitions specific to UV type 5 hub.
+ * UV4AH - definitions specific to UV type 4A hub.
* UV4H - definitions specific to UV type 4 hub.
- *
- * So in general, MMR addresses and structures are identical on all hubs types.
- * These MMRs are identified as:
- * #define UVH_xxx <address>
- * union uvh_xxx {
- * unsigned long v;
- * struct uvh_int_cmpd_s {
- * } s;
- * };
+ * UV3H - definitions specific to UV type 3 hub.
+ * UV2H - definitions specific to UV type 2 hub.
*
* If the MMR exists on all hub types but have different addresses,
- * use a conditional operator to define the value at runtime.
- * #define UV2Hxxx b
- * #define UV3Hxxx c
- * #define UV4Hxxx d
- * #define UV4AHxxx e
- * #define UVHxxx (is_uv2_hub() ? UV2Hxxx :
- * (is_uv3_hub() ? UV3Hxxx :
- * (is_uv4a_hub() ? UV4AHxxx :
- * UV4Hxxx))
+ * use a conditional operator to define the value at runtime. Any
+ * that are not defined are blank.
+ * (UV4A variations only generated if different from uv4)
+ * #define UVHxxx (
+ * is_uv(UV5) ? UV5Hxxx value :
+ * is_uv(UV4A) ? UV4AHxxx value :
+ * is_uv(UV4) ? UV4Hxxx value :
+ * is_uv(UV3) ? UV3Hxxx value :
+ * is_uv(UV2) ? UV2Hxxx value :
+ * <ucv> or <undef value>)
+ *
+ * Class UVX has UVs (2|3|4|4A).
+ * Class UVY has UVs (5).
*
* union uvh_xxx {
* unsigned long v;
* struct uvh_xxx_s { # Common fields only
* } s;
- * struct uv2h_xxx_s { # Full UV2 definition (*)
- * } s2;
- * struct uv3h_xxx_s { # Full UV3 definition (*)
- * } s3;
- * (NOTE: No struct uv4ah_xxx_s members exist)
+ * struct uv5h_xxx_s { # Full UV5 definition (*)
+ * } s5;
+ * struct uv4ah_xxx_s { # Full UV4A definition (*)
+ * } s4a;
* struct uv4h_xxx_s { # Full UV4 definition (*)
* } s4;
+ * struct uv3h_xxx_s { # Full UV3 definition (*)
+ * } s3;
+ * struct uv2h_xxx_s { # Full UV2 definition (*)
+ * } s2;
* };
* (* - if present and different than the common struct)
*
@@ -62,429 +64,499 @@
* if the contents is the same for all hubs, only the "s" structure is
* generated.
*
- * If the MMR exists on ONLY 1 type of hub, no generic definition is
- * generated:
- * #define UVnH_xxx <uvn address>
- * union uvnh_xxx {
- * unsigned long v;
- * struct uvh_int_cmpd_s {
- * } sn;
- * };
- *
- * (GEN Flags: mflags_opt= undefs=function UV234=UVXH)
+ * (GEN Flags: undefs=function)
*/
+ /* UV bit masks */
+#define UV2 (1 << 0)
+#define UV3 (1 << 1)
+#define UV4 (1 << 2)
+#define UV4A (1 << 3)
+#define UV5 (1 << 4)
+#define UVX (UV2|UV3|UV4)
+#define UVY (UV5)
+#define UV_ANY (~0)
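These bit masks let a single is_uv(mask) test cover several hub generations at once, which is how the conditional MMR base/size macros earlier in this series are built. A small hypothetical sketch of the same pattern; the offsets are made up purely for illustration.

/* Hypothetical: pick a timeout MMR offset by hub generation class. */
static unsigned long uv_example_timeout_mmr(void)
{
	if (is_uv(UVY))			/* UV5 and newer "Y" class hubs */
		return 0xb200UL;	/* made-up offset */
	if (is_uv(UV4 | UV4A))
		return 0xb100UL;	/* made-up offset */
	if (is_uv(UV2 | UV3))
		return 0xb000UL;	/* made-up offset */
	return 0;			/* not a UV hubbed system */
}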
+
+
+
+
#define UV_MMR_ENABLE (1UL << 63)
+#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
#define UV2_HUB_PART_NUMBER_X 0x1111
#define UV3_HUB_PART_NUMBER 0x9578
#define UV3_HUB_PART_NUMBER_X 0x4321
#define UV4_HUB_PART_NUMBER 0x99a1
+#define UV5_HUB_PART_NUMBER 0xa171
/* Error function to catch undefined references */
extern unsigned long uv_undefined(char *str);
/* ========================================================================= */
-/* UVH_BAU_DATA_BROADCAST */
-/* ========================================================================= */
-#define UVH_BAU_DATA_BROADCAST 0x61688UL
-
-#define UV2H_BAU_DATA_BROADCAST_32 0x440
-#define UV3H_BAU_DATA_BROADCAST_32 0x440
-#define UV4H_BAU_DATA_BROADCAST_32 0x360
-#define UVH_BAU_DATA_BROADCAST_32 ( \
- is_uv2_hub() ? UV2H_BAU_DATA_BROADCAST_32 : \
- is_uv3_hub() ? UV3H_BAU_DATA_BROADCAST_32 : \
- /*is_uv4_hub*/ UV4H_BAU_DATA_BROADCAST_32)
-
-#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
-#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
-
-
-union uvh_bau_data_broadcast_u {
- unsigned long v;
- struct uvh_bau_data_broadcast_s {
- unsigned long enable:1; /* RW */
- unsigned long rsvd_1_63:63;
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_BAU_DATA_CONFIG */
-/* ========================================================================= */
-#define UVH_BAU_DATA_CONFIG 0x61680UL
-
-#define UV2H_BAU_DATA_CONFIG_32 0x438
-#define UV3H_BAU_DATA_CONFIG_32 0x438
-#define UV4H_BAU_DATA_CONFIG_32 0x358
-#define UVH_BAU_DATA_CONFIG_32 ( \
- is_uv2_hub() ? UV2H_BAU_DATA_CONFIG_32 : \
- is_uv3_hub() ? UV3H_BAU_DATA_CONFIG_32 : \
- /*is_uv4_hub*/ UV4H_BAU_DATA_CONFIG_32)
-
-#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
-#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
-#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
-#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
-#define UVH_BAU_DATA_CONFIG_P_SHFT 13
-#define UVH_BAU_DATA_CONFIG_T_SHFT 15
-#define UVH_BAU_DATA_CONFIG_M_SHFT 16
-#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
-#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-
-union uvh_bau_data_config_u {
- unsigned long v;
- struct uvh_bau_data_config_s {
- unsigned long vector_:8; /* RW */
- unsigned long dm:3; /* RW */
- unsigned long destmode:1; /* RW */
- unsigned long status:1; /* RO */
- unsigned long p:1; /* RO */
- unsigned long rsvd_14:1;
- unsigned long t:1; /* RO */
- unsigned long m:1; /* RW */
- unsigned long rsvd_17_31:15;
- unsigned long apic_id:32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_EVENT_OCCURRED0 */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0 0x70000UL
-#define UVH_EVENT_OCCURRED0_32 0x5e8
+/* UVH common defines*/
#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+/* UVXH common defines */
#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
-#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
-#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
-#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
-#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
-#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
-#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
-#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
-#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
-#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
-#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
-#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
-#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVXH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
-#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
-#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
-
-#define UV3H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
-#define UV3H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
-#define UV3H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
-#define UV3H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
-#define UV3H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
-#define UV3H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
-#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
-#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
-#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
-#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
-#define UV3H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
-#define UV3H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
-#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
-#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
-#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
-#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
-#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
-#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
-#define UV3H_EVENT_OCCURRED0_IPI_INT_SHFT 53
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
-#define UV3H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
-#define UV3H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
-#define UV3H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
-#define UV3H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
-#define UV3H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
-#define UV3H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
-#define UV3H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
-#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
-#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
-#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
-#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
-#define UV3H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
-#define UV3H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
-#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
-#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
-#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
-#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
-#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
-#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
-#define UV3H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
-#define UV3H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
-
+/* UVYH common defines */
+#define UVYH_EVENT_OCCURRED0_KT_HCERR_SHFT 1
+#define UVYH_EVENT_OCCURRED0_KT_HCERR_MASK 0x0000000000000002UL
+#define UVYH_EVENT_OCCURRED0_RH0_HCERR_SHFT 2
+#define UVYH_EVENT_OCCURRED0_RH0_HCERR_MASK 0x0000000000000004UL
+#define UVYH_EVENT_OCCURRED0_RH1_HCERR_SHFT 3
+#define UVYH_EVENT_OCCURRED0_RH1_HCERR_MASK 0x0000000000000008UL
+#define UVYH_EVENT_OCCURRED0_LH0_HCERR_SHFT 4
+#define UVYH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000010UL
+#define UVYH_EVENT_OCCURRED0_LH1_HCERR_SHFT 5
+#define UVYH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000020UL
+#define UVYH_EVENT_OCCURRED0_LH2_HCERR_SHFT 6
+#define UVYH_EVENT_OCCURRED0_LH2_HCERR_MASK 0x0000000000000040UL
+#define UVYH_EVENT_OCCURRED0_LH3_HCERR_SHFT 7
+#define UVYH_EVENT_OCCURRED0_LH3_HCERR_MASK 0x0000000000000080UL
+#define UVYH_EVENT_OCCURRED0_XB_HCERR_SHFT 8
+#define UVYH_EVENT_OCCURRED0_XB_HCERR_MASK 0x0000000000000100UL
+#define UVYH_EVENT_OCCURRED0_RDM_HCERR_SHFT 9
+#define UVYH_EVENT_OCCURRED0_RDM_HCERR_MASK 0x0000000000000200UL
+#define UVYH_EVENT_OCCURRED0_NI0_HCERR_SHFT 10
+#define UVYH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000400UL
+#define UVYH_EVENT_OCCURRED0_NI1_HCERR_SHFT 11
+#define UVYH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000800UL
+#define UVYH_EVENT_OCCURRED0_LB_AOERR0_SHFT 12
+#define UVYH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000001000UL
+#define UVYH_EVENT_OCCURRED0_KT_AOERR0_SHFT 13
+#define UVYH_EVENT_OCCURRED0_KT_AOERR0_MASK 0x0000000000002000UL
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR0_SHFT 14
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR0_MASK 0x0000000000004000UL
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR0_SHFT 15
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR0_MASK 0x0000000000008000UL
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 16
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000010000UL
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 17
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000020000UL
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR0_SHFT 18
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR0_MASK 0x0000000000040000UL
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR0_SHFT 19
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR0_MASK 0x0000000000080000UL
+#define UVYH_EVENT_OCCURRED0_XB_AOERR0_SHFT 20
+#define UVYH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000100000UL
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR0_SHFT 21
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR0_MASK 0x0000000000200000UL
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR0_SHFT 22
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR0_MASK 0x0000000000400000UL
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR0_SHFT 23
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR0_MASK 0x0000000000800000UL
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 24
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000001000000UL
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 25
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000002000000UL
+#define UVYH_EVENT_OCCURRED0_LB_AOERR1_SHFT 26
+#define UVYH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000004000000UL
+#define UVYH_EVENT_OCCURRED0_KT_AOERR1_SHFT 27
+#define UVYH_EVENT_OCCURRED0_KT_AOERR1_MASK 0x0000000008000000UL
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR1_SHFT 28
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR1_MASK 0x0000000010000000UL
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR1_SHFT 29
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR1_MASK 0x0000000020000000UL
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 30
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000040000000UL
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 31
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000080000000UL
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR1_SHFT 32
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR1_MASK 0x0000000100000000UL
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR1_SHFT 33
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR1_MASK 0x0000000200000000UL
+#define UVYH_EVENT_OCCURRED0_XB_AOERR1_SHFT 34
+#define UVYH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000400000000UL
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR1_SHFT 35
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR1_MASK 0x0000000800000000UL
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR1_SHFT 36
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR1_MASK 0x0000001000000000UL
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR1_SHFT 37
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR1_MASK 0x0000002000000000UL
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 38
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000004000000000UL
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 39
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000008000000000UL
+#define UVYH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 40
+#define UVYH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000010000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 41
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000020000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 42
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000040000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 43
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000080000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 44
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000100000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 45
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000200000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 46
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000400000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 47
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000800000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 48
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0001000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 49
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0002000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 50
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0004000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 51
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0008000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 52
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0010000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 53
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0020000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 54
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0040000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 55
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0080000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 56
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0100000000000000UL
+#define UVYH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 57
+#define UVYH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0200000000000000UL
+#define UVYH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 58
+#define UVYH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0400000000000000UL
+#define UVYH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 59
+#define UVYH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0800000000000000UL
+#define UVYH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 60
+#define UVYH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x1000000000000000UL
+#define UVYH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 61
+#define UVYH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x2000000000000000UL
+
+/* UV4 unique defines */
#define UV4H_EVENT_OCCURRED0_KT_HCERR_SHFT 1
-#define UV4H_EVENT_OCCURRED0_KT_AOERR0_SHFT 10
-#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_SHFT 17
-#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_SHFT 18
-#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_SHFT 19
-#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_SHFT 20
-#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 21
-#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 22
-#define UV4H_EVENT_OCCURRED0_LB_AOERR1_SHFT 23
-#define UV4H_EVENT_OCCURRED0_KT_AOERR1_SHFT 24
-#define UV4H_EVENT_OCCURRED0_RH_AOERR1_SHFT 25
-#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 26
-#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 27
-#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 28
-#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 29
-#define UV4H_EVENT_OCCURRED0_XB_AOERR1_SHFT 30
-#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_SHFT 31
-#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_SHFT 32
-#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_SHFT 33
-#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_SHFT 34
-#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 35
-#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 36
-#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 37
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 38
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 39
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 40
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 41
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 42
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 43
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 44
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 45
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 46
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 47
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 48
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 49
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 50
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 51
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 52
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 53
-#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 54
-#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 55
-#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 56
-#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 57
-#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 58
-#define UV4H_EVENT_OCCURRED0_IPI_INT_SHFT 59
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 60
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 61
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 62
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 63
#define UV4H_EVENT_OCCURRED0_KT_HCERR_MASK 0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED0_KT_AOERR0_SHFT 10
#define UV4H_EVENT_OCCURRED0_KT_AOERR0_MASK 0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_SHFT 17
#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_MASK 0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_SHFT 18
#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_MASK 0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_SHFT 19
#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_MASK 0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_SHFT 20
#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_MASK 0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 21
#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 22
#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED0_LB_AOERR1_SHFT 23
#define UV4H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED0_KT_AOERR1_SHFT 24
#define UV4H_EVENT_OCCURRED0_KT_AOERR1_MASK 0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED0_RH_AOERR1_SHFT 25
#define UV4H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 26
#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 27
#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 28
#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 29
#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED0_XB_AOERR1_SHFT 30
#define UV4H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_SHFT 31
#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_MASK 0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_SHFT 32
#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_MASK 0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_SHFT 33
#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_MASK 0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_SHFT 34
#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_MASK 0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 35
#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 36
#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 37
#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 38
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 39
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 40
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 41
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 42
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 43
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 44
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 45
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 46
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 47
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 48
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 49
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0002000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 50
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0004000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 51
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0008000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 52
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0010000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 53
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0020000000000000UL
+#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 54
#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0040000000000000UL
+#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 55
#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0080000000000000UL
+#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 56
#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0100000000000000UL
+#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 57
#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0200000000000000UL
+#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 58
#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0400000000000000UL
+#define UV4H_EVENT_OCCURRED0_IPI_INT_SHFT 59
#define UV4H_EVENT_OCCURRED0_IPI_INT_MASK 0x0800000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 60
#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x1000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 61
#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x2000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 62
#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x4000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 63
#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x8000000000000000UL
+/* UV3 unique defines */
+#define UV3H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UV3H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UV3H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UV3H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UV3H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UV3H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UV3H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UV3H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UV3H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UV3H_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UV3H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UV3H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UV3H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+
+/* UV2 unique defines */
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK ( \
+ is_uv(UV4) ? 0x1000000000000000UL : \
+ is_uv(UV3) ? 0x0040000000000000UL : \
+ is_uv(UV2) ? 0x0040000000000000UL : \
+ 0)
#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
+ is_uv(UV4) ? 60 : \
+ is_uv(UV3) ? 54 : \
+ is_uv(UV2) ? 54 : \
+ -1)
union uvh_event_occurred0_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_event_occurred0_s {
- unsigned long lb_hcerr:1; /* RW, W1C */
- unsigned long rsvd_1_10:10;
- unsigned long rh_aoerr0:1; /* RW, W1C */
- unsigned long rsvd_12_63:52;
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long rsvd_1_63:63;
} s;
+
+ /* UVXH common struct */
struct uvxh_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW */
unsigned long rsvd_1:1;
@@ -505,6 +577,142 @@ union uvh_event_occurred0_u {
unsigned long xb_aoerr0:1; /* RW */
unsigned long rsvd_17_63:47;
} sx;
+
+ /* UVYH common struct */
+ struct uvyh_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long kt_hcerr:1; /* RW */
+ unsigned long rh0_hcerr:1; /* RW */
+ unsigned long rh1_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long lh2_hcerr:1; /* RW */
+ unsigned long lh3_hcerr:1; /* RW */
+ unsigned long xb_hcerr:1; /* RW */
+ unsigned long rdm_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long kt_aoerr0:1; /* RW */
+ unsigned long rh0_aoerr0:1; /* RW */
+ unsigned long rh1_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long lh2_aoerr0:1; /* RW */
+ unsigned long lh3_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rdm_aoerr0:1; /* RW */
+ unsigned long rt0_aoerr0:1; /* RW */
+ unsigned long rt1_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long kt_aoerr1:1; /* RW */
+ unsigned long rh0_aoerr1:1; /* RW */
+ unsigned long rh1_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long lh2_aoerr1:1; /* RW */
+ unsigned long lh3_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rdm_aoerr1:1; /* RW */
+ unsigned long rt0_aoerr1:1; /* RW */
+ unsigned long rt1_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long rsvd_62_63:2;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long kt_hcerr:1; /* RW */
+ unsigned long rh0_hcerr:1; /* RW */
+ unsigned long rh1_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long lh2_hcerr:1; /* RW */
+ unsigned long lh3_hcerr:1; /* RW */
+ unsigned long xb_hcerr:1; /* RW */
+ unsigned long rdm_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long kt_aoerr0:1; /* RW */
+ unsigned long rh0_aoerr0:1; /* RW */
+ unsigned long rh1_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long lh2_aoerr0:1; /* RW */
+ unsigned long lh3_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rdm_aoerr0:1; /* RW */
+ unsigned long rt0_aoerr0:1; /* RW */
+ unsigned long rt1_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long kt_aoerr1:1; /* RW */
+ unsigned long rh0_aoerr1:1; /* RW */
+ unsigned long rh1_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long lh2_aoerr1:1; /* RW */
+ unsigned long lh3_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rdm_aoerr1:1; /* RW */
+ unsigned long rt0_aoerr1:1; /* RW */
+ unsigned long rt1_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long rsvd_62_63:2;
+ } s5;
+
+ /* UV4 unique struct */
struct uv4h_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW */
unsigned long kt_hcerr:1; /* RW */
@@ -571,13 +779,1355 @@ union uvh_event_occurred0_u {
unsigned long extio_int2:1; /* RW */
unsigned long extio_int3:1; /* RW */
} s4;
+
+ /* UV3 unique struct */
+ struct uv3h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long qp_hcerr:1; /* RW */
+ unsigned long rh_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long gr0_hcerr:1; /* RW */
+ unsigned long gr1_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long qp_aoerr0:1; /* RW */
+ unsigned long rh_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long gr0_aoerr0:1; /* RW */
+ unsigned long gr1_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rt_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long qp_aoerr1:1; /* RW */
+ unsigned long rh_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long gr0_aoerr1:1; /* RW */
+ unsigned long gr1_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rt_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long rsvd_59_63:5;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long qp_hcerr:1; /* RW */
+ unsigned long rh_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long gr0_hcerr:1; /* RW */
+ unsigned long gr1_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long qp_aoerr0:1; /* RW */
+ unsigned long rh_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long gr0_aoerr0:1; /* RW */
+ unsigned long gr1_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rt_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long qp_aoerr1:1; /* RW */
+ unsigned long rh_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long gr0_aoerr1:1; /* RW */
+ unsigned long gr1_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rt_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long rsvd_59_63:5;
+ } s2;
};
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0_ALIAS */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED1 */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED1 0x70080UL
+
+
+
+/* UVYH common defines */
+#define UVYH_EVENT_OCCURRED1_IPI_INT_SHFT 0
+#define UVYH_EVENT_OCCURRED1_IPI_INT_MASK 0x0000000000000001UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT0_SHFT 1
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT0_MASK 0x0000000000000002UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT1_SHFT 2
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT1_MASK 0x0000000000000004UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT2_SHFT 3
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT2_MASK 0x0000000000000008UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT3_SHFT 4
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT3_MASK 0x0000000000000010UL
+#define UVYH_EVENT_OCCURRED1_PROFILE_INT_SHFT 5
+#define UVYH_EVENT_OCCURRED1_PROFILE_INT_MASK 0x0000000000000020UL
+#define UVYH_EVENT_OCCURRED1_BAU_DATA_SHFT 6
+#define UVYH_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000040UL
+#define UVYH_EVENT_OCCURRED1_PROC_GENERAL_SHFT 7
+#define UVYH_EVENT_OCCURRED1_PROC_GENERAL_MASK 0x0000000000000080UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT0_SHFT 8
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT0_MASK 0x0000000000000100UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT1_SHFT 9
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT1_MASK 0x0000000000000200UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT2_SHFT 10
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT2_MASK 0x0000000000000400UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT3_SHFT 11
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT3_MASK 0x0000000000000800UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT4_SHFT 12
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT4_MASK 0x0000000000001000UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT5_SHFT 13
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT5_MASK 0x0000000000002000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT0_SHFT 14
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT0_MASK 0x0000000000004000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT1_SHFT 15
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT1_MASK 0x0000000000008000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT2_SHFT 16
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT2_MASK 0x0000000000010000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT3_SHFT 17
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT3_MASK 0x0000000000020000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT4_SHFT 18
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT4_MASK 0x0000000000040000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT5_SHFT 19
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT5_MASK 0x0000000000080000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT6_SHFT 20
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT6_MASK 0x0000000000100000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT7_SHFT 21
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT7_MASK 0x0000000000200000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT8_SHFT 22
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT8_MASK 0x0000000000400000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT9_SHFT 23
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT9_MASK 0x0000000000800000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT10_SHFT 24
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT10_MASK 0x0000000001000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT11_SHFT 25
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT11_MASK 0x0000000002000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT12_SHFT 26
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT12_MASK 0x0000000004000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT13_SHFT 27
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT13_MASK 0x0000000008000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT14_SHFT 28
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT14_MASK 0x0000000010000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT15_SHFT 29
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT15_MASK 0x0000000020000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT16_SHFT 30
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT16_MASK 0x0000000040000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT17_SHFT 31
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT17_MASK 0x0000000080000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT18_SHFT 32
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT18_MASK 0x0000000100000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT19_SHFT 33
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT19_MASK 0x0000000200000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT20_SHFT 34
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT20_MASK 0x0000000400000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT21_SHFT 35
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT21_MASK 0x0000000800000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT22_SHFT 36
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT22_MASK 0x0000001000000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT23_SHFT 37
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT23_MASK 0x0000002000000000UL
+
+/* UV4 unique defines */
+#define UV4H_EVENT_OCCURRED1_PROFILE_INT_SHFT 0
+#define UV4H_EVENT_OCCURRED1_PROFILE_INT_MASK 0x0000000000000001UL
+#define UV4H_EVENT_OCCURRED1_BAU_DATA_SHFT 1
+#define UV4H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED1_PROC_GENERAL_SHFT 2
+#define UV4H_EVENT_OCCURRED1_PROC_GENERAL_MASK 0x0000000000000004UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 3
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000000008UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 4
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000000010UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 5
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000000020UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 6
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000000040UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 7
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000000080UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 8
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000000100UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 9
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000000000200UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 10
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 11
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000000000800UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 12
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000000001000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 13
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000000002000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 14
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000000004000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 15
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000000008000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 16
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000000010000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 17
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 18
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT16_SHFT 19
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT16_MASK 0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT17_SHFT 20
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT17_MASK 0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT18_SHFT 21
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT18_MASK 0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT19_SHFT 22
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT19_MASK 0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT20_SHFT 23
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT20_MASK 0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT21_SHFT 24
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT21_MASK 0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT22_SHFT 25
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT22_MASK 0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT23_SHFT 26
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT23_MASK 0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 27
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 28
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 29
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 30
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 31
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 32
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 33
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 34
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 35
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 36
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 37
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 38
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 39
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 40
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 41
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 42
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT16_SHFT 43
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT16_MASK 0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT17_SHFT 44
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT17_MASK 0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT18_SHFT 45
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT18_MASK 0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT19_SHFT 46
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT19_MASK 0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT20_SHFT 47
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT20_MASK 0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT21_SHFT 48
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT21_MASK 0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT22_SHFT 49
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT22_MASK 0x0002000000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT23_SHFT 50
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT23_MASK 0x0004000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_EVENT_OCCURRED1_BAU_DATA_SHFT 0
+#define UV3H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000001UL
+#define UV3H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_SHFT 1
+#define UV3H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_MASK 0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_SHFT 2
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000004UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_SHFT 3
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000008UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_SHFT 4
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000010UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_SHFT 5
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000020UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_SHFT 6
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000040UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_SHFT 7
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000080UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_SHFT 8
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000100UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_SHFT 9
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000200UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_SHFT 10
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_SHFT 11
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000800UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_SHFT 12
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000001000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_SHFT 13
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000002000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_SHFT 14
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000004000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_SHFT 15
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000008000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_SHFT 16
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000010000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_SHFT 17
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 18
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 19
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 20
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 21
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 22
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 23
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 24
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 25
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 26
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 27
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 28
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 29
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 30
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 31
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000080000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 32
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000100000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 33
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000200000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 34
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000400000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 35
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000800000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 36
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000001000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 37
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000002000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 38
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000004000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 39
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000008000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 40
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000010000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 41
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000020000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 42
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000040000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 43
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000080000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 44
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000100000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 45
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000200000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 46
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000400000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 47
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000800000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 48
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0001000000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 49
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0002000000000000UL
+#define UV3H_EVENT_OCCURRED1_RTC_INTERVAL_INT_SHFT 50
+#define UV3H_EVENT_OCCURRED1_RTC_INTERVAL_INT_MASK 0x0004000000000000UL
+#define UV3H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_SHFT 51
+#define UV3H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_MASK 0x0008000000000000UL
+
+/* UV2 unique defines */
+#define UV2H_EVENT_OCCURRED1_BAU_DATA_SHFT 0
+#define UV2H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_SHFT 1
+#define UV2H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_SHFT 2
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_SHFT 3
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_SHFT 4
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_SHFT 5
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_SHFT 6
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_SHFT 7
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_SHFT 8
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_SHFT 9
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_SHFT 10
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_SHFT 11
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_SHFT 12
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_SHFT 13
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_SHFT 14
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_SHFT 15
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_SHFT 16
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_SHFT 17
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 18
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 19
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 20
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 21
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 22
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 23
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 24
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 25
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 26
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 27
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 28
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 29
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 30
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 31
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 32
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 33
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 34
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 35
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 36
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 37
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 38
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 39
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 40
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 41
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 42
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 43
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 44
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 45
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 46
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 47
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 48
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 49
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED1_RTC_INTERVAL_INT_SHFT 50
+#define UV2H_EVENT_OCCURRED1_RTC_INTERVAL_INT_MASK 0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_SHFT 51
+#define UV2H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_MASK 0x0008000000000000UL
+
+#define UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK ( \
+ is_uv(UV5) ? 0x0000000000000002UL : \
+ 0)
+#define UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT ( \
+ is_uv(UV5) ? 1 : \
+ -1)
+
+union uvyh_event_occurred1_u {
+ unsigned long v;
+
+ /* UVYH common struct */
+ struct uvyh_event_occurred1_s {
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long bau_data:1; /* RW */
+ unsigned long proc_general:1; /* RW */
+ unsigned long xh_tlb_int0:1; /* RW */
+ unsigned long xh_tlb_int1:1; /* RW */
+ unsigned long xh_tlb_int2:1; /* RW */
+ unsigned long xh_tlb_int3:1; /* RW */
+ unsigned long xh_tlb_int4:1; /* RW */
+ unsigned long xh_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int0:1; /* RW */
+ unsigned long rdm_tlb_int1:1; /* RW */
+ unsigned long rdm_tlb_int2:1; /* RW */
+ unsigned long rdm_tlb_int3:1; /* RW */
+ unsigned long rdm_tlb_int4:1; /* RW */
+ unsigned long rdm_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int6:1; /* RW */
+ unsigned long rdm_tlb_int7:1; /* RW */
+ unsigned long rdm_tlb_int8:1; /* RW */
+ unsigned long rdm_tlb_int9:1; /* RW */
+ unsigned long rdm_tlb_int10:1; /* RW */
+ unsigned long rdm_tlb_int11:1; /* RW */
+ unsigned long rdm_tlb_int12:1; /* RW */
+ unsigned long rdm_tlb_int13:1; /* RW */
+ unsigned long rdm_tlb_int14:1; /* RW */
+ unsigned long rdm_tlb_int15:1; /* RW */
+ unsigned long rdm_tlb_int16:1; /* RW */
+ unsigned long rdm_tlb_int17:1; /* RW */
+ unsigned long rdm_tlb_int18:1; /* RW */
+ unsigned long rdm_tlb_int19:1; /* RW */
+ unsigned long rdm_tlb_int20:1; /* RW */
+ unsigned long rdm_tlb_int21:1; /* RW */
+ unsigned long rdm_tlb_int22:1; /* RW */
+ unsigned long rdm_tlb_int23:1; /* RW */
+ unsigned long rsvd_38_63:26;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_event_occurred1_s {
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long bau_data:1; /* RW */
+ unsigned long proc_general:1; /* RW */
+ unsigned long xh_tlb_int0:1; /* RW */
+ unsigned long xh_tlb_int1:1; /* RW */
+ unsigned long xh_tlb_int2:1; /* RW */
+ unsigned long xh_tlb_int3:1; /* RW */
+ unsigned long xh_tlb_int4:1; /* RW */
+ unsigned long xh_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int0:1; /* RW */
+ unsigned long rdm_tlb_int1:1; /* RW */
+ unsigned long rdm_tlb_int2:1; /* RW */
+ unsigned long rdm_tlb_int3:1; /* RW */
+ unsigned long rdm_tlb_int4:1; /* RW */
+ unsigned long rdm_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int6:1; /* RW */
+ unsigned long rdm_tlb_int7:1; /* RW */
+ unsigned long rdm_tlb_int8:1; /* RW */
+ unsigned long rdm_tlb_int9:1; /* RW */
+ unsigned long rdm_tlb_int10:1; /* RW */
+ unsigned long rdm_tlb_int11:1; /* RW */
+ unsigned long rdm_tlb_int12:1; /* RW */
+ unsigned long rdm_tlb_int13:1; /* RW */
+ unsigned long rdm_tlb_int14:1; /* RW */
+ unsigned long rdm_tlb_int15:1; /* RW */
+ unsigned long rdm_tlb_int16:1; /* RW */
+ unsigned long rdm_tlb_int17:1; /* RW */
+ unsigned long rdm_tlb_int18:1; /* RW */
+ unsigned long rdm_tlb_int19:1; /* RW */
+ unsigned long rdm_tlb_int20:1; /* RW */
+ unsigned long rdm_tlb_int21:1; /* RW */
+ unsigned long rdm_tlb_int22:1; /* RW */
+ unsigned long rdm_tlb_int23:1; /* RW */
+ unsigned long rsvd_38_63:26;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_event_occurred1_s {
+ unsigned long profile_int:1; /* RW */
+ unsigned long bau_data:1; /* RW */
+ unsigned long proc_general:1; /* RW */
+ unsigned long gr0_tlb_int0:1; /* RW */
+ unsigned long gr0_tlb_int1:1; /* RW */
+ unsigned long gr0_tlb_int2:1; /* RW */
+ unsigned long gr0_tlb_int3:1; /* RW */
+ unsigned long gr0_tlb_int4:1; /* RW */
+ unsigned long gr0_tlb_int5:1; /* RW */
+ unsigned long gr0_tlb_int6:1; /* RW */
+ unsigned long gr0_tlb_int7:1; /* RW */
+ unsigned long gr0_tlb_int8:1; /* RW */
+ unsigned long gr0_tlb_int9:1; /* RW */
+ unsigned long gr0_tlb_int10:1; /* RW */
+ unsigned long gr0_tlb_int11:1; /* RW */
+ unsigned long gr0_tlb_int12:1; /* RW */
+ unsigned long gr0_tlb_int13:1; /* RW */
+ unsigned long gr0_tlb_int14:1; /* RW */
+ unsigned long gr0_tlb_int15:1; /* RW */
+ unsigned long gr0_tlb_int16:1; /* RW */
+ unsigned long gr0_tlb_int17:1; /* RW */
+ unsigned long gr0_tlb_int18:1; /* RW */
+ unsigned long gr0_tlb_int19:1; /* RW */
+ unsigned long gr0_tlb_int20:1; /* RW */
+ unsigned long gr0_tlb_int21:1; /* RW */
+ unsigned long gr0_tlb_int22:1; /* RW */
+ unsigned long gr0_tlb_int23:1; /* RW */
+ unsigned long gr1_tlb_int0:1; /* RW */
+ unsigned long gr1_tlb_int1:1; /* RW */
+ unsigned long gr1_tlb_int2:1; /* RW */
+ unsigned long gr1_tlb_int3:1; /* RW */
+ unsigned long gr1_tlb_int4:1; /* RW */
+ unsigned long gr1_tlb_int5:1; /* RW */
+ unsigned long gr1_tlb_int6:1; /* RW */
+ unsigned long gr1_tlb_int7:1; /* RW */
+ unsigned long gr1_tlb_int8:1; /* RW */
+ unsigned long gr1_tlb_int9:1; /* RW */
+ unsigned long gr1_tlb_int10:1; /* RW */
+ unsigned long gr1_tlb_int11:1; /* RW */
+ unsigned long gr1_tlb_int12:1; /* RW */
+ unsigned long gr1_tlb_int13:1; /* RW */
+ unsigned long gr1_tlb_int14:1; /* RW */
+ unsigned long gr1_tlb_int15:1; /* RW */
+ unsigned long gr1_tlb_int16:1; /* RW */
+ unsigned long gr1_tlb_int17:1; /* RW */
+ unsigned long gr1_tlb_int18:1; /* RW */
+ unsigned long gr1_tlb_int19:1; /* RW */
+ unsigned long gr1_tlb_int20:1; /* RW */
+ unsigned long gr1_tlb_int21:1; /* RW */
+ unsigned long gr1_tlb_int22:1; /* RW */
+ unsigned long gr1_tlb_int23:1; /* RW */
+ unsigned long rsvd_51_63:13;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_event_occurred1_s {
+ unsigned long bau_data:1; /* RW */
+ unsigned long power_management_req:1; /* RW */
+ unsigned long message_accelerator_int0:1; /* RW */
+ unsigned long message_accelerator_int1:1; /* RW */
+ unsigned long message_accelerator_int2:1; /* RW */
+ unsigned long message_accelerator_int3:1; /* RW */
+ unsigned long message_accelerator_int4:1; /* RW */
+ unsigned long message_accelerator_int5:1; /* RW */
+ unsigned long message_accelerator_int6:1; /* RW */
+ unsigned long message_accelerator_int7:1; /* RW */
+ unsigned long message_accelerator_int8:1; /* RW */
+ unsigned long message_accelerator_int9:1; /* RW */
+ unsigned long message_accelerator_int10:1; /* RW */
+ unsigned long message_accelerator_int11:1; /* RW */
+ unsigned long message_accelerator_int12:1; /* RW */
+ unsigned long message_accelerator_int13:1; /* RW */
+ unsigned long message_accelerator_int14:1; /* RW */
+ unsigned long message_accelerator_int15:1; /* RW */
+ unsigned long gr0_tlb_int0:1; /* RW */
+ unsigned long gr0_tlb_int1:1; /* RW */
+ unsigned long gr0_tlb_int2:1; /* RW */
+ unsigned long gr0_tlb_int3:1; /* RW */
+ unsigned long gr0_tlb_int4:1; /* RW */
+ unsigned long gr0_tlb_int5:1; /* RW */
+ unsigned long gr0_tlb_int6:1; /* RW */
+ unsigned long gr0_tlb_int7:1; /* RW */
+ unsigned long gr0_tlb_int8:1; /* RW */
+ unsigned long gr0_tlb_int9:1; /* RW */
+ unsigned long gr0_tlb_int10:1; /* RW */
+ unsigned long gr0_tlb_int11:1; /* RW */
+ unsigned long gr0_tlb_int12:1; /* RW */
+ unsigned long gr0_tlb_int13:1; /* RW */
+ unsigned long gr0_tlb_int14:1; /* RW */
+ unsigned long gr0_tlb_int15:1; /* RW */
+ unsigned long gr1_tlb_int0:1; /* RW */
+ unsigned long gr1_tlb_int1:1; /* RW */
+ unsigned long gr1_tlb_int2:1; /* RW */
+ unsigned long gr1_tlb_int3:1; /* RW */
+ unsigned long gr1_tlb_int4:1; /* RW */
+ unsigned long gr1_tlb_int5:1; /* RW */
+ unsigned long gr1_tlb_int6:1; /* RW */
+ unsigned long gr1_tlb_int7:1; /* RW */
+ unsigned long gr1_tlb_int8:1; /* RW */
+ unsigned long gr1_tlb_int9:1; /* RW */
+ unsigned long gr1_tlb_int10:1; /* RW */
+ unsigned long gr1_tlb_int11:1; /* RW */
+ unsigned long gr1_tlb_int12:1; /* RW */
+ unsigned long gr1_tlb_int13:1; /* RW */
+ unsigned long gr1_tlb_int14:1; /* RW */
+ unsigned long gr1_tlb_int15:1; /* RW */
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rsvd_52_63:12;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_event_occurred1_s {
+ unsigned long bau_data:1; /* RW */
+ unsigned long power_management_req:1; /* RW */
+ unsigned long message_accelerator_int0:1; /* RW */
+ unsigned long message_accelerator_int1:1; /* RW */
+ unsigned long message_accelerator_int2:1; /* RW */
+ unsigned long message_accelerator_int3:1; /* RW */
+ unsigned long message_accelerator_int4:1; /* RW */
+ unsigned long message_accelerator_int5:1; /* RW */
+ unsigned long message_accelerator_int6:1; /* RW */
+ unsigned long message_accelerator_int7:1; /* RW */
+ unsigned long message_accelerator_int8:1; /* RW */
+ unsigned long message_accelerator_int9:1; /* RW */
+ unsigned long message_accelerator_int10:1; /* RW */
+ unsigned long message_accelerator_int11:1; /* RW */
+ unsigned long message_accelerator_int12:1; /* RW */
+ unsigned long message_accelerator_int13:1; /* RW */
+ unsigned long message_accelerator_int14:1; /* RW */
+ unsigned long message_accelerator_int15:1; /* RW */
+ unsigned long gr0_tlb_int0:1; /* RW */
+ unsigned long gr0_tlb_int1:1; /* RW */
+ unsigned long gr0_tlb_int2:1; /* RW */
+ unsigned long gr0_tlb_int3:1; /* RW */
+ unsigned long gr0_tlb_int4:1; /* RW */
+ unsigned long gr0_tlb_int5:1; /* RW */
+ unsigned long gr0_tlb_int6:1; /* RW */
+ unsigned long gr0_tlb_int7:1; /* RW */
+ unsigned long gr0_tlb_int8:1; /* RW */
+ unsigned long gr0_tlb_int9:1; /* RW */
+ unsigned long gr0_tlb_int10:1; /* RW */
+ unsigned long gr0_tlb_int11:1; /* RW */
+ unsigned long gr0_tlb_int12:1; /* RW */
+ unsigned long gr0_tlb_int13:1; /* RW */
+ unsigned long gr0_tlb_int14:1; /* RW */
+ unsigned long gr0_tlb_int15:1; /* RW */
+ unsigned long gr1_tlb_int0:1; /* RW */
+ unsigned long gr1_tlb_int1:1; /* RW */
+ unsigned long gr1_tlb_int2:1; /* RW */
+ unsigned long gr1_tlb_int3:1; /* RW */
+ unsigned long gr1_tlb_int4:1; /* RW */
+ unsigned long gr1_tlb_int5:1; /* RW */
+ unsigned long gr1_tlb_int6:1; /* RW */
+ unsigned long gr1_tlb_int7:1; /* RW */
+ unsigned long gr1_tlb_int8:1; /* RW */
+ unsigned long gr1_tlb_int9:1; /* RW */
+ unsigned long gr1_tlb_int10:1; /* RW */
+ unsigned long gr1_tlb_int11:1; /* RW */
+ unsigned long gr1_tlb_int12:1; /* RW */
+ unsigned long gr1_tlb_int13:1; /* RW */
+ unsigned long gr1_tlb_int14:1; /* RW */
+ unsigned long gr1_tlb_int15:1; /* RW */
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rsvd_52_63:12;
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED1_ALIAS */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED1_ALIAS 0x70088UL
+
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED2 */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED2 0x70100UL
+
+
+
+/* UVYH common defines */
+#define UVYH_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 0
+#define UVYH_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000000001UL
+#define UVYH_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 1
+#define UVYH_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000000002UL
+#define UVYH_EVENT_OCCURRED2_RTC_0_SHFT 2
+#define UVYH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000004UL
+#define UVYH_EVENT_OCCURRED2_RTC_1_SHFT 3
+#define UVYH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000008UL
+#define UVYH_EVENT_OCCURRED2_RTC_2_SHFT 4
+#define UVYH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000010UL
+#define UVYH_EVENT_OCCURRED2_RTC_3_SHFT 5
+#define UVYH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000020UL
+#define UVYH_EVENT_OCCURRED2_RTC_4_SHFT 6
+#define UVYH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000040UL
+#define UVYH_EVENT_OCCURRED2_RTC_5_SHFT 7
+#define UVYH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000080UL
+#define UVYH_EVENT_OCCURRED2_RTC_6_SHFT 8
+#define UVYH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000100UL
+#define UVYH_EVENT_OCCURRED2_RTC_7_SHFT 9
+#define UVYH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000200UL
+#define UVYH_EVENT_OCCURRED2_RTC_8_SHFT 10
+#define UVYH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000400UL
+#define UVYH_EVENT_OCCURRED2_RTC_9_SHFT 11
+#define UVYH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000800UL
+#define UVYH_EVENT_OCCURRED2_RTC_10_SHFT 12
+#define UVYH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000001000UL
+#define UVYH_EVENT_OCCURRED2_RTC_11_SHFT 13
+#define UVYH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000002000UL
+#define UVYH_EVENT_OCCURRED2_RTC_12_SHFT 14
+#define UVYH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000004000UL
+#define UVYH_EVENT_OCCURRED2_RTC_13_SHFT 15
+#define UVYH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000008000UL
+#define UVYH_EVENT_OCCURRED2_RTC_14_SHFT 16
+#define UVYH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000010000UL
+#define UVYH_EVENT_OCCURRED2_RTC_15_SHFT 17
+#define UVYH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000020000UL
+#define UVYH_EVENT_OCCURRED2_RTC_16_SHFT 18
+#define UVYH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000040000UL
+#define UVYH_EVENT_OCCURRED2_RTC_17_SHFT 19
+#define UVYH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000080000UL
+#define UVYH_EVENT_OCCURRED2_RTC_18_SHFT 20
+#define UVYH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000100000UL
+#define UVYH_EVENT_OCCURRED2_RTC_19_SHFT 21
+#define UVYH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000200000UL
+#define UVYH_EVENT_OCCURRED2_RTC_20_SHFT 22
+#define UVYH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000400000UL
+#define UVYH_EVENT_OCCURRED2_RTC_21_SHFT 23
+#define UVYH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000800000UL
+#define UVYH_EVENT_OCCURRED2_RTC_22_SHFT 24
+#define UVYH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000001000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_23_SHFT 25
+#define UVYH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000002000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_24_SHFT 26
+#define UVYH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000004000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_25_SHFT 27
+#define UVYH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000008000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_26_SHFT 28
+#define UVYH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000010000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_27_SHFT 29
+#define UVYH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000020000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_28_SHFT 30
+#define UVYH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000040000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_29_SHFT 31
+#define UVYH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000080000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_30_SHFT 32
+#define UVYH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000100000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_31_SHFT 33
+#define UVYH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000200000000UL
+
+/* UV4 unique defines */
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_SHFT 0
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000001UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_SHFT 1
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_SHFT 2
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000004UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_SHFT 3
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000008UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_SHFT 4
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000010UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_SHFT 5
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000020UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_SHFT 6
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000040UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_SHFT 7
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000080UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_SHFT 8
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000100UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_SHFT 9
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000200UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_SHFT 10
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_SHFT 11
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000000800UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_SHFT 12
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000001000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_SHFT 13
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000002000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_SHFT 14
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000004000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_SHFT 15
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000008000UL
+#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 16
+#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000010000UL
+#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 17
+#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED2_RTC_0_SHFT 18
+#define UV4H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED2_RTC_1_SHFT 19
+#define UV4H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED2_RTC_2_SHFT 20
+#define UV4H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED2_RTC_3_SHFT 21
+#define UV4H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED2_RTC_4_SHFT 22
+#define UV4H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED2_RTC_5_SHFT 23
+#define UV4H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED2_RTC_6_SHFT 24
+#define UV4H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_7_SHFT 25
+#define UV4H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_8_SHFT 26
+#define UV4H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_9_SHFT 27
+#define UV4H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_10_SHFT 28
+#define UV4H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_11_SHFT 29
+#define UV4H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_12_SHFT 30
+#define UV4H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_13_SHFT 31
+#define UV4H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_14_SHFT 32
+#define UV4H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_15_SHFT 33
+#define UV4H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_16_SHFT 34
+#define UV4H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_17_SHFT 35
+#define UV4H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_18_SHFT 36
+#define UV4H_EVENT_OCCURRED2_RTC_18_MASK 0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_19_SHFT 37
+#define UV4H_EVENT_OCCURRED2_RTC_19_MASK 0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_20_SHFT 38
+#define UV4H_EVENT_OCCURRED2_RTC_20_MASK 0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_21_SHFT 39
+#define UV4H_EVENT_OCCURRED2_RTC_21_MASK 0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_22_SHFT 40
+#define UV4H_EVENT_OCCURRED2_RTC_22_MASK 0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_23_SHFT 41
+#define UV4H_EVENT_OCCURRED2_RTC_23_MASK 0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_24_SHFT 42
+#define UV4H_EVENT_OCCURRED2_RTC_24_MASK 0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_25_SHFT 43
+#define UV4H_EVENT_OCCURRED2_RTC_25_MASK 0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_26_SHFT 44
+#define UV4H_EVENT_OCCURRED2_RTC_26_MASK 0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_27_SHFT 45
+#define UV4H_EVENT_OCCURRED2_RTC_27_MASK 0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_28_SHFT 46
+#define UV4H_EVENT_OCCURRED2_RTC_28_MASK 0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_29_SHFT 47
+#define UV4H_EVENT_OCCURRED2_RTC_29_MASK 0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_30_SHFT 48
+#define UV4H_EVENT_OCCURRED2_RTC_30_MASK 0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_31_SHFT 49
+#define UV4H_EVENT_OCCURRED2_RTC_31_MASK 0x0002000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV3H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV3H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV3H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV3H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV3H_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UV3H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UV3H_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UV3H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UV3H_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UV3H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UV3H_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UV3H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UV3H_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UV3H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UV3H_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UV3H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UV3H_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UV3H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UV3H_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UV3H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UV3H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UV3H_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UV3H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UV3H_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UV3H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UV3H_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UV3H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UV3H_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UV3H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UV3H_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UV3H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UV3H_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UV3H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UV3H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UV3H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UV3H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UV3H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UV3H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UV3H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UV3H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UV3H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UV3H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UV3H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UV3H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UV3H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UV3H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UV3H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+/* UV2 unique defines */
+#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+#define UVH_EVENT_OCCURRED2_RTC_1_MASK ( \
+ is_uv(UV5) ? 0x0000000000000008UL : \
+ is_uv(UV4) ? 0x0000000000080000UL : \
+ is_uv(UV3) ? 0x0000000000000002UL : \
+ is_uv(UV2) ? 0x0000000000000002UL : \
+ 0)
+#define UVH_EVENT_OCCURRED2_RTC_1_SHFT ( \
+ is_uv(UV5) ? 3 : \
+ is_uv(UV4) ? 19 : \
+ is_uv(UV3) ? 1 : \
+ is_uv(UV2) ? 1 : \
+ -1)
+
+union uvyh_event_occurred2_u {
+ unsigned long v;
+
+ /* UVYH common struct */
+ struct uvyh_event_occurred2_s {
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_34_63:30;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_event_occurred2_s {
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_34_63:30;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_event_occurred2_s {
+ unsigned long message_accelerator_int0:1; /* RW */
+ unsigned long message_accelerator_int1:1; /* RW */
+ unsigned long message_accelerator_int2:1; /* RW */
+ unsigned long message_accelerator_int3:1; /* RW */
+ unsigned long message_accelerator_int4:1; /* RW */
+ unsigned long message_accelerator_int5:1; /* RW */
+ unsigned long message_accelerator_int6:1; /* RW */
+ unsigned long message_accelerator_int7:1; /* RW */
+ unsigned long message_accelerator_int8:1; /* RW */
+ unsigned long message_accelerator_int9:1; /* RW */
+ unsigned long message_accelerator_int10:1; /* RW */
+ unsigned long message_accelerator_int11:1; /* RW */
+ unsigned long message_accelerator_int12:1; /* RW */
+ unsigned long message_accelerator_int13:1; /* RW */
+ unsigned long message_accelerator_int14:1; /* RW */
+ unsigned long message_accelerator_int15:1; /* RW */
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_50_63:14;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_event_occurred2_s {
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_32_63:32;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_event_occurred2_s {
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_32_63:32;
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED2_ALIAS */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED2_ALIAS 0x70108UL
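
(Editor's note, not part of the patch.) The EVENT_OCCURRED2 block above follows the pattern used throughout this header: per-hub *_SHFT/*_MASK constants, an is_uv()-based selector macro that resolves the right value for the running hub, and a union whose bitfield views (sy for the UVY-class common layout, s5/s4/s3/s2 for the per-hub layouts) overlay the raw 64-bit MMR value. A minimal usage sketch follows; it assumes the pre-existing uv_read_local_mmr()/uv_write_local_mmr() helpers from <asm/uv/uv_hub.h> and assumes the *_ALIAS register behaves as a write-to-clear alias, neither of which is defined by this hunk:

	/* Hypothetical example: test and clear the RTC_1 event on the local hub. */
	union uvyh_event_occurred2_u ev;

	ev.v = uv_read_local_mmr(UVH_EVENT_OCCURRED2);	/* raw 64-bit register image */
	if (ev.v & UVH_EVENT_OCCURRED2_RTC_1_MASK)	/* mask resolved via is_uv() */
		uv_write_local_mmr(UVH_EVENT_OCCURRED2_ALIAS,
				   UVH_EVENT_OCCURRED2_RTC_1_MASK);
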
/* ========================================================================= */
@@ -585,51 +2135,148 @@ union uvh_event_occurred0_u {
/* ========================================================================= */
#define UVH_EXTIO_INT0_BROADCAST 0x61448UL
-#define UV2H_EXTIO_INT0_BROADCAST_32 0x3f0
-#define UV3H_EXTIO_INT0_BROADCAST_32 0x3f0
-#define UV4H_EXTIO_INT0_BROADCAST_32 0x310
-#define UVH_EXTIO_INT0_BROADCAST_32 ( \
- is_uv2_hub() ? UV2H_EXTIO_INT0_BROADCAST_32 : \
- is_uv3_hub() ? UV3H_EXTIO_INT0_BROADCAST_32 : \
- /*is_uv4_hub*/ UV4H_EXTIO_INT0_BROADCAST_32)
-
+/* UVH common defines */
#define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT 0
#define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK 0x0000000000000001UL
union uvh_extio_int0_broadcast_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_extio_int0_broadcast_s {
unsigned long enable:1; /* RW */
unsigned long rsvd_1_63:63;
} s;
+
+ /* UV5 unique struct */
+ struct uv5h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_GAM_GR_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_GAM_GR_CONFIG ( \
+ is_uv(UV5) ? 0x600028UL : \
+ is_uv(UV4) ? 0x600028UL : \
+ is_uv(UV3) ? 0xc00028UL : \
+ is_uv(UV2) ? 0xc00028UL : \
+ 0)
+
+
+
+/* UVYH common defines */
+#define UVYH_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UVYH_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+/* UV4 unique defines */
+#define UV4H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV4H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+/* UV3 unique defines */
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+/* UV2 unique defines */
+#define UV2H_GR0_GAM_GR_CONFIG_N_GR_SHFT 0
+#define UV2H_GR0_GAM_GR_CONFIG_N_GR_MASK 0x000000000000000fUL
+
+
+union uvyh_gr0_gam_gr_config_u {
+ unsigned long v;
+
+ /* UVYH common struct */
+ struct uvyh_gr0_gam_gr_config_s {
+ unsigned long rsvd_0_9:10;
+ unsigned long subspace:1; /* RW */
+ unsigned long rsvd_11_63:53;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_gr0_gam_gr_config_s {
+ unsigned long rsvd_0_9:10;
+ unsigned long subspace:1; /* RW */
+ unsigned long rsvd_11_63:53;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_gr0_gam_gr_config_s {
+ unsigned long rsvd_0_9:10;
+ unsigned long subspace:1; /* RW */
+ unsigned long rsvd_11_63:53;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_gr0_gam_gr_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long undef_6_9:4; /* Undefined */
+ unsigned long subspace:1; /* RW */
+ unsigned long reserved:53;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_gr0_gam_gr_config_s {
+ unsigned long n_gr:4; /* RW */
+ unsigned long reserved:60;
+ } s2;
};
/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
-
-#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
-#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
-#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
-#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
-#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR0_TLB_INT0_CONFIG ( \
+ is_uv(UV4) ? 0x61b00UL : \
+ is_uv(UV3) ? 0x61b00UL : \
+ is_uv(UV2) ? 0x61b00UL : \
+ uv_undefined("UVH_GR0_TLB_INT0_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVXH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVXH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVXH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVXH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVXH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr0_tlb_int0_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr0_tlb_int0_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -642,33 +2289,97 @@ union uvh_gr0_tlb_int0_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
+
+ /* UVXH common struct */
+ struct uvxh_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s2;
};
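/*
 * Illustrative sketch, not part of the patch above: one way the per-hub
 * address macro and its bit-field union are typically combined.  The
 * accessors uv_read_local_mmr()/uv_write_local_mmr() are assumed to come
 * from <asm/uv/uv_hub.h>; the helper name and the idea of updating only the
 * vector field are hypothetical.
 */
static inline void uv_example_set_gr0_tlb_int0_vector(unsigned char vector)
{
	union uvh_gr0_tlb_int0_config_u cfg;

	/* UVH_GR0_TLB_INT0_CONFIG expands to the address for the running hub */
	cfg.v = uv_read_local_mmr(UVH_GR0_TLB_INT0_CONFIG);
	cfg.s.vector_ = vector;	/* layout shared by UV2, UV3 and UV4 */
	uv_write_local_mmr(UVH_GR0_TLB_INT0_CONFIG, cfg.v);
}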
/* ========================================================================= */
/* UVH_GR0_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
-
-#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
-#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
-#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
-#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
-#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
-#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
-#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR0_TLB_INT1_CONFIG ( \
+ is_uv(UV4) ? 0x61b40UL : \
+ is_uv(UV3) ? 0x61b40UL : \
+ is_uv(UV2) ? 0x61b40UL : \
+ uv_undefined("UVH_GR0_TLB_INT1_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVXH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVXH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVXH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVXH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVXH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr0_tlb_int1_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr0_tlb_int1_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -681,382 +2392,97 @@ union uvh_gr0_tlb_int1_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
-};
-/* ========================================================================= */
-/* UVH_GR0_TLB_MMR_CONTROL */
-/* ========================================================================= */
-#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UV4H_GR0_TLB_MMR_CONTROL 0x601080UL
-#define UVH_GR0_TLB_MMR_CONTROL ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL)
-
-#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-
-#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
-#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
-
-#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 13
-#define UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_SHFT 59
-#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000001fffUL
-#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000006000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_MASK 0xf800000000000000UL
-
-#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK)
-#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK)
-#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT)
-
-union uvh_gr0_tlb_mmr_control_u {
- unsigned long v;
- struct uvh_gr0_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_48:17;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
- } s;
- struct uvxh_gr0_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long rsvd_48:1;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
+ /* UVXH common struct */
+ struct uvxh_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} sx;
- struct uv2h_gr0_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long mmr_inj_con:1; /* RW */
- unsigned long rsvd_49_51:3;
- unsigned long mmr_inj_tlbram:1; /* RW */
- unsigned long rsvd_53_63:11;
- } s2;
- struct uv3h_gr0_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long undef_52:1; /* Undefined */
- unsigned long rsvd_53_63:11;
- } s3;
- struct uv4h_gr0_tlb_mmr_control_s {
- unsigned long index:13; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_15:1;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_58:7;
- unsigned long page_size:5; /* RW */
+
+ /* UV4 unique struct */
+ struct uv4h_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s4;
-};
-/* ========================================================================= */
-/* UVH_GR0_TLB_MMR_READ_DATA_HI */
-/* ========================================================================= */
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI 0x6010a0UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_HI : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_HI)
-
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_SHFT 34
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 49
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 51
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 52
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 53
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x00000003ffffffffUL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_MASK 0x0001fffc00000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0006000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0008000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0010000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0020000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-
-union uvh_gr0_tlb_mmr_read_data_hi_u {
- unsigned long v;
- struct uv2h_gr0_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long rsvd_45_63:19;
- } s2;
- struct uv3h_gr0_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_46_54:9; /* Undefined */
- unsigned long way_ecc:9; /* RO */
+ /* UV3 unique struct */
+ struct uv3h_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s3;
- struct uv4h_gr0_tlb_mmr_read_data_hi_s {
- unsigned long pfn:34; /* RO */
- unsigned long pnid:15; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_54:1; /* Undefined */
- unsigned long way_ecc:9; /* RO */
- } s4;
-};
-/* ========================================================================= */
-/* UVH_GR0_TLB_MMR_READ_DATA_LO */
-/* ========================================================================= */
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO 0x6010a8UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_LO : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_LO)
-
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-
-union uvh_gr0_tlb_mmr_read_data_lo_u {
- unsigned long v;
- struct uvh_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s;
- struct uvxh_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } sx;
- struct uv2h_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
+ /* UV2 unique struct */
+ struct uv2h_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s2;
- struct uv3h_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s3;
- struct uv4h_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s4;
};
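/*
 * Illustrative sketch, not part of the patch above: the *_SHFT/*_MASK pairs
 * can be used directly when the bit-field union is inconvenient.  The helper
 * name is hypothetical; 'cfg' is assumed to hold a value read from
 * UVH_GR0_TLB_INT1_CONFIG.
 */
static inline unsigned int uv_example_gr0_tlb_int1_apic_id(unsigned long cfg)
{
	return (cfg & UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK) >>
			UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT;
}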
/* ========================================================================= */
/* UVH_GR1_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UV2H_GR1_TLB_INT0_CONFIG 0x61f00UL
-#define UV3H_GR1_TLB_INT0_CONFIG 0x61f00UL
-#define UV4H_GR1_TLB_INT0_CONFIG 0x62100UL
#define UVH_GR1_TLB_INT0_CONFIG ( \
- is_uv2_hub() ? UV2H_GR1_TLB_INT0_CONFIG : \
- is_uv3_hub() ? UV3H_GR1_TLB_INT0_CONFIG : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_INT0_CONFIG)
-
-#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
-#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
-#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
-#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
-#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+ is_uv(UV4) ? 0x62100UL : \
+ is_uv(UV3) ? 0x61f00UL : \
+ is_uv(UV2) ? 0x61f00UL : \
+ uv_undefined("UVH_GR1_TLB_INT0_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVXH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVXH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVXH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVXH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVXH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr1_tlb_int0_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr1_tlb_int0_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -1069,39 +2495,97 @@ union uvh_gr1_tlb_int0_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
+
+ /* UVXH common struct */
+ struct uvxh_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s2;
};
/* ========================================================================= */
/* UVH_GR1_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UV2H_GR1_TLB_INT1_CONFIG 0x61f40UL
-#define UV3H_GR1_TLB_INT1_CONFIG 0x61f40UL
-#define UV4H_GR1_TLB_INT1_CONFIG 0x62140UL
#define UVH_GR1_TLB_INT1_CONFIG ( \
- is_uv2_hub() ? UV2H_GR1_TLB_INT1_CONFIG : \
- is_uv3_hub() ? UV3H_GR1_TLB_INT1_CONFIG : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_INT1_CONFIG)
-
-#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
-#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
-#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
-#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
-#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
-#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
-#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+ is_uv(UV4) ? 0x62140UL : \
+ is_uv(UV3) ? 0x61f40UL : \
+ is_uv(UV2) ? 0x61f40UL : \
+ uv_undefined("UVH_GR1_TLB_INT1_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVXH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVXH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVXH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVXH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVXH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr1_tlb_int1_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr1_tlb_int1_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -1114,337 +2598,62 @@ union uvh_gr1_tlb_int1_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
-};
-/* ========================================================================= */
-/* UVH_GR1_TLB_MMR_CONTROL */
-/* ========================================================================= */
-#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UV4H_GR1_TLB_MMR_CONTROL 0x701080UL
-#define UVH_GR1_TLB_MMR_CONTROL ( \
- is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
- is_uv3_hub() ? UV3H_GR1_TLB_MMR_CONTROL : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_CONTROL)
-
-#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-
-#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
-#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
-
-#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 13
-#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_SHFT 59
-#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000001fffUL
-#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000006000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_MASK 0xf800000000000000UL
-
-
-union uvh_gr1_tlb_mmr_control_u {
- unsigned long v;
- struct uvh_gr1_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_48:17;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
- } s;
- struct uvxh_gr1_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long rsvd_48:1;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
+ /* UVXH common struct */
+ struct uvxh_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} sx;
- struct uv2h_gr1_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long mmr_inj_con:1; /* RW */
- unsigned long rsvd_49_51:3;
- unsigned long mmr_inj_tlbram:1; /* RW */
- unsigned long rsvd_53_63:11;
- } s2;
- struct uv3h_gr1_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long undef_52:1; /* Undefined */
- unsigned long rsvd_53_63:11;
- } s3;
- struct uv4h_gr1_tlb_mmr_control_s {
- unsigned long index:13; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_15:1;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_58:7;
- unsigned long page_size:5; /* RW */
+
+ /* UV4 unique struct */
+ struct uv4h_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s4;
-};
-/* ========================================================================= */
-/* UVH_GR1_TLB_MMR_READ_DATA_HI */
-/* ========================================================================= */
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI 0x7010a0UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI ( \
- is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
- is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_HI : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_HI)
-
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_SHFT 34
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 49
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 51
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 52
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 53
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x00000003ffffffffUL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_MASK 0x0001fffc00000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0006000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0008000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0010000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0020000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-
-union uvh_gr1_tlb_mmr_read_data_hi_u {
- unsigned long v;
- struct uv2h_gr1_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long rsvd_45_63:19;
- } s2;
- struct uv3h_gr1_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_46_54:9; /* Undefined */
- unsigned long way_ecc:9; /* RO */
+ /* UV3 unique struct */
+ struct uv3h_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s3;
- struct uv4h_gr1_tlb_mmr_read_data_hi_s {
- unsigned long pfn:34; /* RO */
- unsigned long pnid:15; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_54:1; /* Undefined */
- unsigned long way_ecc:9; /* RO */
- } s4;
-};
-/* ========================================================================= */
-/* UVH_GR1_TLB_MMR_READ_DATA_LO */
-/* ========================================================================= */
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO 0x7010a8UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO ( \
- is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
- is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_LO : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_LO)
-
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-
-union uvh_gr1_tlb_mmr_read_data_lo_u {
- unsigned long v;
- struct uvh_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s;
- struct uvxh_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } sx;
- struct uv2h_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
+ /* UV2 unique struct */
+ struct uv2h_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s2;
- struct uv3h_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s3;
- struct uv4h_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s4;
};
/* ========================================================================= */
@@ -1452,52 +2661,43 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
/* ========================================================================= */
#define UVH_INT_CMPB 0x22080UL
+/* UVH common defines */
#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
union uvh_int_cmpb_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_int_cmpb_s {
unsigned long real_time_cmpb:56; /* RW */
unsigned long rsvd_56_63:8;
} s;
-};
-
-/* ========================================================================= */
-/* UVH_INT_CMPC */
-/* ========================================================================= */
-#define UVH_INT_CMPC 0x22100UL
-
-
-#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
-#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
-
-union uvh_int_cmpc_u {
- unsigned long v;
- struct uvh_int_cmpc_s {
- unsigned long real_time_cmpc:56; /* RW */
+ /* UV5 unique struct */
+ struct uv5h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
unsigned long rsvd_56_63:8;
- } s;
-};
+ } s5;
-/* ========================================================================= */
-/* UVH_INT_CMPD */
-/* ========================================================================= */
-#define UVH_INT_CMPD 0x22180UL
-
-
-#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
-#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
+ /* UV4 unique struct */
+ struct uv4h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s4;
+ /* UV3 unique struct */
+ struct uv3h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s3;
-union uvh_int_cmpd_u {
- unsigned long v;
- struct uvh_int_cmpd_s {
- unsigned long real_time_cmpd:56; /* RW */
+ /* UV2 unique struct */
+ struct uv2h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
unsigned long rsvd_56_63:8;
- } s;
+ } s2;
};
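/*
 * Illustrative sketch, not part of the patch above: programming the RTC
 * compare-B value through UVH_INT_CMPB.  uv_write_local_mmr() is assumed to
 * come from <asm/uv/uv_hub.h>; the helper name and the 'expires' parameter
 * are hypothetical.  Masking with UVH_INT_CMPB_REAL_TIME_CMPB_MASK keeps
 * only the 56 architected bits.
 */
static inline void uv_example_set_rtc_cmpb(unsigned long expires)
{
	union uvh_int_cmpb_u cmpb;

	cmpb.v = 0;
	cmpb.s.real_time_cmpb = expires & UVH_INT_CMPB_REAL_TIME_CMPB_MASK;
	uv_write_local_mmr(UVH_INT_CMPB, cmpb.v);
}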
/* ========================================================================= */
@@ -1505,28 +2705,23 @@ union uvh_int_cmpd_u {
/* ========================================================================= */
#define UVH_IPI_INT 0x60500UL
-#define UV2H_IPI_INT_32 0x348
-#define UV3H_IPI_INT_32 0x348
-#define UV4H_IPI_INT_32 0x268
-#define UVH_IPI_INT_32 ( \
- is_uv2_hub() ? UV2H_IPI_INT_32 : \
- is_uv3_hub() ? UV3H_IPI_INT_32 : \
- /*is_uv4_hub*/ UV4H_IPI_INT_32)
-
+/* UVH common defines */
#define UVH_IPI_INT_VECTOR_SHFT 0
-#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
-#define UVH_IPI_INT_DESTMODE_SHFT 11
-#define UVH_IPI_INT_APIC_ID_SHFT 16
-#define UVH_IPI_INT_SEND_SHFT 63
#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
+#define UVH_IPI_INT_DESTMODE_SHFT 11
#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_IPI_INT_APIC_ID_SHFT 16
#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
+#define UVH_IPI_INT_SEND_SHFT 63
#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
union uvh_ipi_int_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_ipi_int_s {
unsigned long vector_:8; /* RW */
unsigned long delivery_mode:3; /* RW */
@@ -1536,903 +2731,105 @@ union uvh_ipi_int_u {
unsigned long rsvd_48_62:15;
unsigned long send:1; /* WP */
} s;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST")
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST)
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
-
-
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
-
-
-union uvh_lb_bau_intd_payload_queue_first_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_payload_queue_first_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_48:6;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s2;
- struct uv3h_lb_bau_intd_payload_queue_first_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_48:6;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s3;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST")
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST)
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
-
-
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
-
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
-
-
-union uvh_lb_bau_intd_payload_queue_last_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_payload_queue_last_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s2;
- struct uv3h_lb_bau_intd_payload_queue_last_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s3;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL")
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL)
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
-
-
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
-
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+ /* UV5 unique struct */
+ struct uv5h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
+ } s5;
-union uvh_lb_bau_intd_payload_queue_tail_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_payload_queue_tail_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s2;
- struct uv3h_lb_bau_intd_payload_queue_tail_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s3;
-};
+ /* UV4 unique struct */
+ struct uv4h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
+ } s4;
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE")
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE)
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
-
-
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
-
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
-
-
-union uvh_lb_bau_intd_software_acknowledge_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_software_acknowledge_s {
- unsigned long pending_0:1; /* RW */
- unsigned long pending_1:1; /* RW */
- unsigned long pending_2:1; /* RW */
- unsigned long pending_3:1; /* RW */
- unsigned long pending_4:1; /* RW */
- unsigned long pending_5:1; /* RW */
- unsigned long pending_6:1; /* RW */
- unsigned long pending_7:1; /* RW */
- unsigned long timeout_0:1; /* RW */
- unsigned long timeout_1:1; /* RW */
- unsigned long timeout_2:1; /* RW */
- unsigned long timeout_3:1; /* RW */
- unsigned long timeout_4:1; /* RW */
- unsigned long timeout_5:1; /* RW */
- unsigned long timeout_6:1; /* RW */
- unsigned long timeout_7:1; /* RW */
- unsigned long rsvd_16_63:48;
- } s2;
- struct uv3h_lb_bau_intd_software_acknowledge_s {
- unsigned long pending_0:1; /* RW */
- unsigned long pending_1:1; /* RW */
- unsigned long pending_2:1; /* RW */
- unsigned long pending_3:1; /* RW */
- unsigned long pending_4:1; /* RW */
- unsigned long pending_5:1; /* RW */
- unsigned long pending_6:1; /* RW */
- unsigned long pending_7:1; /* RW */
- unsigned long timeout_0:1; /* RW */
- unsigned long timeout_1:1; /* RW */
- unsigned long timeout_2:1; /* RW */
- unsigned long timeout_3:1; /* RW */
- unsigned long timeout_4:1; /* RW */
- unsigned long timeout_5:1; /* RW */
- unsigned long timeout_6:1; /* RW */
- unsigned long timeout_7:1; /* RW */
- unsigned long rsvd_16_63:48;
+ /* UV3 unique struct */
+ struct uv3h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
} s3;
-};
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
-#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS")
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS)
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
-
-
-/* ========================================================================= */
-/* UVH_LB_BAU_MISC_CONTROL */
-/* ========================================================================= */
-#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
-#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
-#define UV4H_LB_BAU_MISC_CONTROL 0xc8170UL
-#define UVH_LB_BAU_MISC_CONTROL ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL)
-
-#define UV2H_LB_BAU_MISC_CONTROL_32 0xa10
-#define UV3H_LB_BAU_MISC_CONTROL_32 0xa10
-#define UV4H_LB_BAU_MISC_CONTROL_32 0xa18
-#define UVH_LB_BAU_MISC_CONTROL_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_32)
-
-#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
-#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
-#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
-#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
-#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_SHFT 15
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_SHFT 37
-#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
-#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_SHFT 46
-#define UV4H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_MASK 0x00000000000f8000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_MASK 0x0000002000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_MASK 0x0000400000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK")
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK)
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT")
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT)
-#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK")
-#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK)
-#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT")
-#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT)
-
-union uvh_lb_bau_misc_control_u {
- unsigned long v;
- struct uvh_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long rsvd_15_19:5;
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long rsvd_29_47:19;
- unsigned long fun:16; /* RW */
- } s;
- struct uvxh_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long rsvd_15_19:5;
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long rsvd_36_47:12;
- unsigned long fun:16; /* RW */
- } sx;
- struct uv2h_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long enable_intd_soft_ack_mode:1; /* RW */
- unsigned long intd_soft_ack_timeout_period:4; /* RW */
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long rsvd_36_47:12;
- unsigned long fun:16; /* RW */
+ /* UV2 unique struct */
+ struct uv2h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
} s2;
- struct uv3h_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long enable_intd_soft_ack_mode:1; /* RW */
- unsigned long intd_soft_ack_timeout_period:4; /* RW */
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
- unsigned long enable_intd_prefetch_hint:1; /* RW */
- unsigned long thread_kill_timebase:8; /* RW */
- unsigned long rsvd_46_47:2;
- unsigned long fun:16; /* RW */
- } s3;
- struct uv4h_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long rsvd_15_19:5;
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
- unsigned long rsvd_37:1;
- unsigned long thread_kill_timebase:8; /* RW */
- unsigned long address_interleave_select:1; /* RW */
- unsigned long rsvd_47:1;
- unsigned long fun:16; /* RW */
- } s4;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL 0xc8020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL)
-
-#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
-#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
-#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9c8
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32)
-
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
-
-
-union uvh_lb_bau_sb_activation_control_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_control_s {
- unsigned long index:6; /* RW */
- unsigned long rsvd_6_61:56;
- unsigned long push:1; /* WP */
- unsigned long init:1; /* WP */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0 0xc8030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0)
-
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9d0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32)
-
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
-
-
-union uvh_lb_bau_sb_activation_status_0_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_status_0_s {
- unsigned long status:64; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1 0xc8040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1)
-
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9d8
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32)
-
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
-
-
-union uvh_lb_bau_sb_activation_status_1_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_status_1_s {
- unsigned long status:64; /* RW */
- } s;
};
/* ========================================================================= */
-/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE 0xc8010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE)
-
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9c0
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32)
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
-
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x00003ffffffff000UL
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 53
-#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000ffffffffff000UL
-#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0xffe0000000000000UL
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
- is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT)
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
- is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK)
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
- is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK)
-
-/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
-#define UV2H_NODE_ID 0x0UL
-#define UV3H_NODE_ID 0x0UL
-#define UV4H_NODE_ID 0x0UL
+/* UVH common defines*/
#define UVH_NODE_ID_FORCE1_SHFT 0
-#define UVH_NODE_ID_MANUFACTURER_SHFT 1
-#define UVH_NODE_ID_PART_NUMBER_SHFT 12
-#define UVH_NODE_ID_REVISION_SHFT 28
-#define UVH_NODE_ID_NODE_ID_SHFT 32
#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_SHFT 28
#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_NI_PORT_SHFT 57
-#define UVXH_NODE_ID_FORCE1_SHFT 0
-#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
-#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
-#define UVXH_NODE_ID_REVISION_SHFT 28
-#define UVXH_NODE_ID_NODE_ID_SHFT 32
-#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UVXH_NODE_ID_NI_PORT_SHFT 57
-#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+/* UVXH common defines */
#define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
#define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
#define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
-#define UV2H_NODE_ID_FORCE1_SHFT 0
-#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV2H_NODE_ID_REVISION_SHFT 28
-#define UV2H_NODE_ID_NODE_ID_SHFT 32
-#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UV2H_NODE_ID_NI_PORT_SHFT 57
-#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
-#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
-
-#define UV3H_NODE_ID_FORCE1_SHFT 0
-#define UV3H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV3H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV3H_NODE_ID_REVISION_SHFT 28
-#define UV3H_NODE_ID_NODE_ID_SHFT 32
-#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
-#define UV3H_NODE_ID_RESERVED_2_SHFT 49
-#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UV3H_NODE_ID_NI_PORT_SHFT 57
-#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
-#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
-#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
-#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
-
-#define UV4H_NODE_ID_FORCE1_SHFT 0
-#define UV4H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV4H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV4H_NODE_ID_REVISION_SHFT 28
-#define UV4H_NODE_ID_NODE_ID_SHFT 32
+/* UVYH common defines */
+#define UVYH_NODE_ID_NODE_ID_MASK 0x0000007f00000000UL
+#define UVYH_NODE_ID_NI_PORT_MASK 0x7e00000000000000UL
+
+/* UV4 unique defines */
#define UV4H_NODE_ID_ROUTER_SELECT_SHFT 48
-#define UV4H_NODE_ID_RESERVED_2_SHFT 49
-#define UV4H_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UV4H_NODE_ID_NI_PORT_SHFT 57
-#define UV4H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV4H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV4H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV4H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV4H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
#define UV4H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
+#define UV4H_NODE_ID_RESERVED_2_SHFT 49
#define UV4H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
-#define UV4H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
-#define UV4H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
+#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
+#define UV3H_NODE_ID_RESERVED_2_SHFT 49
+#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
union uvh_node_id_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
unsigned long part_number:16; /* RO */
unsigned long revision:4; /* RO */
- unsigned long node_id:15; /* RW */
- unsigned long rsvd_47_63:17;
+ unsigned long rsvd_32_63:32;
} s;
+
+ /* UVXH common struct */
struct uvxh_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
@@ -2444,17 +2841,47 @@ union uvh_node_id_u {
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
} sx;
- struct uv2h_node_id_s {
+
+ /* UVYH common struct */
+ struct uvyh_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:7; /* RW */
+ unsigned long rsvd_39_56:18;
+ unsigned long ni_port:6; /* RO */
+ unsigned long rsvd_63:1;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:7; /* RW */
+ unsigned long rsvd_39_56:18;
+ unsigned long ni_port:6; /* RO */
+ unsigned long rsvd_63:1;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
unsigned long part_number:16; /* RO */
unsigned long revision:4; /* RO */
unsigned long node_id:15; /* RW */
- unsigned long rsvd_47_49:3;
+ unsigned long rsvd_47:1;
+ unsigned long router_select:1; /* RO */
+ unsigned long rsvd_49:1;
unsigned long nodes_per_bit:7; /* RO */
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
- } s2;
+ } s4;
+
+ /* UV3 unique struct */
struct uv3h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
@@ -2468,186 +2895,569 @@ union uvh_node_id_u {
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
} s3;
- struct uv4h_node_id_s {
+
+ /* UV2 unique struct */
+ struct uv2h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
unsigned long part_number:16; /* RO */
unsigned long revision:4; /* RO */
unsigned long node_id:15; /* RW */
- unsigned long rsvd_47:1;
- unsigned long router_select:1; /* RO */
- unsigned long rsvd_49:1;
+ unsigned long rsvd_47_49:3;
unsigned long nodes_per_bit:7; /* RO */
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
- } s4;
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_NODE_PRESENT_0 */
+/* ========================================================================= */
+#define UVH_NODE_PRESENT_0 ( \
+ is_uv(UV5) ? 0x1400UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_NODE_PRESENT_0_NODES_SHFT 0
+#define UVYH_NODE_PRESENT_0_NODES_MASK 0xffffffffffffffffUL
+
+
+union uvh_node_present_0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_node_present_0_s {
+ unsigned long nodes:64; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_node_present_0_s {
+ unsigned long nodes:64; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_node_present_0_s {
+ unsigned long nodes:64; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_NODE_PRESENT_1 */
+/* ========================================================================= */
+#define UVH_NODE_PRESENT_1 ( \
+ is_uv(UV5) ? 0x1408UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_NODE_PRESENT_1_NODES_SHFT 0
+#define UVYH_NODE_PRESENT_1_NODES_MASK 0xffffffffffffffffUL
+
+
+union uvh_node_present_1_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_node_present_1_s {
+ unsigned long nodes:64; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_node_present_1_s {
+ unsigned long nodes:64; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_node_present_1_s {
+ unsigned long nodes:64; /* RW */
+ } s5;
};
/* ========================================================================= */
/* UVH_NODE_PRESENT_TABLE */
/* ========================================================================= */
-#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE ( \
+ is_uv(UV4) ? 0x1400UL : \
+ is_uv(UV3) ? 0x1400UL : \
+ is_uv(UV2) ? 0x1400UL : \
+ 0)
-#define UV2H_NODE_PRESENT_TABLE_DEPTH 16
-#define UV3H_NODE_PRESENT_TABLE_DEPTH 16
-#define UV4H_NODE_PRESENT_TABLE_DEPTH 4
#define UVH_NODE_PRESENT_TABLE_DEPTH ( \
- is_uv2_hub() ? UV2H_NODE_PRESENT_TABLE_DEPTH : \
- is_uv3_hub() ? UV3H_NODE_PRESENT_TABLE_DEPTH : \
- /*is_uv4_hub*/ UV4H_NODE_PRESENT_TABLE_DEPTH)
+ is_uv(UV4) ? 4 : \
+ is_uv(UV3) ? 16 : \
+ is_uv(UV2) ? 16 : \
+ 0)
-#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
-#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
+
+/* UVXH common defines */
+#define UVXH_NODE_PRESENT_TABLE_NODES_SHFT 0
+#define UVXH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
union uvh_node_present_table_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_node_present_table_s {
unsigned long nodes:64; /* RW */
} s;
+
+ /* UVXH common struct */
+ struct uvxh_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_ADDR_MAP_CONFIG */
+/* ========================================================================= */
+#define UVH_RH10_GAM_ADDR_MAP_CONFIG ( \
+ is_uv(UV5) ? 0x470000UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_SHFT 6
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_MASK 0x00000000000001c0UL
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_LS_ENABLE_SHFT 12
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_LS_ENABLE_MASK 0x0000000000001000UL
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_MK_TME_KEYID_BITS_SHFT 16
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_MK_TME_KEYID_BITS_MASK 0x00000000000f0000UL
+
+
+union uvh_rh10_gam_addr_map_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_addr_map_config_s {
+ unsigned long undef_0_5:6; /* Undefined */
+ unsigned long n_skt:3; /* RW */
+ unsigned long undef_9_11:3; /* Undefined */
+ unsigned long ls_enable:1; /* RW */
+ unsigned long undef_13_15:3; /* Undefined */
+ unsigned long mk_tme_keyid_bits:4; /* RW */
+ unsigned long rsvd_20_63:44;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_addr_map_config_s {
+ unsigned long undef_0_5:6; /* Undefined */
+ unsigned long n_skt:3; /* RW */
+ unsigned long undef_9_11:3; /* Undefined */
+ unsigned long ls_enable:1; /* RW */
+ unsigned long undef_13_15:3; /* Undefined */
+ unsigned long mk_tme_keyid_bits:4; /* RW */
+ unsigned long rsvd_20_63:44;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_addr_map_config_s {
+ unsigned long undef_0_5:6; /* Undefined */
+ unsigned long n_skt:3; /* RW */
+ unsigned long undef_9_11:3; /* Undefined */
+ unsigned long ls_enable:1; /* RW */
+ unsigned long undef_13_15:3; /* Undefined */
+ unsigned long mk_tme_keyid_bits:4; /* RW */
+ } s5;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
+/* UVH_RH10_GAM_GRU_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x4800c8UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR)
-
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG ( \
+ is_uv(UV5) ? 0x4700b0UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 25
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffe000000UL
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_N_GRU_SHFT 52
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_N_GRU_MASK 0x0070000000000000UL
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffe000000UL : \
+ 0)
+#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV5) ? 25 : \
+ -1)
+
+union uvh_rh10_gam_gru_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_gru_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long n_gru:3; /* RW */
+ unsigned long undef_55_62:8; /* Undefined */
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_gru_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long n_gru:3; /* RW */
+ unsigned long undef_55_62:8; /* Undefined */
unsigned long enable:1; /* RW */
- } sx;
- struct uv2h_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_gru_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long n_gru:3; /* RW */
+ unsigned long undef_55_62:8; /* Undefined */
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0 ( \
+ is_uv(UV5) ? 0x473000UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x000ffffffc000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 52
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x03f0000000000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffc000000UL : \
+ 0)
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT ( \
+ is_uv(UV5) ? 26 : \
+ -1)
+
+union uvh_rh10_gam_mmioh_overlay_config0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
- } s3;
- struct uv4h_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1 ( \
+ is_uv(UV5) ? 0x474000UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x000ffffffc000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 52
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x03f0000000000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffc000000UL : \
+ 0)
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT ( \
+ is_uv(UV5) ? 26 : \
+ -1)
+
+union uvh_rh10_gam_mmioh_overlay_config1_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0 ( \
+ is_uv(UV5) ? 0x473800UL : \
+ 0)
+
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH ( \
+ is_uv(UV5) ? 128 : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x000000000000007fUL
+
+
+union uvh_rh10_gam_mmioh_redirect_config0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1 ( \
+ is_uv(UV5) ? 0x474800UL : \
+ 0)
+
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH ( \
+ is_uv(UV5) ? 128 : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x000000000000007fUL
+
+
+union uvh_rh10_gam_mmioh_redirect_config1_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMR_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG ( \
+ is_uv(UV5) ? 0x470090UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT 25
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_MASK 0x000ffffffe000000UL
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffe000000UL : \
+ 0)
+#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV5) ? 25 : \
+ -1)
+
+union uvh_rh10_gam_mmr_overlay_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmr_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long undef_52_62:11; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmr_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long undef_52_62:11; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmr_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long undef_52_62:11; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ADDR_MAP_CONFIG */
+/* ========================================================================= */
+#define UVH_RH_GAM_ADDR_MAP_CONFIG ( \
+ is_uv(UV4) ? 0x480000UL : \
+ is_uv(UV3) ? 0x1600000UL : \
+ is_uv(UV2) ? 0x1600000UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ADDR_MAP_CONFIG_N_SKT_SHFT 6
+#define UVXH_RH_GAM_ADDR_MAP_CONFIG_N_SKT_MASK 0x00000000000003c0UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UV3H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+
+/* UV2 unique defines */
+#define UV2H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UV2H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+
+
+union uvh_rh_gam_addr_map_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_addr_map_config_s {
+ unsigned long rsvd_0_5:6;
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_addr_map_config_s {
+ unsigned long rsvd_0_5:6;
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_addr_map_config_s {
+ unsigned long rsvd_0_5:6;
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
} s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_addr_map_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_addr_map_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
+/* UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x4800d8UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR)
-
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+#define UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x4800c8UL : \
+ is_uv(UV3) ? 0x16000c8UL : \
+ is_uv(UV2) ? 0x16000c8UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+
+union uvh_rh_gam_alias_0_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2655,7 +3465,9 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2663,15 +3475,19 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_alias210_overlay_config_1_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2679,66 +3495,96 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG ( \
+ is_uv(UV4) ? 0x4800d0UL : \
+ is_uv(UV3) ? 0x16000d0UL : \
+ is_uv(UV2) ? 0x16000d0UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL
+
+
+union uvh_rh_gam_alias_0_redirect_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
} s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
+/* UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x4800e8UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR)
-
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+#define UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x4800d8UL : \
+ is_uv(UV3) ? 0x16000d8UL : \
+ is_uv(UV2) ? 0x16000d8UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+
+union uvh_rh_gam_alias_1_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2746,7 +3592,9 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2754,15 +3602,19 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_alias210_overlay_config_2_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2770,321 +3622,289 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
- } s4;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
+/* UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x4800d0UL
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR)
-
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG ( \
+ is_uv(UV4) ? 0x4800e0UL : \
+ is_uv(UV3) ? 0x16000e0UL : \
+ is_uv(UV2) ? 0x16000e0UL : \
+ 0)
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_1_REDIRECT_CONFIG_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_1_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL
-union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
+union uvh_rh_gam_alias_1_redirect_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
- struct uvxh_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} sx;
- struct uv2h_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s2;
- struct uv3h_rh_gam_alias210_redirect_config_0_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s3;
- struct uv4h_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s4;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
+/* UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x4800e0UL
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR)
-
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x4800e8UL : \
+ is_uv(UV3) ? 0x16000e8UL : \
+ is_uv(UV2) ? 0x16000e8UL : \
+ 0)
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
+union uvh_rh_gam_alias_2_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
- } s2;
- struct uv3h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
- } s4;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
+/* UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x4800f0UL
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR)
-
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG ( \
+ is_uv(UV4) ? 0x4800f0UL : \
+ is_uv(UV3) ? 0x16000f0UL : \
+ is_uv(UV2) ? 0x16000f0UL : \
+ 0)
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_2_REDIRECT_CONFIG_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_2_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL
-union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
+union uvh_rh_gam_alias_2_redirect_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
- struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} sx;
- struct uv2h_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s2;
- struct uv3h_rh_gam_alias210_redirect_config_2_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s3;
- struct uv4h_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s4;
-};
-
-/* ========================================================================= */
-/* UVH_RH_GAM_CONFIG_MMR */
-/* ========================================================================= */
-#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
-#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
-#define UV4H_RH_GAM_CONFIG_MMR 0x480000UL
-#define UVH_RH_GAM_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_CONFIG_MMR)
-
-#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-
-union uvh_rh_gam_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_config_mmr_s {
- unsigned long rsvd_0_5:6;
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } s;
- struct uvxh_rh_gam_config_mmr_s {
- unsigned long rsvd_0_5:6;
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } sx;
- struct uv2h_rh_gam_config_mmr_s {
- unsigned long m_skt:6; /* RW */
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
} s2;
- struct uv3h_rh_gam_config_mmr_s {
- unsigned long m_skt:6; /* RW */
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } s3;
- struct uv4h_rh_gam_config_mmr_s {
- unsigned long rsvd_0_5:6;
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } s4;
};
/* ========================================================================= */
-/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
+/* UVH_RH_GAM_GRU_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x480010UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR)
-
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK ( \
- is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \
- is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK)
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT ( \
- is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \
- is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \
- /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT)
-
-union uvh_rh_gam_gru_overlay_config_mmr_u {
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x480010UL : \
+ is_uv(UV3) ? 0x1600010UL : \
+ is_uv(UV2) ? 0x1600010UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_N_GRU_SHFT 52
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_N_GRU_MASK 0x00f0000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26
+#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffc000000UL
+
+/* UV4 unique defines */
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffffc000000UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 28
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffff0000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MODE_SHFT 62
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MODE_MASK 0x4000000000000000UL
+
+/* UV2 unique defines */
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 28
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffff0000000UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffff0000000UL : \
+ is_uv(UV2) ? 0x00003ffff0000000UL : \
+ 0)
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 28 : \
+ is_uv(UV2) ? 28 : \
+ -1)
+
+union uvh_rh_gam_gru_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_51:52;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_45:46;
+ unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_gru_overlay_config_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_gru_overlay_config_s {
unsigned long rsvd_0_45:46;
unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_27:28;
- unsigned long base:18; /* RW */
+
+ /* UV4A unique struct */
+ struct uv4ah_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_24:25;
+ unsigned long undef_25:1; /* Undefined */
+ unsigned long base:26; /* RW */
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_24:25;
+ unsigned long undef_25:1; /* Undefined */
+ unsigned long base:20; /* RW */
unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_gru_overlay_config_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_gru_overlay_config_s {
unsigned long rsvd_0_27:28;
unsigned long base:18; /* RW */
unsigned long rsvd_46_51:6;
@@ -3093,86 +3913,141 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long mode:1; /* RW */
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_24:25;
- unsigned long undef_25:1; /* Undefined */
- unsigned long base:20; /* RW */
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s4;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x483000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR)
-
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 52
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x000ffffffc000000UL
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x03f0000000000000UL
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)
-
-union uvh_rh_gam_mmioh_overlay_config0_mmr_u {
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG ( \
+ is_uv(UV2) ? 0x1600030UL : \
+ 0)
+
+
+
+/* UV2 unique defines */
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT 27
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_MASK 0x00003ffff8000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_M_IO_SHFT 46
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_M_IO_MASK 0x000fc00000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_N_IO_SHFT 52
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_N_IO_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV2) ? 27 : \
+ uv_undefined("UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT"))
+
+union uvh_rh_gam_mmioh_overlay_config_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_overlay_config_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_overlay_config_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } sx;
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_mmioh_overlay_config_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0 */
+/* ========================================================================= */
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0 ( \
+ is_uv(UV4) ? 0x483000UL : \
+ is_uv(UV3) ? 0x1603000UL : \
+ 0)
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x03f0000000000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ 0)
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 26 : \
+ -1)
+
+union uvh_rh_gam_mmioh_overlay_config0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_overlay_config0_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s3;
- struct uv4h_rh_gam_mmioh_overlay_config0_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_overlay_config0_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s4;
+ } sx;
+
+ /* UV4A unique struct */
struct uv4ah_rh_gam_mmioh_overlay_config0_mmr_s {
unsigned long rsvd_0_25:26;
unsigned long base:26; /* RW */
@@ -3181,71 +4056,94 @@ union uvh_rh_gam_mmioh_overlay_config0_mmr_u {
unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
} s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1 */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1603000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x484000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR)
-
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 52
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x000ffffffc000000UL
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x03f0000000000000UL
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
-
-union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1 ( \
+ is_uv(UV4) ? 0x484000UL : \
+ is_uv(UV3) ? 0x1604000UL : \
+ 0)
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x03f0000000000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ 0)
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 26 : \
+ -1)
+
+union uvh_rh_gam_mmioh_overlay_config1_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_overlay_config1_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s3;
- struct uv4h_rh_gam_mmioh_overlay_config1_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_overlay_config1_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s4;
+ } sx;
+
+ /* UV4A unique struct */
struct uv4ah_rh_gam_mmioh_overlay_config1_mmr_s {
unsigned long rsvd_0_25:26;
unsigned long base:26; /* RW */
@@ -3254,232 +4152,275 @@ union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
} s4a;
-};
-/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
-/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR)
-
-
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_mmioh_overlay_config_mmr_u {
- unsigned long v;
- struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
- unsigned long rsvd_0_26:27;
- unsigned long base:19; /* RW */
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
- unsigned long n_io:4; /* RW */
+ unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s2;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0 */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x483800UL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0 ( \
+ is_uv(UV4) ? 0x483800UL : \
+ is_uv(UV3) ? 0x1603800UL : \
+ 0)
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH ( \
+ is_uv(UV4) ? 128 : \
+ is_uv(UV3) ? 128 : \
+ 0)
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000000fffUL
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000007fffUL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000007fffUL
-#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000000fffUL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK)
-
-union uvh_rh_gam_mmioh_redirect_config0_mmr_u {
+union uvh_rh_gam_mmioh_redirect_config0_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_redirect_config0_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s3;
- struct uv4h_rh_gam_mmioh_redirect_config0_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_redirect_config0_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s4;
- struct uv4ah_rh_gam_mmioh_redirect_config0_mmr_s {
+ } sx;
+
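+ /* UV4A unique struct */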
+ struct uv4ah_rh_gam_mmioh_redirect_config0_s {
unsigned long nasid:12; /* RW */
unsigned long rsvd_12_63:52;
} s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1 */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x484800UL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR)
-
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1 ( \
+ is_uv(UV4) ? 0x484800UL : \
+ is_uv(UV3) ? 0x1604800UL : \
+ 0)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH ( \
+ is_uv(UV4) ? 128 : \
+ is_uv(UV3) ? 128 : \
+ 0)
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000000fffUL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000007fffUL
-#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000000fffUL
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000007fffUL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK)
-union uvh_rh_gam_mmioh_redirect_config1_mmr_u {
+union uvh_rh_gam_mmioh_redirect_config1_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_redirect_config1_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s3;
- struct uv4h_rh_gam_mmioh_redirect_config1_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_redirect_config1_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s4;
- struct uv4ah_rh_gam_mmioh_redirect_config1_mmr_s {
+ } sx;
+
+ struct uv4ah_rh_gam_mmioh_redirect_config1_s {
unsigned long nasid:12; /* RW */
unsigned long rsvd_12_63:52;
} s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
+/* UVH_RH_GAM_MMR_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x480028UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR)
-
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_mmr_overlay_config_mmr_u {
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x480028UL : \
+ is_uv(UV3) ? 0x1600028UL : \
+ is_uv(UV2) ? 0x1600028UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT 26
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ is_uv(UV2) ? 0x00003ffffc000000UL : \
+ 0)
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26
+#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffc000000UL
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ is_uv(UV2) ? 0x00003ffffc000000UL : \
+ 0)
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 26 : \
+ is_uv(UV2) ? 26 : \
+ -1)
+
+union uvh_rh_gam_mmr_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
- } s4;
+ } s2;
};
/* ========================================================================= */
/* UVH_RTC */
/* ========================================================================= */
-#define UV2H_RTC 0x340000UL
-#define UV3H_RTC 0x340000UL
-#define UV4H_RTC 0xe0000UL
#define UVH_RTC ( \
- is_uv2_hub() ? UV2H_RTC : \
- is_uv3_hub() ? UV3H_RTC : \
- /*is_uv4_hub*/ UV4H_RTC)
+ is_uv(UV5) ? 0xe0000UL : \
+ is_uv(UV4) ? 0xe0000UL : \
+ is_uv(UV3) ? 0x340000UL : \
+ is_uv(UV2) ? 0x340000UL : \
+ 0)
+/* UVH common defines */
#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
union uvh_rtc_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_rtc_s {
unsigned long real_time_clock:56; /* RW */
unsigned long rsvd_56_63:8;
} s;
+
+ /* UV5 unique struct */
+ struct uv5h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s2;
};
/* ========================================================================= */
@@ -3487,26 +4428,29 @@ union uvh_rtc_u {
/* ========================================================================= */
#define UVH_RTC1_INT_CONFIG 0x615c0UL
+/* UVH common defines */
#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC1_INT_CONFIG_P_SHFT 13
-#define UVH_RTC1_INT_CONFIG_T_SHFT 15
-#define UVH_RTC1_INT_CONFIG_M_SHFT 16
-#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_rtc1_int_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_rtc1_int_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -3519,591 +4463,175 @@ union uvh_rtc1_int_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
-};
-
-/* ========================================================================= */
-/* UVH_SCRATCH5 */
-/* ========================================================================= */
-#define UV2H_SCRATCH5 0x2d0200UL
-#define UV3H_SCRATCH5 0x2d0200UL
-#define UV4H_SCRATCH5 0xb0200UL
-#define UVH_SCRATCH5 ( \
- is_uv2_hub() ? UV2H_SCRATCH5 : \
- is_uv3_hub() ? UV3H_SCRATCH5 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5)
-
-#define UV2H_SCRATCH5_32 0x778
-#define UV3H_SCRATCH5_32 0x778
-#define UV4H_SCRATCH5_32 0x798
-#define UVH_SCRATCH5_32 ( \
- is_uv2_hub() ? UV2H_SCRATCH5_32 : \
- is_uv3_hub() ? UV3H_SCRATCH5_32 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_32)
-
-#define UVH_SCRATCH5_SCRATCH5_SHFT 0
-#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-
-
-union uvh_scratch5_u {
- unsigned long v;
- struct uvh_scratch5_s {
- unsigned long scratch5:64; /* RW, W1CS */
- } s;
-};
-/* ========================================================================= */
-/* UVH_SCRATCH5_ALIAS */
-/* ========================================================================= */
-#define UV2H_SCRATCH5_ALIAS 0x2d0208UL
-#define UV3H_SCRATCH5_ALIAS 0x2d0208UL
-#define UV4H_SCRATCH5_ALIAS 0xb0208UL
-#define UVH_SCRATCH5_ALIAS ( \
- is_uv2_hub() ? UV2H_SCRATCH5_ALIAS : \
- is_uv3_hub() ? UV3H_SCRATCH5_ALIAS : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS)
-
-#define UV2H_SCRATCH5_ALIAS_32 0x780
-#define UV3H_SCRATCH5_ALIAS_32 0x780
-#define UV4H_SCRATCH5_ALIAS_32 0x7a0
-#define UVH_SCRATCH5_ALIAS_32 ( \
- is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_32 : \
- is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_32 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_32)
-
-
-/* ========================================================================= */
-/* UVH_SCRATCH5_ALIAS_2 */
-/* ========================================================================= */
-#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL
-#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL
-#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL
-#define UVH_SCRATCH5_ALIAS_2 ( \
- is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_2 : \
- is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_2 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_2)
-#define UVH_SCRATCH5_ALIAS_2_32 0x788
-
-
-/* ========================================================================= */
-/* UVXH_EVENT_OCCURRED2 */
-/* ========================================================================= */
-#define UVXH_EVENT_OCCURRED2 0x70100UL
-
-#define UV2H_EVENT_OCCURRED2_32 0xb68
-#define UV3H_EVENT_OCCURRED2_32 0xb68
-#define UV4H_EVENT_OCCURRED2_32 0x608
-#define UVH_EVENT_OCCURRED2_32 ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED2_32 : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED2_32 : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_32)
-
-
-#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
-#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
-#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
-#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
-#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
-#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
-#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
-#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
-#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
-#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
-#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
-#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
-#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
-#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
-#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
-#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
-#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
-#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
-#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
-#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
-#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
-#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
-#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
-#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
-#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
-#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
-#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
-#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
-#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
-#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
-#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
-#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
-#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
-
-#define UV3H_EVENT_OCCURRED2_RTC_0_SHFT 0
-#define UV3H_EVENT_OCCURRED2_RTC_1_SHFT 1
-#define UV3H_EVENT_OCCURRED2_RTC_2_SHFT 2
-#define UV3H_EVENT_OCCURRED2_RTC_3_SHFT 3
-#define UV3H_EVENT_OCCURRED2_RTC_4_SHFT 4
-#define UV3H_EVENT_OCCURRED2_RTC_5_SHFT 5
-#define UV3H_EVENT_OCCURRED2_RTC_6_SHFT 6
-#define UV3H_EVENT_OCCURRED2_RTC_7_SHFT 7
-#define UV3H_EVENT_OCCURRED2_RTC_8_SHFT 8
-#define UV3H_EVENT_OCCURRED2_RTC_9_SHFT 9
-#define UV3H_EVENT_OCCURRED2_RTC_10_SHFT 10
-#define UV3H_EVENT_OCCURRED2_RTC_11_SHFT 11
-#define UV3H_EVENT_OCCURRED2_RTC_12_SHFT 12
-#define UV3H_EVENT_OCCURRED2_RTC_13_SHFT 13
-#define UV3H_EVENT_OCCURRED2_RTC_14_SHFT 14
-#define UV3H_EVENT_OCCURRED2_RTC_15_SHFT 15
-#define UV3H_EVENT_OCCURRED2_RTC_16_SHFT 16
-#define UV3H_EVENT_OCCURRED2_RTC_17_SHFT 17
-#define UV3H_EVENT_OCCURRED2_RTC_18_SHFT 18
-#define UV3H_EVENT_OCCURRED2_RTC_19_SHFT 19
-#define UV3H_EVENT_OCCURRED2_RTC_20_SHFT 20
-#define UV3H_EVENT_OCCURRED2_RTC_21_SHFT 21
-#define UV3H_EVENT_OCCURRED2_RTC_22_SHFT 22
-#define UV3H_EVENT_OCCURRED2_RTC_23_SHFT 23
-#define UV3H_EVENT_OCCURRED2_RTC_24_SHFT 24
-#define UV3H_EVENT_OCCURRED2_RTC_25_SHFT 25
-#define UV3H_EVENT_OCCURRED2_RTC_26_SHFT 26
-#define UV3H_EVENT_OCCURRED2_RTC_27_SHFT 27
-#define UV3H_EVENT_OCCURRED2_RTC_28_SHFT 28
-#define UV3H_EVENT_OCCURRED2_RTC_29_SHFT 29
-#define UV3H_EVENT_OCCURRED2_RTC_30_SHFT 30
-#define UV3H_EVENT_OCCURRED2_RTC_31_SHFT 31
-#define UV3H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
-#define UV3H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
-#define UV3H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
-#define UV3H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
-#define UV3H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
-#define UV3H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
-#define UV3H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
-#define UV3H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
-#define UV3H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
-#define UV3H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
-#define UV3H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
-#define UV3H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
-#define UV3H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
-#define UV3H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
-#define UV3H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
-#define UV3H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
-#define UV3H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
-#define UV3H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
-#define UV3H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
-#define UV3H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
-#define UV3H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
-#define UV3H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
-#define UV3H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
-#define UV3H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
-#define UV3H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+ /* UV5 unique struct */
+ struct uv5h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s5;
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_SHFT 0
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_SHFT 1
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_SHFT 2
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_SHFT 3
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_SHFT 4
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_SHFT 5
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_SHFT 6
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_SHFT 7
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_SHFT 8
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_SHFT 9
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_SHFT 10
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_SHFT 11
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_SHFT 12
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_SHFT 13
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_SHFT 14
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_SHFT 15
-#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 16
-#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 17
-#define UV4H_EVENT_OCCURRED2_RTC_0_SHFT 18
-#define UV4H_EVENT_OCCURRED2_RTC_1_SHFT 19
-#define UV4H_EVENT_OCCURRED2_RTC_2_SHFT 20
-#define UV4H_EVENT_OCCURRED2_RTC_3_SHFT 21
-#define UV4H_EVENT_OCCURRED2_RTC_4_SHFT 22
-#define UV4H_EVENT_OCCURRED2_RTC_5_SHFT 23
-#define UV4H_EVENT_OCCURRED2_RTC_6_SHFT 24
-#define UV4H_EVENT_OCCURRED2_RTC_7_SHFT 25
-#define UV4H_EVENT_OCCURRED2_RTC_8_SHFT 26
-#define UV4H_EVENT_OCCURRED2_RTC_9_SHFT 27
-#define UV4H_EVENT_OCCURRED2_RTC_10_SHFT 28
-#define UV4H_EVENT_OCCURRED2_RTC_11_SHFT 29
-#define UV4H_EVENT_OCCURRED2_RTC_12_SHFT 30
-#define UV4H_EVENT_OCCURRED2_RTC_13_SHFT 31
-#define UV4H_EVENT_OCCURRED2_RTC_14_SHFT 32
-#define UV4H_EVENT_OCCURRED2_RTC_15_SHFT 33
-#define UV4H_EVENT_OCCURRED2_RTC_16_SHFT 34
-#define UV4H_EVENT_OCCURRED2_RTC_17_SHFT 35
-#define UV4H_EVENT_OCCURRED2_RTC_18_SHFT 36
-#define UV4H_EVENT_OCCURRED2_RTC_19_SHFT 37
-#define UV4H_EVENT_OCCURRED2_RTC_20_SHFT 38
-#define UV4H_EVENT_OCCURRED2_RTC_21_SHFT 39
-#define UV4H_EVENT_OCCURRED2_RTC_22_SHFT 40
-#define UV4H_EVENT_OCCURRED2_RTC_23_SHFT 41
-#define UV4H_EVENT_OCCURRED2_RTC_24_SHFT 42
-#define UV4H_EVENT_OCCURRED2_RTC_25_SHFT 43
-#define UV4H_EVENT_OCCURRED2_RTC_26_SHFT 44
-#define UV4H_EVENT_OCCURRED2_RTC_27_SHFT 45
-#define UV4H_EVENT_OCCURRED2_RTC_28_SHFT 46
-#define UV4H_EVENT_OCCURRED2_RTC_29_SHFT 47
-#define UV4H_EVENT_OCCURRED2_RTC_30_SHFT 48
-#define UV4H_EVENT_OCCURRED2_RTC_31_SHFT 49
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000001UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000002UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000004UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000008UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000010UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000020UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000040UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000080UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000100UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000200UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000000400UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000000800UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000001000UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000002000UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000004000UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000008000UL
-#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000010000UL
-#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000020000UL
-#define UV4H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000040000UL
-#define UV4H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000080000UL
-#define UV4H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000100000UL
-#define UV4H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000200000UL
-#define UV4H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000400000UL
-#define UV4H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000800000UL
-#define UV4H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000001000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000002000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000004000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000008000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000010000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000020000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000040000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000080000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000100000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000200000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000400000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000800000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_18_MASK 0x0000001000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_19_MASK 0x0000002000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_20_MASK 0x0000004000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_21_MASK 0x0000008000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_22_MASK 0x0000010000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_23_MASK 0x0000020000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_24_MASK 0x0000040000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_25_MASK 0x0000080000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_26_MASK 0x0000100000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_27_MASK 0x0000200000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_28_MASK 0x0000400000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_29_MASK 0x0000800000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_30_MASK 0x0001000000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_31_MASK 0x0002000000000000UL
+ /* UV4 unique struct */
+ struct uv4h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s4;
-#define UVXH_EVENT_OCCURRED2_RTC_1_MASK ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED2_RTC_1_MASK : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED2_RTC_1_MASK : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_RTC_1_MASK)
+ /* UV3 unique struct */
+ struct uv3h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s3;
-union uvh_event_occurred2_u {
- unsigned long v;
- struct uv2h_event_occurred2_s {
- unsigned long rtc_0:1; /* RW */
- unsigned long rtc_1:1; /* RW */
- unsigned long rtc_2:1; /* RW */
- unsigned long rtc_3:1; /* RW */
- unsigned long rtc_4:1; /* RW */
- unsigned long rtc_5:1; /* RW */
- unsigned long rtc_6:1; /* RW */
- unsigned long rtc_7:1; /* RW */
- unsigned long rtc_8:1; /* RW */
- unsigned long rtc_9:1; /* RW */
- unsigned long rtc_10:1; /* RW */
- unsigned long rtc_11:1; /* RW */
- unsigned long rtc_12:1; /* RW */
- unsigned long rtc_13:1; /* RW */
- unsigned long rtc_14:1; /* RW */
- unsigned long rtc_15:1; /* RW */
- unsigned long rtc_16:1; /* RW */
- unsigned long rtc_17:1; /* RW */
- unsigned long rtc_18:1; /* RW */
- unsigned long rtc_19:1; /* RW */
- unsigned long rtc_20:1; /* RW */
- unsigned long rtc_21:1; /* RW */
- unsigned long rtc_22:1; /* RW */
- unsigned long rtc_23:1; /* RW */
- unsigned long rtc_24:1; /* RW */
- unsigned long rtc_25:1; /* RW */
- unsigned long rtc_26:1; /* RW */
- unsigned long rtc_27:1; /* RW */
- unsigned long rtc_28:1; /* RW */
- unsigned long rtc_29:1; /* RW */
- unsigned long rtc_30:1; /* RW */
- unsigned long rtc_31:1; /* RW */
- unsigned long rsvd_32_63:32;
+ /* UV2 unique struct */
+ struct uv2h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s2;
- struct uv3h_event_occurred2_s {
- unsigned long rtc_0:1; /* RW */
- unsigned long rtc_1:1; /* RW */
- unsigned long rtc_2:1; /* RW */
- unsigned long rtc_3:1; /* RW */
- unsigned long rtc_4:1; /* RW */
- unsigned long rtc_5:1; /* RW */
- unsigned long rtc_6:1; /* RW */
- unsigned long rtc_7:1; /* RW */
- unsigned long rtc_8:1; /* RW */
- unsigned long rtc_9:1; /* RW */
- unsigned long rtc_10:1; /* RW */
- unsigned long rtc_11:1; /* RW */
- unsigned long rtc_12:1; /* RW */
- unsigned long rtc_13:1; /* RW */
- unsigned long rtc_14:1; /* RW */
- unsigned long rtc_15:1; /* RW */
- unsigned long rtc_16:1; /* RW */
- unsigned long rtc_17:1; /* RW */
- unsigned long rtc_18:1; /* RW */
- unsigned long rtc_19:1; /* RW */
- unsigned long rtc_20:1; /* RW */
- unsigned long rtc_21:1; /* RW */
- unsigned long rtc_22:1; /* RW */
- unsigned long rtc_23:1; /* RW */
- unsigned long rtc_24:1; /* RW */
- unsigned long rtc_25:1; /* RW */
- unsigned long rtc_26:1; /* RW */
- unsigned long rtc_27:1; /* RW */
- unsigned long rtc_28:1; /* RW */
- unsigned long rtc_29:1; /* RW */
- unsigned long rtc_30:1; /* RW */
- unsigned long rtc_31:1; /* RW */
- unsigned long rsvd_32_63:32;
- } s3;
- struct uv4h_event_occurred2_s {
- unsigned long message_accelerator_int0:1; /* RW */
- unsigned long message_accelerator_int1:1; /* RW */
- unsigned long message_accelerator_int2:1; /* RW */
- unsigned long message_accelerator_int3:1; /* RW */
- unsigned long message_accelerator_int4:1; /* RW */
- unsigned long message_accelerator_int5:1; /* RW */
- unsigned long message_accelerator_int6:1; /* RW */
- unsigned long message_accelerator_int7:1; /* RW */
- unsigned long message_accelerator_int8:1; /* RW */
- unsigned long message_accelerator_int9:1; /* RW */
- unsigned long message_accelerator_int10:1; /* RW */
- unsigned long message_accelerator_int11:1; /* RW */
- unsigned long message_accelerator_int12:1; /* RW */
- unsigned long message_accelerator_int13:1; /* RW */
- unsigned long message_accelerator_int14:1; /* RW */
- unsigned long message_accelerator_int15:1; /* RW */
- unsigned long rtc_interval_int:1; /* RW */
- unsigned long bau_dashboard_int:1; /* RW */
- unsigned long rtc_0:1; /* RW */
- unsigned long rtc_1:1; /* RW */
- unsigned long rtc_2:1; /* RW */
- unsigned long rtc_3:1; /* RW */
- unsigned long rtc_4:1; /* RW */
- unsigned long rtc_5:1; /* RW */
- unsigned long rtc_6:1; /* RW */
- unsigned long rtc_7:1; /* RW */
- unsigned long rtc_8:1; /* RW */
- unsigned long rtc_9:1; /* RW */
- unsigned long rtc_10:1; /* RW */
- unsigned long rtc_11:1; /* RW */
- unsigned long rtc_12:1; /* RW */
- unsigned long rtc_13:1; /* RW */
- unsigned long rtc_14:1; /* RW */
- unsigned long rtc_15:1; /* RW */
- unsigned long rtc_16:1; /* RW */
- unsigned long rtc_17:1; /* RW */
- unsigned long rtc_18:1; /* RW */
- unsigned long rtc_19:1; /* RW */
- unsigned long rtc_20:1; /* RW */
- unsigned long rtc_21:1; /* RW */
- unsigned long rtc_22:1; /* RW */
- unsigned long rtc_23:1; /* RW */
- unsigned long rtc_24:1; /* RW */
- unsigned long rtc_25:1; /* RW */
- unsigned long rtc_26:1; /* RW */
- unsigned long rtc_27:1; /* RW */
- unsigned long rtc_28:1; /* RW */
- unsigned long rtc_29:1; /* RW */
- unsigned long rtc_30:1; /* RW */
- unsigned long rtc_31:1; /* RW */
- unsigned long rsvd_50_63:14;
- } s4;
};
/* ========================================================================= */
-/* UVXH_EVENT_OCCURRED2_ALIAS */
+/* UVH_SCRATCH5 */
/* ========================================================================= */
-#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
-
-#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
-#define UV3H_EVENT_OCCURRED2_ALIAS_32 0xb70
-#define UV4H_EVENT_OCCURRED2_ALIAS_32 0x610
-#define UVH_EVENT_OCCURRED2_ALIAS_32 ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED2_ALIAS_32 : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED2_ALIAS_32 : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_ALIAS_32)
+#define UVH_SCRATCH5 ( \
+ is_uv(UV5) ? 0xb0200UL : \
+ is_uv(UV4) ? 0xb0200UL : \
+ is_uv(UV3) ? 0x2d0200UL : \
+ is_uv(UV2) ? 0x2d0200UL : \
+ 0)
+#define UV5H_SCRATCH5 0xb0200UL
+#define UV4H_SCRATCH5 0xb0200UL
+#define UV3H_SCRATCH5 0x2d0200UL
+#define UV2H_SCRATCH5 0x2d0200UL
+/* UVH common defines */
+#define UVH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-/* ========================================================================= */
-/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2 0xc8130UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2)
+/* UVXH common defines */
+#define UVXH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVXH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0xa10
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32)
+/* UVYH common defines */
+#define UVYH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVYH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV5 unique defines */
+#define UV5H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV5H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV4 unique defines */
+#define UV4H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV4H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV3 unique defines */
+#define UV3H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV3H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV2 unique defines */
+#define UV2H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV2H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-union uvxh_lb_bau_sb_activation_status_2_u {
+union uvh_scratch5_u {
unsigned long v;
- struct uvxh_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } sx;
- struct uv2h_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } s2;
- struct uv3h_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } s3;
- struct uv4h_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } s4;
-};
-/* ========================================================================= */
-/* UV3H_GR0_GAM_GR_CONFIG */
-/* ========================================================================= */
-#define UV3H_GR0_GAM_GR_CONFIG 0xc00028UL
-
-#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0
-#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
-#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+ /* UVH common struct */
+ struct uvh_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s;
-union uv3h_gr0_gam_gr_config_u {
- unsigned long v;
- struct uv3h_gr0_gam_gr_config_s {
- unsigned long m_skt:6; /* RW */
- unsigned long undef_6_9:4; /* Undefined */
- unsigned long subspace:1; /* RW */
- unsigned long reserved:53;
- } s3;
-};
+ /* UVXH common struct */
+ struct uvxh_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } sx;
-/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_QUEUE_FIRST */
-/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_QUEUE_FIRST 0xa4100UL
+ /* UVYH common struct */
+ struct uvyh_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } sy;
-#define UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_SHFT 6
-#define UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffc0UL
+ /* UV5 unique struct */
+ struct uv5h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s5;
-union uv4h_lb_proc_intd_queue_first_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_queue_first_s {
- unsigned long undef_0_5:6; /* Undefined */
- unsigned long first_payload_address:40; /* RW */
+ /* UV4 unique struct */
+ struct uv4h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
} s4;
-};
-/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_QUEUE_LAST */
-/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_QUEUE_LAST 0xa4108UL
-
-#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_SHFT 5
-#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffe0UL
+ /* UV3 unique struct */
+ struct uv3h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s3;
-union uv4h_lb_proc_intd_queue_last_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_queue_last_s {
- unsigned long undef_0_4:5; /* Undefined */
- unsigned long last_payload_address:41; /* RW */
- } s4;
+ /* UV2 unique struct */
+ struct uv2h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s2;
};
/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR */
+/* UVH_SCRATCH5_ALIAS */
/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR 0xa4118UL
+#define UVH_SCRATCH5_ALIAS ( \
+ is_uv(UV5) ? 0xb0208UL : \
+ is_uv(UV4) ? 0xb0208UL : \
+ is_uv(UV3) ? 0x2d0208UL : \
+ is_uv(UV2) ? 0x2d0208UL : \
+ 0)
+#define UV5H_SCRATCH5_ALIAS 0xb0208UL
+#define UV4H_SCRATCH5_ALIAS 0xb0208UL
+#define UV3H_SCRATCH5_ALIAS 0x2d0208UL
+#define UV2H_SCRATCH5_ALIAS 0x2d0208UL
-#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_SHFT 0
-#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_MASK 0x00000000000000ffUL
-
-union uv4h_lb_proc_intd_soft_ack_clear_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_soft_ack_clear_s {
- unsigned long soft_ack_pending_flags:8; /* WP */
- } s4;
-};
/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_SOFT_ACK_PENDING */
+/* UVH_SCRATCH5_ALIAS_2 */
/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING 0xa4110UL
-
-#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_SHFT 0
-#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_MASK 0x00000000000000ffUL
+#define UVH_SCRATCH5_ALIAS_2 ( \
+ is_uv(UV5) ? 0xb0210UL : \
+ is_uv(UV4) ? 0xb0210UL : \
+ is_uv(UV3) ? 0x2d0210UL : \
+ is_uv(UV2) ? 0x2d0210UL : \
+ 0)
+#define UV5H_SCRATCH5_ALIAS_2 0xb0210UL
+#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL
+#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL
-union uv4h_lb_proc_intd_soft_ack_pending_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_soft_ack_pending_s {
- unsigned long soft_ack_flags:8; /* RW */
- } s4;
-};
#endif /* _ASM_X86_UV_UV_MMRS_H */
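A minimal usage sketch for the restructured MMR definitions above (not part of the patch): the per-hub addresses are now folded into one UVH_* macro selected at run time by is_uv(), so a single code path covers UV2 through UV5. uv_read_local_mmr() is the existing UV accessor; everything else here is illustrative only.

#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

static unsigned long uv_read_rtc_example(void)
{
	union uvh_rtc_u rtc;

	/* UVH_RTC resolves to the right per-hub offset via is_uv(). */
	rtc.v = uv_read_local_mmr(UVH_RTC);

	/* The common "s" layout exposes the 56-bit counter on all hubs. */
	return rtc.s.real_time_clock;
}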
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6807153c0410..dde5b3f1e7cd 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -4,29 +4,22 @@
#include <asm/bootparam.h>
+struct ghcb;
struct mpc_bus;
struct mpc_cpu;
+struct pt_regs;
struct mpc_table;
struct cpuinfo_x86;
+struct irq_domain;
/**
* struct x86_init_mpparse - platform specific mpparse ops
- * @mpc_record: platform specific mpc record accounting
* @setup_ioapic_ids: platform specific ioapic id override
- * @mpc_apic_id: platform specific mpc apic id assignment
- * @smp_read_mpc_oem: platform specific oem mpc table setup
- * @mpc_oem_pci_bus: platform specific pci bus setup (default NULL)
- * @mpc_oem_bus_info: platform specific mpc bus info
* @find_smp_config: find the smp configuration
* @get_smp_config: get the smp configuration
*/
struct x86_init_mpparse {
- void (*mpc_record)(unsigned int mode);
void (*setup_ioapic_ids)(void);
- int (*mpc_apic_id)(struct mpc_cpu *m);
- void (*smp_read_mpc_oem)(struct mpc_table *mpc);
- void (*mpc_oem_pci_bus)(struct mpc_bus *m);
- void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
void (*find_smp_config)(void);
void (*get_smp_config)(unsigned int early);
};
@@ -52,12 +45,14 @@ struct x86_init_resources {
* @intr_init: interrupt init code
* @intr_mode_select: interrupt delivery mode selection
* @intr_mode_init: interrupt delivery mode setup
+ * @create_pci_msi_domain: Create the PCI/MSI interrupt domain
*/
struct x86_init_irqs {
void (*pre_vector_init)(void);
void (*intr_init)(void);
void (*intr_mode_select)(void);
void (*intr_mode_init)(void);
+ struct irq_domain *(*create_pci_msi_domain)(void);
};
/**
@@ -236,10 +231,22 @@ struct x86_legacy_features {
/**
* struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks
*
- * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely)
+ * @pin_vcpu: pin current vcpu to specified physical
+ * cpu (run rarely)
+ * @sev_es_hcall_prepare: Load additional hypervisor-specific
+ * state into the GHCB when doing a VMMCALL under
+ * SEV-ES. Called from the #VC exception handler.
+ * @sev_es_hcall_finish: Copies state from the GHCB back into the
+ * processor (or pt_regs). Also runs checks on the
+ * state returned from the hypervisor after a
+ * VMMCALL under SEV-ES. Needs to return 'false'
+ * if the checks fail. Called from the #VC
+ * exception handler.
*/
struct x86_hyper_runtime {
void (*pin_vcpu)(int cpu);
+ void (*sev_es_hcall_prepare)(struct ghcb *ghcb, struct pt_regs *regs);
+ bool (*sev_es_hcall_finish)(struct ghcb *ghcb, struct pt_regs *regs);
};
/**
@@ -283,9 +290,6 @@ struct x86_platform_ops {
struct pci_dev;
struct x86_msi_ops {
- int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
- void (*teardown_msi_irq)(unsigned int irq);
- void (*teardown_msi_irqs)(struct pci_dev *dev);
void (*restore_msi_irqs)(struct pci_dev *dev);
};
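A minimal sketch of how a hypervisor guest driver might provide the two new SEV-ES callbacks documented above (not part of the patch): the example_* names are placeholders, and the ghcb_set_*()/ghcb_*_is_valid() accessors are assumed from the rest of the SEV-ES series. Real hypervisor support normally supplies these through its struct hypervisor_x86 runtime ops rather than by direct assignment.

#include <asm/svm.h>
#include <asm/x86_init.h>
#include <asm/ptrace.h>

static void example_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* Expose the hypercall arguments to the hypervisor via the shared GHCB. */
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
}

static bool example_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* Reject the exit unless the hypervisor returned a result in RAX. */
	return ghcb_rax_is_valid(ghcb);
}

static void __init example_init_hyper_runtime(void)
{
	x86_platform.hyper.sev_es_hcall_prepare = example_sev_es_hcall_prepare;
	x86_platform.hyper.sev_es_hcall_finish  = example_sev_es_hcall_finish;
}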
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 2e8a30f06c74..a7a3403645e5 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -29,6 +29,7 @@
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_LAST_EXCP 0x05f
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
@@ -80,6 +81,16 @@
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
+/* SEV-ES software-defined VMGEXIT events */
+#define SVM_VMGEXIT_MMIO_READ 0x80000001
+#define SVM_VMGEXIT_MMIO_WRITE 0x80000002
+#define SVM_VMGEXIT_NMI_COMPLETE 0x80000003
+#define SVM_VMGEXIT_AP_HLT_LOOP 0x80000004
+#define SVM_VMGEXIT_AP_JUMP_TABLE 0x80000005
+#define SVM_VMGEXIT_SET_AP_JUMP_TABLE 0
+#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
+#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e77261db2391..04ceea8f4a89 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -20,6 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_head64.o = -pg
+CFLAGS_REMOVE_sev-es.o = -pg
endif
KASAN_SANITIZE_head$(BITS).o := n
@@ -27,6 +28,7 @@ KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
+KASAN_SANITIZE_sev-es.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
@@ -68,6 +70,7 @@ obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
obj-y += irqflags.o
+obj-y += static_call.o
obj-y += process.o
obj-y += fpu/
@@ -145,6 +148,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c3daf0aaa0ee..4adbe65afe23 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -239,7 +239,7 @@ void __init arch_init_ideal_nops(void)
return;
}
- /* fall through */
+ fallthrough;
default:
#ifdef CONFIG_X86_64
@@ -1103,6 +1103,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
*/
goto out_put;
+ case RET_INSN_OPCODE:
+ int3_emulate_ret(regs);
+ break;
+
case CALL_INSN_OPCODE:
int3_emulate_call(regs, (long)ip + tp->rel32);
break;
@@ -1277,6 +1281,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
switch (tp->opcode) {
case INT3_INSN_OPCODE:
+ case RET_INSN_OPCODE:
break;
case CALL_INSN_OPCODE:
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 5f943b938167..b3eef1d5c903 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1429,6 +1429,9 @@ void __init apic_intr_mode_init(void)
break;
}
+ if (x86_platform.apic_post_init)
+ x86_platform.apic_post_init();
+
apic_bsp_setup(upmode);
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 21325a4a78b9..7b3c7e0d4a09 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -800,7 +800,7 @@ static int irq_polarity(int idx)
return IOAPIC_POL_HIGH;
case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
- /* fall through */
+ fallthrough;
case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to do gcc stupidity */
return IOAPIC_POL_LOW;
@@ -848,7 +848,7 @@ static int irq_trigger(int idx)
return IOAPIC_EDGE;
case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
- /* fall through */
+ fallthrough;
case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to do gcc stupidity */
return IOAPIC_LEVEL;
@@ -860,10 +860,10 @@ void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
{
init_irq_alloc_info(info, NULL);
info->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
- info->ioapic_node = node;
- info->ioapic_trigger = trigger;
- info->ioapic_polarity = polarity;
- info->ioapic_valid = 1;
+ info->ioapic.node = node;
+ info->ioapic.trigger = trigger;
+ info->ioapic.polarity = polarity;
+ info->ioapic.valid = 1;
}
#ifndef CONFIG_ACPI
@@ -878,32 +878,32 @@ static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
copy_irq_alloc_info(dst, src);
dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
- dst->ioapic_id = mpc_ioapic_id(ioapic_idx);
- dst->ioapic_pin = pin;
- dst->ioapic_valid = 1;
- if (src && src->ioapic_valid) {
- dst->ioapic_node = src->ioapic_node;
- dst->ioapic_trigger = src->ioapic_trigger;
- dst->ioapic_polarity = src->ioapic_polarity;
+ dst->devid = mpc_ioapic_id(ioapic_idx);
+ dst->ioapic.pin = pin;
+ dst->ioapic.valid = 1;
+ if (src && src->ioapic.valid) {
+ dst->ioapic.node = src->ioapic.node;
+ dst->ioapic.trigger = src->ioapic.trigger;
+ dst->ioapic.polarity = src->ioapic.polarity;
} else {
- dst->ioapic_node = NUMA_NO_NODE;
+ dst->ioapic.node = NUMA_NO_NODE;
if (acpi_get_override_irq(gsi, &trigger, &polarity) >= 0) {
- dst->ioapic_trigger = trigger;
- dst->ioapic_polarity = polarity;
+ dst->ioapic.trigger = trigger;
+ dst->ioapic.polarity = polarity;
} else {
/*
* PCI interrupts are always active low level
* triggered.
*/
- dst->ioapic_trigger = IOAPIC_LEVEL;
- dst->ioapic_polarity = IOAPIC_POL_LOW;
+ dst->ioapic.trigger = IOAPIC_LEVEL;
+ dst->ioapic.polarity = IOAPIC_POL_LOW;
}
}
}
static int ioapic_alloc_attr_node(struct irq_alloc_info *info)
{
- return (info && info->ioapic_valid) ? info->ioapic_node : NUMA_NO_NODE;
+ return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE;
}
static void mp_register_handler(unsigned int irq, unsigned long trigger)
@@ -933,14 +933,14 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
* pin with real trigger and polarity attributes.
*/
if (irq < nr_legacy_irqs() && data->count == 1) {
- if (info->ioapic_trigger != data->trigger)
- mp_register_handler(irq, info->ioapic_trigger);
- data->entry.trigger = data->trigger = info->ioapic_trigger;
- data->entry.polarity = data->polarity = info->ioapic_polarity;
+ if (info->ioapic.trigger != data->trigger)
+ mp_register_handler(irq, info->ioapic.trigger);
+ data->entry.trigger = data->trigger = info->ioapic.trigger;
+ data->entry.polarity = data->polarity = info->ioapic.polarity;
}
- return data->trigger == info->ioapic_trigger &&
- data->polarity == info->ioapic_polarity;
+ return data->trigger == info->ioapic.trigger &&
+ data->polarity == info->ioapic.polarity;
}
static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
@@ -1002,7 +1002,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
if (!mp_check_pin_attr(irq, info))
return -EBUSY;
if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
- info->ioapic_pin))
+ info->ioapic.pin))
return -ENOMEM;
} else {
info->flags |= X86_IRQ_ALLOC_LEGACY;
@@ -2092,8 +2092,8 @@ static int mp_alloc_timer_irq(int ioapic, int pin)
struct irq_alloc_info info;
ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0);
- info.ioapic_id = mpc_ioapic_id(ioapic);
- info.ioapic_pin = pin;
+ info.devid = mpc_ioapic_id(ioapic);
+ info.ioapic.pin = pin;
mutex_lock(&ioapic_mutex);
irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info);
mutex_unlock(&ioapic_mutex);
@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void)
legacy_pic->init(0);
legacy_pic->make_irq(0);
apic_write(APIC_LVT0, APIC_DM_EXTINT);
+ legacy_pic->unmask(0);
unlock_ExtINT_logic();
@@ -2296,9 +2297,9 @@ static int mp_irqdomain_create(int ioapic)
return 0;
init_irq_alloc_info(&info, NULL);
- info.type = X86_IRQ_ALLOC_TYPE_IOAPIC;
- info.ioapic_id = mpc_ioapic_id(ioapic);
- parent = irq_remapping_get_ir_irq_domain(&info);
+ info.type = X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT;
+ info.devid = mpc_ioapic_id(ioapic);
+ parent = irq_remapping_get_irq_domain(&info);
if (!parent)
parent = x86_vector_domain;
else
@@ -2932,9 +2933,9 @@ int mp_ioapic_registered(u32 gsi_base)
static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
struct irq_alloc_info *info)
{
- if (info && info->ioapic_valid) {
- data->trigger = info->ioapic_trigger;
- data->polarity = info->ioapic_polarity;
+ if (info && info->ioapic.valid) {
+ data->trigger = info->ioapic.trigger;
+ data->polarity = info->ioapic.polarity;
} else if (acpi_get_override_irq(gsi, &data->trigger,
&data->polarity) < 0) {
/* PCI interrupts are always active low level triggered. */
@@ -2980,7 +2981,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
return -EINVAL;
ioapic = mp_irqdomain_ioapic_idx(domain);
- pin = info->ioapic_pin;
+ pin = info->ioapic.pin;
if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0)
return -EEXIST;
@@ -2988,7 +2989,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
if (!data)
return -ENOMEM;
- info->ioapic_entry = &data->entry;
+ info->ioapic.entry = &data->entry;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
if (ret < 0) {
kfree(data);
@@ -2996,7 +2997,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
}
INIT_LIST_HEAD(&data->irq_2_pin);
- irq_data->hwirq = info->ioapic_pin;
+ irq_data->hwirq = info->ioapic.pin;
irq_data->chip = (domain->parent == x86_vector_domain) ?
&ioapic_chip : &ioapic_ir_chip;
irq_data->chip_data = data;
@@ -3006,8 +3007,8 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
local_irq_save(flags);
- if (info->ioapic_entry)
- mp_setup_entry(cfg, data, info->ioapic_entry);
+ if (info->ioapic.entry)
+ mp_setup_entry(cfg, data, info->ioapic.entry);
mp_register_handler(virq, data->trigger);
if (virq < nr_legacy_irqs())
legacy_pic->mask(virq);
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index c2b2911feeef..6313f0a05db7 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -21,7 +21,7 @@
#include <asm/apic.h>
#include <asm/irq_remapping.h>
-static struct irq_domain *msi_default_domain;
+struct irq_domain *x86_pci_msi_default_domain __ro_after_init;
static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg)
{
@@ -45,7 +45,7 @@ static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg)
MSI_DATA_VECTOR(cfg->vector);
}
-static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
__irq_msi_compose_msg(irqd_cfg(data), msg);
}
@@ -177,40 +177,10 @@ static struct irq_chip pci_msi_controller = {
.irq_mask = pci_msi_mask_irq,
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_compose_msi_msg = irq_msi_compose_msg,
.irq_set_affinity = msi_set_affinity,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct irq_domain *domain;
- struct irq_alloc_info info;
-
- init_irq_alloc_info(&info, NULL);
- info.type = X86_IRQ_ALLOC_TYPE_MSI;
- info.msi_dev = dev;
-
- domain = irq_remapping_get_irq_domain(&info);
- if (domain == NULL)
- domain = msi_default_domain;
- if (domain == NULL)
- return -ENOSYS;
-
- return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
-}
-
-void native_teardown_msi_irq(unsigned int irq)
-{
- irq_domain_free_irqs(irq, 1);
-}
-
-static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return arg->msi_hwirq;
-}
-
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
msi_alloc_info_t *arg)
{
@@ -218,11 +188,10 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
struct msi_desc *desc = first_pci_msi_entry(pdev);
init_irq_alloc_info(arg, NULL);
- arg->msi_dev = pdev;
if (desc->msi_attrib.is_msix) {
- arg->type = X86_IRQ_ALLOC_TYPE_MSIX;
+ arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
} else {
- arg->type = X86_IRQ_ALLOC_TYPE_MSI;
+ arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
}
@@ -230,16 +199,8 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
}
EXPORT_SYMBOL_GPL(pci_msi_prepare);
-void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
-{
- arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc);
-}
-EXPORT_SYMBOL_GPL(pci_msi_set_desc);
-
static struct msi_domain_ops pci_msi_domain_ops = {
- .get_hwirq = pci_msi_get_hwirq,
.msi_prepare = pci_msi_prepare,
- .set_desc = pci_msi_set_desc,
};
static struct msi_domain_info pci_msi_domain_info = {
@@ -251,25 +212,32 @@ static struct msi_domain_info pci_msi_domain_info = {
.handler_name = "edge",
};
-void __init arch_init_msi_domain(struct irq_domain *parent)
+struct irq_domain * __init native_create_pci_msi_domain(void)
{
struct fwnode_handle *fn;
+ struct irq_domain *d;
if (disable_apic)
- return;
+ return NULL;
fn = irq_domain_alloc_named_fwnode("PCI-MSI");
- if (fn) {
- msi_default_domain =
- pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
- parent);
- }
- if (!msi_default_domain) {
+ if (!fn)
+ return NULL;
+
+ d = pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
+ x86_vector_domain);
+ if (!d) {
irq_domain_free_fwnode(fn);
- pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
+ pr_warn("Failed to initialize PCI-MSI irqdomain.\n");
} else {
- msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
+ d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
}
+ return d;
+}
+
+void __init x86_create_pci_msi_domain(void)
+{
+ x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain();
}
#ifdef CONFIG_IRQ_REMAP
@@ -279,7 +247,6 @@ static struct irq_chip pci_msi_ir_controller = {
.irq_mask = pci_msi_mask_irq,
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -321,35 +288,28 @@ static struct irq_chip dmar_msi_controller = {
.irq_ack = irq_chip_ack_parent,
.irq_set_affinity = msi_domain_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_compose_msi_msg = irq_msi_compose_msg,
.irq_write_msi_msg = dmar_msi_write_msg,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return arg->dmar_id;
-}
-
static int dmar_msi_init(struct irq_domain *domain,
struct msi_domain_info *info, unsigned int virq,
irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
- irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL,
- handle_edge_irq, arg->dmar_data, "edge");
+ irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL,
+ handle_edge_irq, arg->data, "edge");
return 0;
}
static struct msi_domain_ops dmar_msi_domain_ops = {
- .get_hwirq = dmar_msi_get_hwirq,
.msi_init = dmar_msi_init,
};
static struct msi_domain_info dmar_msi_domain_info = {
.ops = &dmar_msi_domain_ops,
.chip = &dmar_msi_controller,
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS,
};
static struct irq_domain *dmar_get_irq_domain(void)
@@ -384,8 +344,9 @@ int dmar_alloc_hwirq(int id, int node, void *arg)
init_irq_alloc_info(&info, NULL);
info.type = X86_IRQ_ALLOC_TYPE_DMAR;
- info.dmar_id = id;
- info.dmar_data = arg;
+ info.devid = id;
+ info.hwirq = id;
+ info.data = arg;
return irq_domain_alloc_irqs(domain, 1, node, &info);
}
@@ -419,24 +380,17 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
.irq_ack = irq_chip_ack_parent,
.irq_set_affinity = msi_domain_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_compose_msi_msg = irq_msi_compose_msg,
.irq_write_msi_msg = hpet_msi_write_msg,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return arg->hpet_index;
-}
-
static int hpet_msi_init(struct irq_domain *domain,
struct msi_domain_info *info, unsigned int virq,
irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
- irq_domain_set_info(domain, virq, arg->hpet_index, info->chip, NULL,
- handle_edge_irq, arg->hpet_data, "edge");
+ irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL,
+ handle_edge_irq, arg->data, "edge");
return 0;
}
@@ -448,7 +402,6 @@ static void hpet_msi_free(struct irq_domain *domain,
}
static struct msi_domain_ops hpet_msi_domain_ops = {
- .get_hwirq = hpet_msi_get_hwirq,
.msi_init = hpet_msi_init,
.msi_free = hpet_msi_free,
};
@@ -456,6 +409,7 @@ static struct msi_domain_ops hpet_msi_domain_ops = {
static struct msi_domain_info hpet_msi_domain_info = {
.ops = &hpet_msi_domain_ops,
.chip = &hpet_msi_controller,
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS,
};
struct irq_domain *hpet_create_irq_domain(int hpet_id)
@@ -476,9 +430,9 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
domain_info->data = (void *)(long)hpet_id;
init_irq_alloc_info(&info, NULL);
- info.type = X86_IRQ_ALLOC_TYPE_HPET;
- info.hpet_id = hpet_id;
- parent = irq_remapping_get_ir_irq_domain(&info);
+ info.type = X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT;
+ info.devid = hpet_id;
+ parent = irq_remapping_get_irq_domain(&info);
if (parent == NULL)
parent = x86_vector_domain;
else
@@ -506,9 +460,9 @@ int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
init_irq_alloc_info(&info, NULL);
info.type = X86_IRQ_ALLOC_TYPE_HPET;
- info.hpet_data = hc;
- info.hpet_id = hpet_dev_id(domain);
- info.hpet_index = dev_num;
+ info.data = hc;
+ info.devid = hpet_dev_id(domain);
+ info.hwirq = dev_num;
return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
}
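Aside from the editor, not part of the patch: the msi.c hunks above collapse the per-source alloc-info members (msi_dev/msi_hwirq, dmar_id/dmar_data, hpet_id/hpet_index/hpet_data) into generic devid/hwirq/data fields, which is what lets the per-domain get_hwirq()/set_desc() callbacks be deleted. A minimal, hypothetical user-space sketch of that consolidation; every name below is invented for illustration only.

#include <stdio.h>

struct alloc_info_old {			/* one member per interrupt source */
	int dmar_id;
	int hpet_index;
	void *hpet_data;
};

struct alloc_info_new {			/* generic members shared by every source */
	int devid;			/* device id (was dmar_id / hpet_id) */
	unsigned long hwirq;		/* hw irq number (was msi_hwirq / hpet_index) */
	void *data;			/* opaque per-source payload */
};

/* With generic fields, one default accessor replaces per-type callbacks. */
static unsigned long default_get_hwirq(const struct alloc_info_new *arg)
{
	return arg->hwirq;
}

int main(void)
{
	struct alloc_info_new info = { .devid = 3, .hwirq = 7, .data = NULL };

	printf("devid=%d hwirq=%lu\n", info.devid, default_get_hwirq(&info));
	return 0;
}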
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 7bda71def557..67b6f7c049ec 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -149,7 +149,7 @@ void __init default_setup_apic_routing(void)
break;
}
/* P4 and above */
- /* fall through */
+ fallthrough;
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
@@ -170,9 +170,6 @@ void __init default_setup_apic_routing(void)
if (apic->setup_apic_routing)
apic->setup_apic_routing();
-
- if (x86_platform.apic_post_init)
- x86_platform.apic_post_init();
}
void __init generic_apic_probe(void)
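Aside, not part of the patch: the probe_32.c hunk replaces the fall-through comment with the kernel's fallthrough pseudo-keyword, which expands to a compiler attribute that -Wimplicit-fallthrough can verify. A rough user-space approximation, assuming a GCC/Clang toolchain; the macro guard below is illustrative, the kernel defines the real macro in its compiler-attribute headers.

#include <stdio.h>

/* Rough stand-in for the kernel macro: use the attribute when available. */
#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)	/* fallback: intent documented only */
#endif

static const char *vendor_class(int vendor)
{
	switch (vendor) {
	case 1:				/* "P4 and above" style case */
		fallthrough;		/* deliberately share the code below */
	case 2:
	case 3:
		return "bigsmp-capable";
	default:
		return "default";
	}
}

int main(void)
{
	printf("%s\n", vendor_class(1));
	return 0;
}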
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index bd3835d6b535..c46720f185c0 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -32,9 +32,6 @@ void __init default_setup_apic_routing(void)
break;
}
}
-
- if (x86_platform.apic_post_init)
- x86_platform.apic_post_init();
}
int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index dae32d948bf2..1eac53632786 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -161,6 +161,7 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
apicd->move_in_progress = true;
apicd->prev_vector = apicd->vector;
apicd->prev_cpu = apicd->cpu;
+ WARN_ON_ONCE(apicd->cpu == newcpu);
} else {
irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
managed);
@@ -713,8 +714,6 @@ int __init arch_early_irq_init(void)
BUG_ON(x86_vector_domain == NULL);
irq_set_default_host(x86_vector_domain);
- arch_init_msi_domain(x86_vector_domain);
-
BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
/*
@@ -823,6 +822,7 @@ static struct irq_chip lapic_controller = {
.name = "APIC",
.irq_ack = apic_ack_edge,
.irq_set_affinity = apic_set_affinity,
+ .irq_compose_msi_msg = x86_vector_msi_compose_msg,
.irq_retrigger = apic_retrigger_irq,
};
@@ -910,7 +910,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
__send_cleanup_vector(apicd);
}
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
{
struct apic_chip_data *apicd;
@@ -918,15 +918,16 @@ static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
if (likely(!apicd->move_in_progress))
return;
- if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+ /*
+ * If the interrupt arrived on the new target CPU, cleanup the
+ * vector on the old target CPU. A vector check is not required
+ * because an interrupt can never move from one vector to another
+ * on the same CPU.
+ */
+ if (apicd->cpu == smp_processor_id())
__send_cleanup_vector(apicd);
}
-void irq_complete_move(struct irq_cfg *cfg)
-{
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
/*
* Called from fixup_irqs() with @desc->lock held and interrupts disabled.
*/
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 0b6eea3f54e6..714233cee0b5 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,6 +5,7 @@
*
* SGI UV APIC functions (note: not an Intel compatible APIC)
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/crash_dump.h>
@@ -29,19 +30,24 @@ static int uv_hubbed_system;
static int uv_hubless_system;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
+static int uv_node_id;
-/* Unpack OEM/TABLE ID's to be NULL terminated strings */
+/* Unpack AT/OEM/TABLE ID's to be NULL terminated strings */
+static u8 uv_archtype[UV_AT_SIZE];
static u8 oem_id[ACPI_OEM_ID_SIZE + 1];
static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
-/* Information derived from CPUID: */
+/* Information derived from CPUID and some UV MMRs */
static struct {
unsigned int apicid_shift;
unsigned int apicid_mask;
unsigned int socketid_shift; /* aka pnode_shift for UV2/3 */
unsigned int pnode_mask;
+ unsigned int nasid_shift;
unsigned int gpa_shift;
unsigned int gnode_shift;
+ unsigned int m_skt;
+ unsigned int n_skt;
} uv_cpuid;
static int uv_min_hub_revision_id;
@@ -77,6 +83,9 @@ static unsigned long __init uv_early_read_mmr(unsigned long addr)
static inline bool is_GRU_range(u64 start, u64 end)
{
+ if (!gru_start_paddr)
+ return false;
+
return start >= gru_start_paddr && end <= gru_end_paddr;
}
@@ -85,43 +94,102 @@ static bool uv_is_untracked_pat_range(u64 start, u64 end)
return is_ISA_range(start, end) || is_GRU_range(start, end);
}
-static int __init early_get_pnodeid(void)
+static void __init early_get_pnodeid(void)
{
- union uvh_node_id_u node_id;
- union uvh_rh_gam_config_mmr_u m_n_config;
int pnode;
- /* Currently, all blades have same revision number */
+ uv_cpuid.m_skt = 0;
+ if (UVH_RH10_GAM_ADDR_MAP_CONFIG) {
+ union uvh_rh10_gam_addr_map_config_u m_n_config;
+
+ m_n_config.v = uv_early_read_mmr(UVH_RH10_GAM_ADDR_MAP_CONFIG);
+ uv_cpuid.n_skt = m_n_config.s.n_skt;
+ uv_cpuid.nasid_shift = 0;
+ } else if (UVH_RH_GAM_ADDR_MAP_CONFIG) {
+ union uvh_rh_gam_addr_map_config_u m_n_config;
+
+ m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_ADDR_MAP_CONFIG);
+ uv_cpuid.n_skt = m_n_config.s.n_skt;
+ if (is_uv(UV3))
+ uv_cpuid.m_skt = m_n_config.s3.m_skt;
+ if (is_uv(UV2))
+ uv_cpuid.m_skt = m_n_config.s2.m_skt;
+ uv_cpuid.nasid_shift = 1;
+ } else {
+ unsigned long GAM_ADDR_MAP_CONFIG = 0;
+
+ WARN(GAM_ADDR_MAP_CONFIG == 0,
+ "UV: WARN: GAM_ADDR_MAP_CONFIG is not available\n");
+ uv_cpuid.n_skt = 0;
+ uv_cpuid.nasid_shift = 0;
+ }
+
+ if (is_uv(UV4|UVY))
+ uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
+
+ uv_cpuid.pnode_mask = (1 << uv_cpuid.n_skt) - 1;
+ pnode = (uv_node_id >> uv_cpuid.nasid_shift) & uv_cpuid.pnode_mask;
+ uv_cpuid.gpa_shift = 46; /* Default unless changed */
+
+ pr_info("UV: n_skt:%d pnmsk:%x pn:%x\n",
+ uv_cpuid.n_skt, uv_cpuid.pnode_mask, pnode);
+}
+
+/* Running on a UV Hubbed system, determine which UV Hub Type it is */
+static int __init early_set_hub_type(void)
+{
+ union uvh_node_id_u node_id;
+
+ /*
+ * The NODE_ID MMR is always at offset 0.
+ * Contains the chip part # + revision.
+ * Node_id field started with 15 bits,
+ * ... now 7 but upper 8 are masked to 0.
+ * All blades/nodes have the same part # and hub revision.
+ */
node_id.v = uv_early_read_mmr(UVH_NODE_ID);
- m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
- uv_min_hub_revision_id = node_id.s.revision;
+ uv_node_id = node_id.sx.node_id;
switch (node_id.s.part_number) {
- case UV2_HUB_PART_NUMBER:
- case UV2_HUB_PART_NUMBER_X:
- uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+
+ case UV5_HUB_PART_NUMBER:
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV5_HUB_REVISION_BASE;
+ uv_hub_type_set(UV5);
break;
+
+ /* UV4/4A only have a revision difference */
+ case UV4_HUB_PART_NUMBER:
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV4_HUB_REVISION_BASE;
+ uv_hub_type_set(UV4);
+ if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE)
+ uv_hub_type_set(UV4|UV4A);
+ break;
+
case UV3_HUB_PART_NUMBER:
case UV3_HUB_PART_NUMBER_X:
- uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV3_HUB_REVISION_BASE;
+ uv_hub_type_set(UV3);
break;
- /* Update: UV4A has only a modified revision to indicate HUB fixes */
- case UV4_HUB_PART_NUMBER:
- uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
- uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
+ case UV2_HUB_PART_NUMBER:
+ case UV2_HUB_PART_NUMBER_X:
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV2_HUB_REVISION_BASE - 1;
+ uv_hub_type_set(UV2);
break;
+
+ default:
+ return 0;
}
- uv_hub_info->hub_revision = uv_min_hub_revision_id;
- uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1;
- pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask;
- uv_cpuid.gpa_shift = 46; /* Default unless changed */
+ pr_info("UV: part#:%x rev:%d rev_id:%d UVtype:0x%x\n",
+ node_id.s.part_number, node_id.s.revision,
+ uv_min_hub_revision_id, is_uv(~0));
- pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n",
- node_id.s.revision, node_id.s.part_number, node_id.s.node_id,
- m_n_config.s.n_skt, uv_cpuid.pnode_mask, pnode);
- return pnode;
+ return 1;
}
static void __init uv_tsc_check_sync(void)
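Aside, not part of the patch: the reworked early_get_pnodeid() above derives the pnode from just two quantities read out of whichever GAM address-map MMR exists, n_skt (mask width) and nasid_shift. A self-contained sketch of that mask-and-shift arithmetic, using made-up register values.

#include <stdio.h>

/* Toy values standing in for fields read from the GAM address-map MMR. */
#define N_SKT		6	/* number of socket-id bits */
#define NASID_SHIFT	1	/* UV2/3/4 keep the pnode above a one-bit shift */

static unsigned int pnode_from_node_id(unsigned int node_id)
{
	unsigned int pnode_mask = (1u << N_SKT) - 1;	/* 0x3f for n_skt = 6 */

	return (node_id >> NASID_SHIFT) & pnode_mask;
}

int main(void)
{
	unsigned int node_id = 0x2a;	/* hypothetical UVH_NODE_ID.node_id */

	printf("pnode_mask=0x%x pnode=0x%x\n",
	       (1u << N_SKT) - 1, pnode_from_node_id(node_id));
	return 0;
}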
@@ -130,38 +198,41 @@ static void __init uv_tsc_check_sync(void)
int sync_state;
int mmr_shift;
char *state;
- bool valid;
- /* Accommodate different UV arch BIOSes */
+ /* Different returns from different UV BIOS versions */
mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR);
mmr_shift =
is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;
sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;
+ /* Check if TSC is valid for all sockets */
switch (sync_state) {
case UVH_TSC_SYNC_VALID:
state = "in sync";
- valid = true;
+ mark_tsc_async_resets("UV BIOS");
break;
- case UVH_TSC_SYNC_INVALID:
- state = "unstable";
- valid = false;
+ /* If BIOS state unknown, don't do anything */
+ case UVH_TSC_SYNC_UNKNOWN:
+ state = "unknown";
break;
+
+ /* Otherwise, BIOS indicates problem with TSC */
default:
- state = "unknown: assuming valid";
- valid = true;
+ state = "unstable";
+ mark_tsc_unstable("UV BIOS");
break;
}
pr_info("UV: TSC sync state from BIOS:0%d(%s)\n", sync_state, state);
-
- /* Mark flag that says TSC != 0 is valid for socket 0 */
- if (valid)
- mark_tsc_async_resets("UV BIOS");
- else
- mark_tsc_unstable("UV BIOS");
}
+/* Selector for (4|4A|5) structs */
+#define uvxy_field(sname, field, undef) ( \
+ is_uv(UV4A) ? sname.s4a.field : \
+ is_uv(UV4) ? sname.s4.field : \
+ is_uv(UV3) ? sname.s3.field : \
+ undef)
+
/* [Copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
#define SMT_LEVEL 0 /* Leaf 0xb SMT level */
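Aside, not part of the patch: the uvxy_field() helper introduced above selects the per-generation bit-field view of one and the same MMR union. A stand-alone imitation of the pattern follows; the hub_gen enum and the two field layouts are invented purely for illustration.

#include <stdio.h>

enum hub_gen { GEN3, GEN4 };
static enum hub_gen hub = GEN4;

/* Same 64-bit register, two per-generation field layouts. */
union overlay_config {
	unsigned long long v;
	struct { unsigned long long base : 20, m_io : 6, rsvd : 38; } s3;
	struct { unsigned long long base : 26, m_io : 6, rsvd : 32; } s4;
};

/* Selector in the spirit of uvxy_field(): fall back to 'undef' otherwise. */
#define xy_field(reg, field, undef)				\
	(unsigned long long)(hub == GEN4 ? (reg).s4.field :	\
			     hub == GEN3 ? (reg).s3.field : (undef))

int main(void)
{
	union overlay_config mmioh = { .v = 0x123456789abcdefULL };

	printf("m_io=%llu base=0x%llx\n",
	       xy_field(mmioh, m_io, 0), xy_field(mmioh, base, 0));
	return 0;
}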
@@ -221,29 +292,110 @@ static void __init uv_stringify(int len, char *to, char *from)
strncpy(to, from, len-1);
}
-static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
+/* Find UV arch type entry in UVsystab */
+static unsigned long __init early_find_archtype(struct uv_systab *st)
+{
+ int i;
+
+ for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
+ unsigned long ptr = st->entry[i].offset;
+
+ if (!ptr)
+ continue;
+ ptr += (unsigned long)st;
+ if (st->entry[i].type == UV_SYSTAB_TYPE_ARCH_TYPE)
+ return ptr;
+ }
+ return 0;
+}
+
+/* Validate UV arch type field in UVsystab */
+static int __init decode_arch_type(unsigned long ptr)
+{
+ struct uv_arch_type_entry *uv_ate = (struct uv_arch_type_entry *)ptr;
+ int n = strlen(uv_ate->archtype);
+
+ if (n > 0 && n < sizeof(uv_ate->archtype)) {
+ pr_info("UV: UVarchtype received from BIOS\n");
+ uv_stringify(UV_AT_SIZE, uv_archtype, uv_ate->archtype);
+ return 1;
+ }
+ return 0;
+}
+
+/* Determine if UV arch type entry might exist in UVsystab */
+static int __init early_get_arch_type(void)
{
- int pnodeid;
- int uv_apic;
+ unsigned long uvst_physaddr, uvst_size, ptr;
+ struct uv_systab *st;
+ u32 rev;
+ int ret;
+
+ uvst_physaddr = get_uv_systab_phys(0);
+ if (!uvst_physaddr)
+ return 0;
+
+ st = early_memremap_ro(uvst_physaddr, sizeof(struct uv_systab));
+ if (!st) {
+ pr_err("UV: Cannot access UVsystab, remap failed\n");
+ return 0;
+ }
+ rev = st->revision;
+ if (rev < UV_SYSTAB_VERSION_UV5) {
+ early_memunmap(st, sizeof(struct uv_systab));
+ return 0;
+ }
+
+ uvst_size = st->size;
+ early_memunmap(st, sizeof(struct uv_systab));
+ st = early_memremap_ro(uvst_physaddr, uvst_size);
+ if (!st) {
+ pr_err("UV: Cannot access UVarchtype, remap failed\n");
+ return 0;
+ }
+
+ ptr = early_find_archtype(st);
+ if (!ptr) {
+ early_memunmap(st, uvst_size);
+ return 0;
+ }
+
+ ret = decode_arch_type(ptr);
+ early_memunmap(st, uvst_size);
+ return ret;
+}
+
+static int __init uv_set_system_type(char *_oem_id)
+{
+ /* Save OEM_ID passed from ACPI MADT */
uv_stringify(sizeof(oem_id), oem_id, _oem_id);
- uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
- if (strncmp(oem_id, "SGI", 3) != 0) {
- if (strncmp(oem_id, "NSGI", 4) != 0)
+ /* Check if BIOS sent us a UVarchtype */
+ if (!early_get_arch_type())
+
+ /* If not use OEM ID for UVarchtype */
+ uv_stringify(UV_AT_SIZE, uv_archtype, _oem_id);
+
+ /* Check if not hubbed */
+ if (strncmp(uv_archtype, "SGI", 3) != 0) {
+
+ /* (Not hubbed), check if not hubless */
+ if (strncmp(uv_archtype, "NSGI", 4) != 0)
+
+ /* (Not hubless), not a UV */
return 0;
- /* UV4 Hubless, CH, (0x11:UV4+Any) */
- if (strncmp(oem_id, "NSGI4", 5) == 0)
+ /* UV4 Hubless: CH */
+ if (strncmp(uv_archtype, "NSGI4", 5) == 0)
uv_hubless_system = 0x11;
- /* UV3 Hubless, UV300/MC990X w/o hub (0x9:UV3+Any) */
+ /* UV3 Hubless: UV300/MC990X w/o hub */
else
uv_hubless_system = 0x9;
- pr_info("UV: OEM IDs %s/%s, HUBLESS(0x%x)\n",
- oem_id, oem_table_id, uv_hubless_system);
-
+ pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n",
+ oem_id, oem_table_id, uv_system_type, uv_hubless_system);
return 0;
}
@@ -252,60 +404,83 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
return 0;
}
- /* Set up early hub type field in uv_hub_info for Node 0 */
- uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
+ /* Set hubbed type if true */
+ uv_hub_info->hub_revision =
+ !strncmp(uv_archtype, "SGI5", 4) ? UV5_HUB_REVISION_BASE :
+ !strncmp(uv_archtype, "SGI4", 4) ? UV4_HUB_REVISION_BASE :
+ !strncmp(uv_archtype, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
+ !strcmp(uv_archtype, "SGI2") ? UV2_HUB_REVISION_BASE : 0;
+
+ switch (uv_hub_info->hub_revision) {
+ case UV5_HUB_REVISION_BASE:
+ uv_hubbed_system = 0x21;
+ uv_hub_type_set(UV5);
+ break;
- /*
- * Determine UV arch type.
- * SGI2: UV2000/3000
- * SGI3: UV300 (truncated to 4 chars because of different varieties)
- * SGI4: UV400 (truncated to 4 chars because of different varieties)
- */
- if (!strncmp(oem_id, "SGI4", 4)) {
- uv_hub_info->hub_revision = UV4_HUB_REVISION_BASE;
+ case UV4_HUB_REVISION_BASE:
uv_hubbed_system = 0x11;
+ uv_hub_type_set(UV4);
+ break;
- } else if (!strncmp(oem_id, "SGI3", 4)) {
- uv_hub_info->hub_revision = UV3_HUB_REVISION_BASE;
+ case UV3_HUB_REVISION_BASE:
uv_hubbed_system = 0x9;
+ uv_hub_type_set(UV3);
+ break;
- } else if (!strcmp(oem_id, "SGI2")) {
- uv_hub_info->hub_revision = UV2_HUB_REVISION_BASE;
+ case UV2_HUB_REVISION_BASE:
uv_hubbed_system = 0x5;
+ uv_hub_type_set(UV2);
+ break;
- } else {
- uv_hub_info->hub_revision = 0;
- goto badbios;
+ default:
+ return 0;
}
- pnodeid = early_get_pnodeid();
- early_get_apic_socketid_shift();
+ /* Get UV hub chip part number & revision */
+ early_set_hub_type();
+ /* Other UV setup functions */
+ early_get_pnodeid();
+ early_get_apic_socketid_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
x86_platform.nmi_init = uv_nmi_init;
+ uv_tsc_check_sync();
+
+ return 1;
+}
+
+/* Called early to probe for the correct APIC driver */
+static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
+{
+ /* Set up early hub info fields for Node 0 */
+ uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
+
+ /* If not UV, return. */
+ if (likely(uv_set_system_type(_oem_id) == 0))
+ return 0;
+
+ /* Save and Decode OEM Table ID */
+ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
- if (!strcmp(oem_table_id, "UVX")) {
- /* This is the most common hardware variant: */
+ /* This is the most common hardware variant, x2apic mode */
+ if (!strcmp(oem_table_id, "UVX"))
uv_system_type = UV_X2APIC;
- uv_apic = 0;
- } else if (!strcmp(oem_table_id, "UVL")) {
- /* Only used for very small systems: */
+ /* Only used for very small systems, usually 1 chassis, legacy mode */
+ else if (!strcmp(oem_table_id, "UVL"))
uv_system_type = UV_LEGACY_APIC;
- uv_apic = 0;
- } else {
+ else
goto badbios;
- }
- pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", oem_id, oem_table_id, uv_system_type, uv_min_hub_revision_id, uv_apic);
- uv_tsc_check_sync();
+ pr_info("UV: OEM IDs %s/%s, System/UVType %d/0x%x, HUB RevID %d\n",
+ oem_id, oem_table_id, uv_system_type, is_uv(UV_ANY),
+ uv_min_hub_revision_id);
- return uv_apic;
+ return 0;
badbios:
- pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id);
- pr_err("Current UV Type or BIOS not supported\n");
+ pr_err("UV: UVarchtype:%s not supported\n", uv_archtype);
BUG();
}
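Aside, not part of the patch: uv_set_system_type() now turns the archtype prefix into a hub revision with a single ternary strncmp() chain and then switches on the result. A tiny self-contained version of that lookup, with placeholder revision constants and the most specific prefix tested first, mirroring the order used in the patch.

#include <stdio.h>
#include <string.h>

enum { REV_NONE = 0, REV_UV2 = 2, REV_UV3 = 3, REV_UV4 = 4, REV_UV5 = 5 };

static int archtype_to_revision(const char *archtype)
{
	return !strncmp(archtype, "SGI5", 4) ? REV_UV5 :
	       !strncmp(archtype, "SGI4", 4) ? REV_UV4 :
	       !strncmp(archtype, "SGI3", 4) ? REV_UV3 :
	       !strcmp(archtype, "SGI2")     ? REV_UV2 : REV_NONE;
}

int main(void)
{
	const char *samples[] = { "SGI5", "SGI4", "SGI2", "NSGI4", "ACME" };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-6s -> revision %d\n", samples[i],
		       archtype_to_revision(samples[i]));
	return 0;
}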
@@ -673,12 +848,12 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
};
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3
-#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+#define DEST_SHIFT UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
- union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
- union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+ union uvh_rh_gam_alias_2_overlay_config_u alias;
+ union uvh_rh_gam_alias_2_redirect_config_u redirect;
unsigned long m_redirect;
unsigned long m_overlay;
int i;
@@ -686,16 +861,16 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) {
switch (i) {
case 0:
- m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
- m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
+ m_redirect = UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG;
+ m_overlay = UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG;
break;
case 1:
- m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
- m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
+ m_redirect = UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG;
+ m_overlay = UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG;
break;
case 2:
- m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
- m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
+ m_redirect = UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG;
+ m_overlay = UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG;
break;
}
alias.v = uv_read_local_mmr(m_overlay);
@@ -710,6 +885,7 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
}
enum map_type {map_wb, map_uc};
+static const char * const mt[] = { "WB", "UC" };
static __init void map_high(char *id, unsigned long base, int pshift, int bshift, int max_pnode, enum map_type map_type)
{
@@ -721,23 +897,36 @@ static __init void map_high(char *id, unsigned long base, int pshift, int bshift
pr_info("UV: Map %s_HI base address NULL\n", id);
return;
}
- pr_debug("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
if (map_type == map_uc)
init_extra_mapping_uc(paddr, bytes);
else
init_extra_mapping_wb(paddr, bytes);
+
+ pr_info("UV: Map %s_HI 0x%lx - 0x%lx %s (%d segments)\n",
+ id, paddr, paddr + bytes, mt[map_type], max_pnode + 1);
}
static __init void map_gru_high(int max_pnode)
{
- union uvh_rh_gam_gru_overlay_config_mmr_u gru;
- int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
- unsigned long mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK;
- unsigned long base;
+ union uvh_rh_gam_gru_overlay_config_u gru;
+ unsigned long mask, base;
+ int shift;
+
+ if (UVH_RH_GAM_GRU_OVERLAY_CONFIG) {
+ gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG);
+ shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT;
+ mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK;
+ } else if (UVH_RH10_GAM_GRU_OVERLAY_CONFIG) {
+ gru.v = uv_read_local_mmr(UVH_RH10_GAM_GRU_OVERLAY_CONFIG);
+ shift = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT;
+ mask = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK;
+ } else {
+ pr_err("UV: GRU unavailable (no MMR)\n");
+ return;
+ }
- gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
if (!gru.s.enable) {
- pr_info("UV: GRU disabled\n");
+ pr_info("UV: GRU disabled (by BIOS)\n");
return;
}
@@ -749,62 +938,104 @@ static __init void map_gru_high(int max_pnode)
static __init void map_mmr_high(int max_pnode)
{
- union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
- int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ unsigned long base;
+ int shift;
+ bool enable;
+
+ if (UVH_RH10_GAM_MMR_OVERLAY_CONFIG) {
+ union uvh_rh10_gam_mmr_overlay_config_u mmr;
+
+ mmr.v = uv_read_local_mmr(UVH_RH10_GAM_MMR_OVERLAY_CONFIG);
+ enable = mmr.s.enable;
+ base = mmr.s.base;
+ shift = UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT;
+ } else if (UVH_RH_GAM_MMR_OVERLAY_CONFIG) {
+ union uvh_rh_gam_mmr_overlay_config_u mmr;
+
+ mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG);
+ enable = mmr.s.enable;
+ base = mmr.s.base;
+ shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT;
+ } else {
+ pr_err("UV:%s:RH_GAM_MMR_OVERLAY_CONFIG MMR undefined?\n",
+ __func__);
+ return;
+ }
- mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
- if (mmr.s.enable)
- map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
+ if (enable)
+ map_high("MMR", base, shift, shift, max_pnode, map_uc);
else
pr_info("UV: MMR disabled\n");
}
-/* UV3/4 have identical MMIOH overlay configs, UV4A is slightly different */
-static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
-{
- unsigned long overlay;
- unsigned long mmr;
- unsigned long base;
- unsigned long nasid_mask;
- unsigned long m_overlay;
- int i, n, shift, m_io, max_io;
- int nasid, lnasid, fi, li;
- char *id;
-
- if (index == 0) {
- id = "MMIOH0";
- m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR;
- overlay = uv_read_local_mmr(m_overlay);
- base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK;
- mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR;
- m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
- >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
- shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
- n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
- nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK;
- } else {
- id = "MMIOH1";
- m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR;
- overlay = uv_read_local_mmr(m_overlay);
- base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK;
- mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR;
- m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
- >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
- shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
- n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH;
- nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK;
+/* Arch specific ENUM cases */
+enum mmioh_arch {
+ UV2_MMIOH = -1,
+ UVY_MMIOH0, UVY_MMIOH1,
+ UVX_MMIOH0, UVX_MMIOH1,
+};
+
+/* Calculate and Map MMIOH Regions */
+static void __init calc_mmioh_map(enum mmioh_arch index,
+ int min_pnode, int max_pnode,
+ int shift, unsigned long base, int m_io, int n_io)
+{
+ unsigned long mmr, nasid_mask;
+ int nasid, min_nasid, max_nasid, lnasid, mapped;
+ int i, fi, li, n, max_io;
+ char id[8];
+
+ /* One (UV2) mapping */
+ if (index == UV2_MMIOH) {
+ strncpy(id, "MMIOH", sizeof(id));
+ max_io = max_pnode;
+ mapped = 0;
+ goto map_exit;
}
- pr_info("UV: %s overlay 0x%lx base:0x%lx m_io:%d\n", id, overlay, base, m_io);
- if (!(overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)) {
- pr_info("UV: %s disabled\n", id);
+
+ /* small and large MMIOH mappings */
+ switch (index) {
+ case UVY_MMIOH0:
+ mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0;
+ nasid_mask = UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK;
+ n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH;
+ min_nasid = min_pnode;
+ max_nasid = max_pnode;
+ mapped = 1;
+ break;
+ case UVY_MMIOH1:
+ mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1;
+ nasid_mask = UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK;
+ n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH;
+ min_nasid = min_pnode;
+ max_nasid = max_pnode;
+ mapped = 1;
+ break;
+ case UVX_MMIOH0:
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0;
+ nasid_mask = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH;
+ min_nasid = min_pnode * 2;
+ max_nasid = max_pnode * 2;
+ mapped = 1;
+ break;
+ case UVX_MMIOH1:
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1;
+ nasid_mask = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH;
+ min_nasid = min_pnode * 2;
+ max_nasid = max_pnode * 2;
+ mapped = 1;
+ break;
+ default:
+ pr_err("UV:%s:Invalid mapping type:%d\n", __func__, index);
return;
}
- /* Convert to NASID: */
- min_pnode *= 2;
- max_pnode *= 2;
- max_io = lnasid = fi = li = -1;
+ /* enum values chosen so (index mod 2) is MMIOH 0/1 (low/high) */
+ snprintf(id, sizeof(id), "MMIOH%d", index%2);
+ max_io = lnasid = fi = li = -1;
for (i = 0; i < n; i++) {
unsigned long m_redirect = mmr + i * 8;
unsigned long redirect = uv_read_local_mmr(m_redirect);
@@ -814,9 +1045,12 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n",
id, redirect, m_redirect, nasid);
- /* Invalid NASID: */
- if (nasid < min_pnode || max_pnode < nasid)
+ /* Invalid NASID check */
+ if (nasid < min_nasid || max_nasid < nasid) {
+ pr_err("UV:%s:Invalid NASID:%x (range:%x..%x)\n",
+ __func__, index, min_nasid, max_nasid);
nasid = -1;
+ }
if (nasid == lnasid) {
li = i;
@@ -839,7 +1073,8 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
}
addr1 = (base << shift) + f * (1ULL << m_io);
addr2 = (base << shift) + (l + 1) * (1ULL << m_io);
- pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2);
+ pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
+ id, fi, li, lnasid, addr1, addr2);
if (max_io < l)
max_io = l;
}
@@ -847,49 +1082,93 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
lnasid = nasid;
}
- pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", id, base, shift, m_io, max_io);
+map_exit:
+ pr_info("UV: %s base:0x%lx shift:%d m_io:%d max_io:%d max_pnode:0x%x\n",
+ id, base, shift, m_io, max_io, max_pnode);
- if (max_io >= 0)
+ if (max_io >= 0 && !mapped)
map_high(id, base, shift, m_io, max_io, map_uc);
}
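Aside, not part of the patch: the redirect loop in calc_mmioh_map() is essentially a run-length pass, collapsing consecutive entries that point at the same NASID into one [first..last] span before handing the span to map_high(). A stand-alone sketch of that grouping over an invented array; entries marked -1 play the role of invalid NASIDs and are skipped.

#include <stdio.h>

int main(void)
{
	/* Hypothetical redirect targets: -1 marks an invalid entry. */
	int nasid[] = { 0, 0, 0, 4, 4, -1, 6 };
	int n = sizeof(nasid) / sizeof(nasid[0]);
	int last = -2, first_idx = -1, i;

	for (i = 0; i <= n; i++) {
		int cur = (i < n) ? nasid[i] : -2;	/* sentinel flushes the tail */

		if (cur == last)
			continue;			/* still inside the same run */
		if (first_idx >= 0 && last >= 0)
			printf("NASID %d covers entries [%d..%d]\n",
			       last, first_idx, i - 1);
		first_idx = i;
		last = cur;
	}
	return 0;
}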
static __init void map_mmioh_high(int min_pnode, int max_pnode)
{
- union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
- unsigned long mmr, base;
- int shift, enable, m_io, n_io;
+ /* UVY flavor */
+ if (UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0) {
+ union uvh_rh10_gam_mmioh_overlay_config0_u mmioh0;
+ union uvh_rh10_gam_mmioh_overlay_config1_u mmioh1;
+
+ mmioh0.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0);
+ if (unlikely(mmioh0.s.enable == 0))
+ pr_info("UV: MMIOH0 disabled\n");
+ else
+ calc_mmioh_map(UVY_MMIOH0, min_pnode, max_pnode,
+ UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT,
+ mmioh0.s.base, mmioh0.s.m_io, mmioh0.s.n_io);
- if (is_uv3_hub() || is_uv4_hub()) {
- /* Map both MMIOH regions: */
- map_mmioh_high_uv34(0, min_pnode, max_pnode);
- map_mmioh_high_uv34(1, min_pnode, max_pnode);
+ mmioh1.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1);
+ if (unlikely(mmioh1.s.enable == 0))
+ pr_info("UV: MMIOH1 disabled\n");
+ else
+ calc_mmioh_map(UVY_MMIOH1, min_pnode, max_pnode,
+ UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT,
+ mmioh1.s.base, mmioh1.s.m_io, mmioh1.s.n_io);
return;
}
+ /* UVX flavor */
+ if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0) {
+ union uvh_rh_gam_mmioh_overlay_config0_u mmioh0;
+ union uvh_rh_gam_mmioh_overlay_config1_u mmioh1;
+
+ mmioh0.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0);
+ if (unlikely(mmioh0.s.enable == 0))
+ pr_info("UV: MMIOH0 disabled\n");
+ else {
+ unsigned long base = uvxy_field(mmioh0, base, 0);
+ int m_io = uvxy_field(mmioh0, m_io, 0);
+ int n_io = uvxy_field(mmioh0, n_io, 0);
+
+ calc_mmioh_map(UVX_MMIOH0, min_pnode, max_pnode,
+ UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT,
+ base, m_io, n_io);
+ }
- if (is_uv2_hub()) {
- mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
- shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
- mmioh.v = uv_read_local_mmr(mmr);
- enable = !!mmioh.s2.enable;
- base = mmioh.s2.base;
- m_io = mmioh.s2.m_io;
- n_io = mmioh.s2.n_io;
-
- if (enable) {
- max_pnode &= (1 << n_io) - 1;
- pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
- base, shift, m_io, n_io, max_pnode);
- map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
- } else {
- pr_info("UV: MMIOH disabled\n");
+ mmioh1.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1);
+ if (unlikely(mmioh1.s.enable == 0))
+ pr_info("UV: MMIOH1 disabled\n");
+ else {
+ unsigned long base = uvxy_field(mmioh1, base, 0);
+ int m_io = uvxy_field(mmioh1, m_io, 0);
+ int n_io = uvxy_field(mmioh1, n_io, 0);
+
+ calc_mmioh_map(UVX_MMIOH1, min_pnode, max_pnode,
+ UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT,
+ base, m_io, n_io);
}
+ return;
+ }
+
+ /* UV2 flavor */
+ if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG) {
+ union uvh_rh_gam_mmioh_overlay_config_u mmioh;
+
+ mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG);
+ if (unlikely(mmioh.s2.enable == 0))
+ pr_info("UV: MMIOH disabled\n");
+ else
+ calc_mmioh_map(UV2_MMIOH, min_pnode, max_pnode,
+ UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT,
+ mmioh.s2.base, mmioh.s2.m_io, mmioh.s2.n_io);
+ return;
}
}
static __init void map_low_mmrs(void)
{
- init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
- init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
+ if (UV_GLOBAL_MMR32_BASE)
+ init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
+
+ if (UV_LOCAL_MMR_BASE)
+ init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
static __init void uv_rtc_init(void)
@@ -909,85 +1188,6 @@ static __init void uv_rtc_init(void)
}
}
-/*
- * percpu heartbeat timer
- */
-static void uv_heartbeat(struct timer_list *timer)
-{
- unsigned char bits = uv_scir_info->state;
-
- /* Flip heartbeat bit: */
- bits ^= SCIR_CPU_HEARTBEAT;
-
- /* Is this CPU idle? */
- if (idle_cpu(raw_smp_processor_id()))
- bits &= ~SCIR_CPU_ACTIVITY;
- else
- bits |= SCIR_CPU_ACTIVITY;
-
- /* Update system controller interface reg: */
- uv_set_scir_bits(bits);
-
- /* Enable next timer period: */
- mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
-}
-
-static int uv_heartbeat_enable(unsigned int cpu)
-{
- while (!uv_cpu_scir_info(cpu)->enabled) {
- struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
-
- uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
- timer_setup(timer, uv_heartbeat, TIMER_PINNED);
- timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
- add_timer_on(timer, cpu);
- uv_cpu_scir_info(cpu)->enabled = 1;
-
- /* Also ensure that boot CPU is enabled: */
- cpu = 0;
- }
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int uv_heartbeat_disable(unsigned int cpu)
-{
- if (uv_cpu_scir_info(cpu)->enabled) {
- uv_cpu_scir_info(cpu)->enabled = 0;
- del_timer(&uv_cpu_scir_info(cpu)->timer);
- }
- uv_set_cpu_scir_bits(cpu, 0xff);
- return 0;
-}
-
-static __init void uv_scir_register_cpu_notifier(void)
-{
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/x2apic-uvx:online",
- uv_heartbeat_enable, uv_heartbeat_disable);
-}
-
-#else /* !CONFIG_HOTPLUG_CPU */
-
-static __init void uv_scir_register_cpu_notifier(void)
-{
-}
-
-static __init int uv_init_heartbeat(void)
-{
- int cpu;
-
- if (is_uv_system()) {
- for_each_online_cpu(cpu)
- uv_heartbeat_enable(cpu);
- }
-
- return 0;
-}
-
-late_initcall(uv_init_heartbeat);
-
-#endif /* !CONFIG_HOTPLUG_CPU */
-
/* Direct Legacy VGA I/O traffic to designated IOH */
static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
{
@@ -1027,26 +1227,22 @@ struct mn {
unsigned char n_lshift;
};
+/* Initialize caller's MN struct and fill in values */
static void get_mn(struct mn *mnp)
{
- union uvh_rh_gam_config_mmr_u m_n_config;
- union uv3h_gr0_gam_gr_config_u m_gr_config;
-
- /* Make sure the whole structure is well initialized: */
memset(mnp, 0, sizeof(*mnp));
-
- m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
- mnp->n_val = m_n_config.s.n_skt;
-
- if (is_uv4_hub()) {
+ mnp->n_val = uv_cpuid.n_skt;
+ if (is_uv(UV4|UVY)) {
mnp->m_val = 0;
mnp->n_lshift = 0;
} else if (is_uv3_hub()) {
- mnp->m_val = m_n_config.s3.m_skt;
- m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
+ union uvyh_gr0_gam_gr_config_u m_gr_config;
+
+ mnp->m_val = uv_cpuid.m_skt;
+ m_gr_config.v = uv_read_local_mmr(UVH_GR0_GAM_GR_CONFIG);
mnp->n_lshift = m_gr_config.s3.m_skt;
} else if (is_uv2_hub()) {
- mnp->m_val = m_n_config.s2.m_skt;
+ mnp->m_val = uv_cpuid.m_skt;
mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
}
mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
@@ -1054,7 +1250,6 @@ static void get_mn(struct mn *mnp)
static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
{
- union uvh_node_id_u node_id;
struct mn mn;
get_mn(&mn);
@@ -1067,7 +1262,9 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
hi->m_shift = mn.m_shift;
hi->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
hi->hub_revision = uv_hub_info->hub_revision;
+ hi->hub_type = uv_hub_info->hub_type;
hi->pnode_mask = uv_cpuid.pnode_mask;
+ hi->nasid_shift = uv_cpuid.nasid_shift;
hi->min_pnode = _min_pnode;
hi->min_socket = _min_socket;
hi->pnode_to_socket = _pnode_to_socket;
@@ -1076,9 +1273,8 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
hi->gr_table_len = _gr_table_len;
hi->gr_table = _gr_table;
- node_id.v = uv_read_local_mmr(UVH_NODE_ID);
uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
- hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
+ hi->gnode_extra = (uv_node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
if (mn.m_val)
hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
@@ -1090,7 +1286,9 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
hi->gpa_shift = uv_gp_table->gpa_shift;
hi->gpa_mask = (1UL << hi->gpa_shift) - 1;
} else {
- hi->global_mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE;
+ hi->global_mmr_base =
+ uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG) &
+ ~UV_MMR_ENABLE;
hi->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
}
@@ -1101,7 +1299,11 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
/* Show system specific info: */
pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n", hi->n_val, hi->m_val, hi->m_shift, hi->n_lshift);
pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n", hi->gpa_mask, hi->gpa_shift, hi->pnode_mask, hi->apic_pnode_shift);
- pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift, hi->global_gru_base, hi->global_gru_shift);
+ pr_info("UV: mmr_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift);
+ if (hi->global_gru_base)
+ pr_info("UV: gru_base/shift:0x%lx/%ld\n",
+ hi->global_gru_base, hi->global_gru_shift);
+
pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", hi->gnode_upper, hi->gnode_extra);
}
@@ -1173,21 +1375,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
pr_info("UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n", index, _min_socket, _max_socket, _min_pnode, _max_pnode);
}
+/* Walk through UVsystab decoding the fields */
static int __init decode_uv_systab(void)
{
struct uv_systab *st;
int i;
- /* If system is uv3 or lower, there is no extended UVsystab */
- if (is_uv_hubbed(0xfffffe) < uv(4) && is_uv_hubless(0xfffffe) < uv(4))
- return 0; /* No extended UVsystab required */
-
+ /* Get mapped UVsystab pointer */
st = uv_systab;
+
+ /* If UVsystab is version 1, there is no extended UVsystab */
+ if (st && st->revision == UV_SYSTAB_VERSION_1)
+ return 0;
+
if ((!st) || (st->revision < UV_SYSTAB_VERSION_UV4_LATEST)) {
int rev = st ? st->revision : 0;
- pr_err("UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n", rev, UV_SYSTAB_VERSION_UV4_LATEST);
- pr_err("UV: Cannot support UV operations, switching to generic PC\n");
+ pr_err("UV: BIOS UVsystab mismatch, (%x < %x)\n",
+ rev, UV_SYSTAB_VERSION_UV4_LATEST);
+ pr_err("UV: Does not support UV, switch to non-UV x86_64\n");
uv_system_type = UV_NONE;
return -EINVAL;
@@ -1199,7 +1405,8 @@ static int __init decode_uv_systab(void)
if (!ptr)
continue;
- ptr = ptr + (unsigned long)st;
+ /* point to payload */
+ ptr += (unsigned long)st;
switch (st->entry[i].type) {
case UV_SYSTAB_TYPE_GAM_PARAMS:
@@ -1209,32 +1416,49 @@ static int __init decode_uv_systab(void)
case UV_SYSTAB_TYPE_GAM_RNG_TBL:
decode_gam_rng_tbl(ptr);
break;
+
+ case UV_SYSTAB_TYPE_ARCH_TYPE:
+ /* already processed in early startup */
+ break;
+
+ default:
+ pr_err("UV:%s:Unrecognized UV_SYSTAB_TYPE:%d, skipped\n",
+ __func__, st->entry[i].type);
+ break;
}
}
return 0;
}
-/*
- * Set up physical blade translations from UVH_NODE_PRESENT_TABLE
- * .. NB: UVH_NODE_PRESENT_TABLE is going away,
- * .. being replaced by GAM Range Table
- */
+/* Set up physical blade translations from UVH_NODE_PRESENT_TABLE */
static __init void boot_init_possible_blades(struct uv_hub_info_s *hub_info)
{
+ unsigned long np;
int i, uv_pb = 0;
- pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH);
- for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
- unsigned long np;
-
- np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
- if (np)
+ if (UVH_NODE_PRESENT_TABLE) {
+ pr_info("UV: NODE_PRESENT_DEPTH = %d\n",
+ UVH_NODE_PRESENT_TABLE_DEPTH);
+ for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+ np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np);
-
+ uv_pb += hweight64(np);
+ }
+ }
+ if (UVH_NODE_PRESENT_0) {
+ np = uv_read_local_mmr(UVH_NODE_PRESENT_0);
+ pr_info("UV: NODE_PRESENT_0 = 0x%016lx\n", np);
+ uv_pb += hweight64(np);
+ }
+ if (UVH_NODE_PRESENT_1) {
+ np = uv_read_local_mmr(UVH_NODE_PRESENT_1);
+ pr_info("UV: NODE_PRESENT_1 = 0x%016lx\n", np);
uv_pb += hweight64(np);
}
if (uv_possible_blades != uv_pb)
uv_possible_blades = uv_pb;
+
+ pr_info("UV: number nodes/possible blades %d\n", uv_pb);
}
static void __init build_socket_tables(void)
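Aside, not part of the patch: whichever NODE_PRESENT register layout a hub provides, the possible-blade count is simply the population count of the present-bit words. A user-space equivalent using the GCC/Clang builtin where the kernel uses hweight64(); the register contents are invented.

#include <stdio.h>

int main(void)
{
	/* Hypothetical NODE_PRESENT_0/1 contents: one bit per present node. */
	unsigned long long present[2] = { 0x000000000000000fULL,
					  0x0000000000000300ULL };
	int blades = 0;

	for (int i = 0; i < 2; i++)
		blades += __builtin_popcountll(present[i]);

	printf("possible blades = %d\n", blades);	/* 4 + 2 = 6 */
	return 0;
}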
@@ -1253,7 +1477,7 @@ static void __init build_socket_tables(void)
pr_info("UV: No UVsystab socket table, ignoring\n");
return;
}
- pr_crit("UV: Error: UVsystab address translations not available!\n");
+ pr_err("UV: Error: UVsystab address translations not available!\n");
BUG();
}
@@ -1379,9 +1603,9 @@ static int __maybe_unused proc_hubless_show(struct seq_file *file, void *data)
return 0;
}
-static int __maybe_unused proc_oemid_show(struct seq_file *file, void *data)
+static int __maybe_unused proc_archtype_show(struct seq_file *file, void *data)
{
- seq_printf(file, "%s/%s\n", oem_id, oem_table_id);
+ seq_printf(file, "%s/%s\n", uv_archtype, oem_table_id);
return 0;
}
@@ -1390,7 +1614,7 @@ static __init void uv_setup_proc_files(int hubless)
struct proc_dir_entry *pde;
pde = proc_mkdir(UV_PROC_NODE, NULL);
- proc_create_single("oemid", 0, pde, proc_oemid_show);
+ proc_create_single("archtype", 0, pde, proc_archtype_show);
if (hubless)
proc_create_single("hubless", 0, pde, proc_hubless_show);
else
@@ -1429,7 +1653,8 @@ static void __init uv_system_init_hub(void)
struct uv_hub_info_s hub_info = {0};
int bytes, cpu, nodeid;
unsigned short min_pnode = 9999, max_pnode = 0;
- char *hub = is_uv4_hub() ? "UV400" :
+ char *hub = is_uv5_hub() ? "UV500" :
+ is_uv4_hub() ? "UV400" :
is_uv3_hub() ? "UV300" :
is_uv2_hub() ? "UV2000/3000" : NULL;
@@ -1441,12 +1666,14 @@ static void __init uv_system_init_hub(void)
map_low_mmrs();
- /* Get uv_systab for decoding: */
+ /* Get uv_systab for decoding, setup UV BIOS calls */
uv_bios_init();
/* If there's an UVsystab problem then abort UV init: */
- if (decode_uv_systab() < 0)
+ if (decode_uv_systab() < 0) {
+ pr_err("UV: Mangled UVsystab format\n");
return;
+ }
build_socket_tables();
build_uv_gr_table();
@@ -1517,8 +1744,6 @@ static void __init uv_system_init_hub(void)
uv_hub_info_list(numa_node_id)->pnode = pnode;
else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
uv_cpu_hub_info(cpu)->pnode = pnode;
-
- uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
}
for_each_node(nodeid) {
@@ -1547,7 +1772,6 @@ static void __init uv_system_init_hub(void)
uv_nmi_setup();
uv_cpu_init();
- uv_scir_register_cpu_notifier();
uv_setup_proc_files(0);
/* Register Legacy VGA I/O redirection handler: */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dcc3d943c68f..6062ce586b95 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -614,7 +614,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
* If BIOS has not enabled SME then don't advertise the
* SME feature (set in scattered.c).
* For SEV: If BIOS has not enabled SEV then don't advertise the
- * SEV feature (set in scattered.c).
+ * SEV and SEV_ES feature (set in scattered.c).
*
* In all cases, since support for SME and SEV requires long mode,
* don't advertise the feature under CONFIG_X86_32.
@@ -645,6 +645,7 @@ clear_all:
setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
setup_clear_cpu_cap(X86_FEATURE_SEV);
+ setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
}
}
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index c7503be92f35..57074cf3ad7c 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -248,7 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
switch (leaf) {
case 1:
l1 = &l1i;
- /* fall through */
+ fallthrough;
case 0:
if (!l1->val)
return;
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index c5cf336e5077..345f7d905db6 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -65,6 +65,9 @@ static void init_c3(struct cpuinfo_x86 *c)
c->x86_cache_alignment = c->x86_clflush_size * 2;
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
+
+ if (c->x86 >= 7)
+ set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
enum {
@@ -90,18 +93,15 @@ enum {
static void early_init_centaur(struct cpuinfo_x86 *c)
{
- switch (c->x86) {
#ifdef CONFIG_X86_32
- case 5:
- /* Emulate MTRRs using Centaur's MCR. */
+ /* Emulate MTRRs using Centaur's MCR. */
+ if (c->x86 == 5)
set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
- break;
#endif
- case 6:
- if (c->x86_model >= 0xf)
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- break;
- }
+ if ((c->x86 == 6 && c->x86_model >= 0xf) ||
+ (c->x86 >= 7))
+ set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
@@ -145,9 +145,8 @@ static void init_centaur(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
}
- switch (c->x86) {
#ifdef CONFIG_X86_32
- case 5:
+ if (c->x86 == 5) {
switch (c->x86_model) {
case 4:
name = "C6";
@@ -207,12 +206,10 @@ static void init_centaur(struct cpuinfo_x86 *c)
c->x86_cache_size = (cc>>24)+(dd>>24);
}
sprintf(c->x86_model_id, "WinChip %s", name);
- break;
+ }
#endif
- case 6:
+ if (c->x86 == 6 || c->x86 >= 7)
init_c3(c);
- break;
- }
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c5d6f17d9b9d..35ad8480c464 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -23,6 +23,7 @@
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>
+#include <asm/cmdline.h>
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
@@ -359,7 +360,7 @@ void native_write_cr0(unsigned long val)
unsigned long bits_missing = 0;
set_register:
- asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
+ asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
if (static_branch_likely(&cr_pinning)) {
if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
@@ -378,7 +379,7 @@ void native_write_cr4(unsigned long val)
unsigned long bits_changed = 0;
set_register:
- asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
+ asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
if (static_branch_likely(&cr_pinning)) {
if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
@@ -1221,6 +1222,59 @@ static void detect_nopl(void)
}
/*
+ * We parse cpu parameters early because fpu__init_system() is executed
+ * before parse_early_param().
+ */
+static void __init cpu_parse_early_param(void)
+{
+ char arg[128];
+ char *argptr = arg;
+ int arglen, res, bit;
+
+#ifdef CONFIG_X86_32
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
+#ifdef CONFIG_MATH_EMULATION
+ setup_clear_cpu_cap(X86_FEATURE_FPU);
+#else
+ pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n");
+#endif
+
+ if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
+ setup_clear_cpu_cap(X86_FEATURE_FXSR);
+#endif
+
+ if (cmdline_find_option_bool(boot_command_line, "noxsave"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+
+ if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+
+ if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+
+ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
+ if (arglen <= 0)
+ return;
+
+ pr_info("Clearing CPUID bits:");
+ do {
+ res = get_option(&argptr, &bit);
+ if (res == 0 || res == 3)
+ break;
+
+ /* If the argument was too long, the last bit may be cut off */
+ if (res == 1 && arglen >= sizeof(arg))
+ break;
+
+ if (bit >= 0 && bit < NCAPINTS * 32) {
+ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
+ setup_clear_cpu_cap(bit);
+ }
+ } while (res == 2);
+ pr_cont("\n");
+}
+
+/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
* cache alignment.
@@ -1255,6 +1309,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
get_cpu_cap(c);
get_cpu_address_sizes(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
+ cpu_parse_early_param();
if (this_cpu->c_early_init)
this_cpu->c_early_init(c);
@@ -1413,15 +1468,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
* ESPFIX issue, we can change this.
*/
#ifdef CONFIG_X86_32
-# ifdef CONFIG_PARAVIRT_XXL
- do {
- extern void native_iret(void);
- if (pv_ops.cpu.iret == native_iret)
- set_cpu_bug(c, X86_BUG_ESPFIX);
- } while (0);
-# else
set_cpu_bug(c, X86_BUG_ESPFIX);
-# endif
#endif
}
@@ -1829,6 +1876,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
+ /* Only mapped when SEV-ES is active */
+ tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}
#else /* CONFIG_X86_64 */
@@ -1861,6 +1910,29 @@ static inline void tss_setup_io_bitmap(struct tss_struct *tss)
}
/*
+ * Setup everything needed to handle exceptions from the IDT, including the IST
+ * exceptions which use paranoid_entry().
+ */
+void cpu_init_exception_handling(void)
+{
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ int cpu = raw_smp_processor_id();
+
+ /* paranoid_entry() gets the CPU number from the GDT */
+ setup_getcpu(cpu);
+
+ /* IST vectors need TSS to be set up. */
+ tss_setup_ist(tss);
+ tss_setup_io_bitmap(tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
+
+ load_TR_desc();
+
+ /* Finally load the IDT */
+ load_current_idt();
+}
+
+/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
* and IDT. We reload them nevertheless, this function acts as a
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index 3cbe24ca80ab..d502241995a3 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -69,6 +69,8 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC },
{ X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC },
{ X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES },
+ { X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA },
{}
};
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 99be063fcb1b..0c6b02dd744c 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -132,49 +132,49 @@ static enum smca_bank_types smca_get_bank_type(unsigned int bank)
}
static struct smca_hwid smca_hwid_mcatypes[] = {
- /* { bank_type, hwid_mcatype, xec_bitmap } */
+ /* { bank_type, hwid_mcatype } */
/* Reserved type */
- { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },
+ { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0) },
/* ZN Core (HWID=0xB0) MCA types */
- { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF },
- { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF },
- { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
- { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
- { SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF },
+ { SMCA_LS, HWID_MCATYPE(0xB0, 0x0) },
+ { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10) },
+ { SMCA_IF, HWID_MCATYPE(0xB0, 0x1) },
+ { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2) },
+ { SMCA_DE, HWID_MCATYPE(0xB0, 0x3) },
/* HWID 0xB0 MCATYPE 0x4 is Reserved */
- { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0xFFF },
- { SMCA_FP, HWID_MCATYPE(0xB0, 0x6), 0x7F },
- { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },
+ { SMCA_EX, HWID_MCATYPE(0xB0, 0x5) },
+ { SMCA_FP, HWID_MCATYPE(0xB0, 0x6) },
+ { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7) },
/* Data Fabric MCA types */
- { SMCA_CS, HWID_MCATYPE(0x2E, 0x0), 0x1FF },
- { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0x1F },
- { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2), 0x3FFF },
+ { SMCA_CS, HWID_MCATYPE(0x2E, 0x0) },
+ { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1) },
+ { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2) },
/* Unified Memory Controller MCA type */
- { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0xFF },
+ { SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
/* Parameter Block MCA type */
- { SMCA_PB, HWID_MCATYPE(0x05, 0x0), 0x1 },
+ { SMCA_PB, HWID_MCATYPE(0x05, 0x0) },
/* Platform Security Processor MCA type */
- { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0), 0x1 },
- { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1), 0x3FFFF },
+ { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0) },
+ { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1) },
/* System Management Unit MCA type */
- { SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 },
- { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1), 0x7FF },
+ { SMCA_SMU, HWID_MCATYPE(0x01, 0x0) },
+ { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1) },
/* Microprocessor 5 Unit MCA type */
- { SMCA_MP5, HWID_MCATYPE(0x01, 0x2), 0x3FF },
+ { SMCA_MP5, HWID_MCATYPE(0x01, 0x2) },
/* Northbridge IO Unit MCA type */
- { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0), 0x1F },
+ { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0) },
/* PCI Express Unit MCA type */
- { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0), 0x1F },
+ { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) },
};
struct smca_bank smca_banks[MAX_NR_BANKS];
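Aside, not part of the patch: the smca_hwid_mcatypes rows above simply drop the unused xec_bitmap column; because members left out of an initializer list are zero-initialized in C, no other change to the table is required. A minimal demonstration of that rule with an invented struct and values.

#include <stdio.h>

struct hwid_entry {
	int bank_type;
	unsigned int hwid_mcatype;
	unsigned int xec_bitmap;	/* column dropped from the initializers */
};

static const struct hwid_entry table[] = {
	{ 1, 0xb0000001 },	/* omitted members are zero-initialized */
	{ 2, 0xb0000002 },
};

int main(void)
{
	printf("entry 0: type=%d mcatype=0x%x xec_bitmap=%u\n",
	       table[0].bank_type, table[0].hwid_mcatype, table[0].xec_bitmap);
	return 0;
}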
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index f43a78bde670..1c08cb9eb9f6 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -40,7 +40,6 @@
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
-#include <linux/jump_label.h>
#include <linux/set_memory.h>
#include <linux/sync_core.h>
#include <linux/task_work.h>
@@ -373,42 +372,105 @@ static int msr_to_offset(u32 msr)
return -1;
}
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
+{
+ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
+ (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
+
+ show_stack_regs(regs);
+
+ panic("MCA architectural violation!\n");
+
+ while (true)
+ cpu_relax();
+
+ return true;
+}
+
/* MSR access wrappers used for error injection */
-static u64 mce_rdmsrl(u32 msr)
+static noinstr u64 mce_rdmsrl(u32 msr)
{
- u64 v;
+ DECLARE_ARGS(val, low, high);
if (__this_cpu_read(injectm.finished)) {
- int offset = msr_to_offset(msr);
+ int offset;
+ u64 ret;
+
+ instrumentation_begin();
+ offset = msr_to_offset(msr);
if (offset < 0)
- return 0;
- return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
- }
+ ret = 0;
+ else
+ ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
- if (rdmsrl_safe(msr, &v)) {
- WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
- /*
- * Return zero in case the access faulted. This should
- * not happen normally but can happen if the CPU does
- * something weird, or if the code is buggy.
- */
- v = 0;
+ instrumentation_end();
+
+ return ret;
}
- return v;
+ /*
+ * RDMSR on MCA MSRs should not fault. If they do, this is very much an
+ * architectural violation and needs to be reported to hw vendor. Panic
+ * the box to not allow any further progress.
+ */
+ asm volatile("1: rdmsr\n"
+ "2:\n"
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
+ : EAX_EDX_RET(val, low, high) : "c" (msr));
+
+
+ return EAX_EDX_VAL(val, low, high);
+}
+
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
+{
+ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
+ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
+ regs->ip, (void *)regs->ip);
+
+ show_stack_regs(regs);
+
+ panic("MCA architectural violation!\n");
+
+ while (true)
+ cpu_relax();
+
+ return true;
}
-static void mce_wrmsrl(u32 msr, u64 v)
+static noinstr void mce_wrmsrl(u32 msr, u64 v)
{
+ u32 low, high;
+
if (__this_cpu_read(injectm.finished)) {
- int offset = msr_to_offset(msr);
+ int offset;
+
+ instrumentation_begin();
+ offset = msr_to_offset(msr);
if (offset >= 0)
*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
+
+ instrumentation_end();
+
return;
}
- wrmsrl(msr, v);
+
+ low = (u32)v;
+ high = (u32)(v >> 32);
+
+ /* See comment in mce_rdmsrl() */
+ asm volatile("1: wrmsr\n"
+ "2:\n"
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
+ : : "c" (msr), "a"(low), "d" (high) : "memory");
}
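Aside, not part of the patch: WRMSR takes its 64-bit operand split across EDX:EAX, which is why mce_wrmsrl() now computes explicit low/high halves before the inline asm. The split-and-recombine arithmetic in isolation, with a made-up value.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t v = 0x1122334455667788ULL;	/* hypothetical MSR value */
	uint32_t low  = (uint32_t)v;		/* goes into EAX */
	uint32_t high = (uint32_t)(v >> 32);	/* goes into EDX */
	uint64_t back = ((uint64_t)high << 32) | low;

	printf("low=0x%08x high=0x%08x recombined=0x%016llx\n",
	       low, high, (unsigned long long)back);
	return 0;
}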
/*
@@ -745,7 +807,7 @@ log_it:
goto clear_it;
mce_read_aux(&m, i);
- m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
+ m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false);
/*
* Don't get the IP here because it's unlikely to
* have anything to do with the actual error location.
@@ -794,7 +856,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
quirk_no_way_out(i, m, regs);
m->bank = i;
- if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
mce_read_aux(m, i);
*msg = tmp;
return 1;
@@ -872,7 +934,6 @@ static void mce_reign(void)
struct mce *m = NULL;
int global_worst = 0;
char *msg = NULL;
- char *nmsg = NULL;
/*
* This CPU is the Monarch and the other CPUs have run
@@ -880,12 +941,10 @@ static void mce_reign(void)
* Grade the severity of the errors of all the CPUs.
*/
for_each_possible_cpu(cpu) {
- int severity = mce_severity(&per_cpu(mces_seen, cpu),
- mca_cfg.tolerant,
- &nmsg, true);
- if (severity > global_worst) {
- msg = nmsg;
- global_worst = severity;
+ struct mce *mtmp = &per_cpu(mces_seen, cpu);
+
+ if (mtmp->severity > global_worst) {
+ global_worst = mtmp->severity;
m = &per_cpu(mces_seen, cpu);
}
}
@@ -895,8 +954,11 @@ static void mce_reign(void)
* This dumps all the mces in the log buffer and stops the
* other CPUs.
*/
- if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
+ if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+ /* call mce_severity() to get "msg" for panic */
+ mce_severity(m, NULL, mca_cfg.tolerant, &msg, true);
mce_panic("Fatal machine check", m, msg);
+ }
/*
* For UC somewhere we let the CPU who detects it handle it.
@@ -1105,7 +1167,7 @@ static noinstr bool mce_check_crashing_cpu(void)
return false;
}
-static void __mc_scan_banks(struct mce *m, struct mce *final,
+static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
unsigned long *toclear, unsigned long *valid_banks,
int no_way_out, int *worst)
{
@@ -1140,7 +1202,7 @@ static void __mc_scan_banks(struct mce *m, struct mce *final,
/* Set taint even when machine check was not enabled. */
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
- severity = mce_severity(m, cfg->tolerant, NULL, true);
+ severity = mce_severity(m, regs, cfg->tolerant, NULL, true);
/*
* When machine check was for corrected/deferred handler don't
@@ -1188,13 +1250,34 @@ static void kill_me_maybe(struct callback_head *cb)
if (!p->mce_ripv)
flags |= MF_MUST_KILL;
- if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags)) {
+ if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
+ !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
+ sync_core();
return;
}
- pr_err("Memory error not recovered");
- kill_me_now(cb);
+ if (p->mce_vaddr != (void __user *)-1l) {
+ force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
+ } else {
+ pr_err("Memory error not recovered");
+ kill_me_now(cb);
+ }
+}
+
+static void queue_task_work(struct mce *m, int kill_it)
+{
+ current->mce_addr = m->addr;
+ current->mce_kflags = m->kflags;
+ current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+ current->mce_whole_page = whole_page(m);
+
+ if (kill_it)
+ current->mce_kill_me.func = kill_me_now;
+ else
+ current->mce_kill_me.func = kill_me_maybe;
+
+ task_work_add(current, &current->mce_kill_me, true);
}
/*
@@ -1291,7 +1374,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
order = mce_start(&no_way_out);
}
- __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
+ __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);
if (!no_way_out)
mce_clear_state(toclear);
@@ -1313,7 +1396,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
* make sure we have the right "msg".
*/
if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
- mce_severity(&m, cfg->tolerant, &msg, true);
+ mce_severity(&m, regs, cfg->tolerant, &msg, true);
mce_panic("Local fatal machine check!", &m, msg);
}
}
@@ -1330,25 +1413,16 @@ noinstr void do_machine_check(struct pt_regs *regs)
if (worst > 0)
irq_work_queue(&mce_irq_work);
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-
- sync_core();
-
if (worst != MCE_AR_SEVERITY && !kill_it)
- return;
+ goto out;
/* Fault was in user mode and we need to take some action */
if ((m.cs & 3) == 3) {
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));
- current->mce_addr = m.addr;
- current->mce_ripv = !!(m.mcgstatus & MCG_STATUS_RIPV);
- current->mce_whole_page = whole_page(&m);
- current->mce_kill_me.func = kill_me_maybe;
- if (kill_it)
- current->mce_kill_me.func = kill_me_now;
- task_work_add(current, &current->mce_kill_me, true);
+ queue_task_work(&m, kill_it);
+
} else {
/*
* Handle an MCE which has happened in kernel space but from
@@ -1363,7 +1437,12 @@ noinstr void do_machine_check(struct pt_regs *regs)
if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
mce_panic("Failed kernel mode recovery", &m, msg);
}
+
+ if (m.kflags & MCE_IN_KERNEL_COPYIN)
+ queue_task_work(&m, kill_it);
}
+out:
+ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
}
EXPORT_SYMBOL_GPL(do_machine_check);
@@ -1904,6 +1983,8 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
+ bool irq_state;
+
WARN_ON_ONCE(user_mode(regs));
/*
@@ -1914,7 +1995,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
mce_check_crashing_cpu())
return;
- nmi_enter();
+ irq_state = idtentry_enter_nmi(regs);
/*
* The call targets are marked noinstr, but objtool can't figure
* that out because it's an indirect call. Annotate it.
@@ -1925,7 +2006,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
if (regs->flags & X86_EFLAGS_IF)
trace_hardirqs_on_prepare();
instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
}
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
@@ -2062,7 +2143,7 @@ void mce_disable_bank(int bank)
and older.
* mce=nobootlog Don't log MCEs from before booting.
* mce=bios_cmci_threshold Don't program the CMCI threshold
- * mce=recovery force enable memcpy_mcsafe()
+ * mce=recovery force enable copy_mc_fragile()
*/
static int __init mcheck_enable(char *str)
{
@@ -2670,13 +2751,10 @@ static void __init mcheck_debugfs_init(void)
static void __init mcheck_debugfs_init(void) { }
#endif
-DEFINE_STATIC_KEY_FALSE(mcsafe_key);
-EXPORT_SYMBOL_GPL(mcsafe_key);
-
static int __init mcheck_late_init(void)
{
if (mca_cfg.recovery)
- static_branch_inc(&mcsafe_key);
+ enable_copy_mc_fragile();
mcheck_debugfs_init();
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index 03e51053592a..100fbeebdc72 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -67,7 +67,9 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
unlock:
mutex_unlock(&mce_chrdev_read_mutex);
- mce->kflags |= MCE_HANDLED_MCELOG;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ mce->kflags |= MCE_HANDLED_MCELOG;
+
return NOTIFY_OK;
}
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 7843ab3fde09..3a44346f2276 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -199,7 +199,7 @@ static int raise_local(void)
* calling irq_enter, but the necessary
* machinery isn't exported currently.
*/
- /*FALL THROUGH*/
+ fallthrough;
case MCJ_CTX_PROCESS:
raise_exception(m, NULL);
break;
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index d8f9230d2034..abe9fe0fb851 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -193,7 +193,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
if (!atomic_sub_return(1, &cmci_storm_on_cpus))
pr_notice("CMCI storm subsided: switching to interrupt mode\n");
- /* FALLTHROUGH */
+ fallthrough;
case CMCI_STORM_SUBSIDED:
/*
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 6473070b5da4..88dcc79cfb07 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -38,7 +38,8 @@ int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);
-extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
+extern int (*mce_severity)(struct mce *a, struct pt_regs *regs,
+ int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);
extern mce_banks_t mce_banks_ce_disabled;
@@ -185,4 +186,14 @@ extern bool amd_filter_mce(struct mce *m);
static inline bool amd_filter_mce(struct mce *m) { return false; };
#endif
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr);
+
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr);
+
#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index e1da619add19..83df991314c5 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -9,9 +9,14 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
-#include <asm/mce.h>
#include <linux/uaccess.h>
+#include <asm/mce.h>
+#include <asm/intel-family.h>
+#include <asm/traps.h>
+#include <asm/insn.h>
+#include <asm/insn-eval.h>
+
#include "internal.h"
/*
@@ -40,9 +45,14 @@ static struct severity {
unsigned char context;
unsigned char excp;
unsigned char covered;
+ unsigned char cpu_model;
+ unsigned char cpu_minstepping;
+ unsigned char bank_lo, bank_hi;
char *msg;
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
+#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
+#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
#define KERNEL .context = IN_KERNEL
#define USER .context = IN_USER
#define KERNEL_RECOV .context = IN_KERNEL_RECOV
@@ -90,14 +100,9 @@ static struct severity {
EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0)
),
MCESEV(
- DEFERRED, "Deferred error",
- NOSER, MASK(MCI_STATUS_UC|MCI_STATUS_DEFERRED|MCI_STATUS_POISON, MCI_STATUS_DEFERRED)
- ),
- MCESEV(
KEEP, "Corrected error",
NOSER, BITCLR(MCI_STATUS_UC)
),
-
/*
* known AO MCACODs reported via MCE or CMC:
*
@@ -113,6 +118,18 @@ static struct severity {
AO, "Action optional: last level cache writeback error",
SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
),
+ /*
+ * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
+ * to report uncorrected errors using CMCI with a special signature.
+ * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
+ * in one of the memory controller banks.
+ * Set severity to "AO" for same action as normal patrol scrub error.
+ */
+ MCESEV(
+ AO, "Uncorrected Patrol Scrub Error",
+ SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
+ MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
+ ),
/* ignore OVER for UCNA */
MCESEV(
@@ -198,6 +215,47 @@ static struct severity {
#define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \
(MCG_STATUS_RIPV|MCG_STATUS_EIPV))
+static bool is_copy_from_user(struct pt_regs *regs)
+{
+ u8 insn_buf[MAX_INSN_SIZE];
+ struct insn insn;
+ unsigned long addr;
+
+ if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE))
+ return false;
+
+ kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
+ insn_get_opcode(&insn);
+ if (!insn.opcode.got)
+ return false;
+
+ switch (insn.opcode.value) {
+ /* MOV mem,reg */
+ case 0x8A: case 0x8B:
+ /* MOVZ mem,reg */
+ case 0xB60F: case 0xB70F:
+ insn_get_modrm(&insn);
+ insn_get_sib(&insn);
+ if (!insn.modrm.got || !insn.sib.got)
+ return false;
+ addr = (unsigned long)insn_get_addr_ref(&insn, regs);
+ break;
+ /* REP MOVS */
+ case 0xA4: case 0xA5:
+ addr = regs->si;
+ break;
+ default:
+ return false;
+ }
+
+ if (fault_in_kernel_space(addr))
+ return false;
+
+ current->mce_vaddr = (void __user *)addr;
+
+ return true;
+}
+
/*
* If mcgstatus indicated that ip/cs on the stack were
* no good, then "m->cs" will be zero and we will have
@@ -209,15 +267,25 @@ static struct severity {
* distinguish an exception taken in user from one
* taken in the kernel.
*/
-static int error_context(struct mce *m)
+static int error_context(struct mce *m, struct pt_regs *regs)
{
+ enum handler_type t;
+
if ((m->cs & 3) == 3)
return IN_USER;
+ if (!mc_recoverable(m->mcgstatus))
+ return IN_KERNEL;
- if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) {
+ t = ex_get_fault_handler_type(m->ip);
+ if (t == EX_HANDLER_FAULT) {
m->kflags |= MCE_IN_KERNEL_RECOV;
return IN_KERNEL_RECOV;
}
+ if (t == EX_HANDLER_UACCESS && regs && is_copy_from_user(regs)) {
+ m->kflags |= MCE_IN_KERNEL_RECOV;
+ m->kflags |= MCE_IN_KERNEL_COPYIN;
+ return IN_KERNEL_RECOV;
+ }
return IN_KERNEL;
}
@@ -253,9 +321,10 @@ static int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
* See AMD Error Scope Hierarchy table in a newer BKDG. For example
* 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
*/
-static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp)
+static int mce_severity_amd(struct mce *m, struct pt_regs *regs, int tolerant,
+ char **msg, bool is_excp)
{
- enum context ctx = error_context(m);
+ enum context ctx = error_context(m, regs);
/* Processor Context Corrupt, no need to fumble too much, die! */
if (m->status & MCI_STATUS_PCC)
@@ -305,10 +374,11 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc
return MCE_KEEP_SEVERITY;
}
-static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp)
+static int mce_severity_intel(struct mce *m, struct pt_regs *regs,
+ int tolerant, char **msg, bool is_excp)
{
enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
- enum context ctx = error_context(m);
+ enum context ctx = error_context(m, regs);
struct severity *s;
for (s = severities;; s++) {
@@ -324,6 +394,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
continue;
if (s->excp && excp != s->excp)
continue;
+ if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
+ continue;
+ if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
+ continue;
+ if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
+ continue;
if (msg)
*msg = s->msg;
s->covered = 1;
@@ -336,7 +412,7 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
}
/* Default to mce_severity_intel */
-int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
+int (*mce_severity)(struct mce *m, struct pt_regs *regs, int tolerant, char **msg, bool is_excp) =
mce_severity_intel;
void __init mcheck_vendor_init_severity(void)
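The new MODEL_STEPPING()/BANK_RANGE() initializers above extend the first-match walk in
mce_severity_intel() so an entry can also be gated on CPU model, minimum stepping and MCA
bank number. As a rough illustration only, here is a standalone C sketch of that matching
logic; the rule values and the names severity_rule/classify are made up for the example and
are not the kernel table itself:

#include <stdio.h>

struct severity_rule {
	unsigned long long mask, result;   /* MCi_STATUS bits that must match */
	unsigned char cpu_model;           /* 0 = any model */
	unsigned char cpu_minstepping;     /* 0 = any stepping */
	unsigned char bank_lo, bank_hi;    /* 0 = any bank */
	int sev;
	const char *msg;
};

static const struct severity_rule rules[] = {
	/* e.g. Skylake-X stepping >= 4, banks 13-18: uncorrected patrol scrub */
	{ 0xffffeff0ULL, 0x001000c0ULL, 0x55, 4, 13, 18, 1, "Action optional" },
	/* match-anything terminator, like the final entry in severities[] */
	{ 0, 0, 0, 0, 0, 0, 3, "Panic" },
};

static int classify(unsigned long long status, unsigned int model,
		    unsigned int stepping, unsigned int bank, const char **msg)
{
	const struct severity_rule *r;

	/* First rule whose status mask and optional CPU/bank filters all match wins. */
	for (r = rules; ; r++) {
		if ((status & r->mask) != r->result)
			continue;
		if (r->cpu_model && model != r->cpu_model)
			continue;
		if (r->cpu_minstepping && stepping < r->cpu_minstepping)
			continue;
		if (r->bank_lo && (bank < r->bank_lo || bank > r->bank_hi))
			continue;
		*msg = r->msg;
		return r->sev;
	}
}

int main(void)
{
	const char *msg;
	int sev = classify(0x001000c0ULL, 0x55, 4, 14, &msg);

	printf("severity %d (%s)\n", sev, msg);
	return 0;
}

The catch-all entry at the end guarantees the walk terminates, mirroring how severities[]
always ends in a rule that matches everything.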
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 31125448b174..9834a43cd0fa 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -248,7 +248,7 @@ static void __init ms_hyperv_init_platform(void)
hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
}
- if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+ if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
x86_platform.calibrate_cpu = hv_get_tsc_khz;
@@ -270,7 +270,7 @@ static void __init ms_hyperv_init_platform(void)
crash_kexec_post_notifiers = true;
#ifdef CONFIG_X86_LOCAL_APIC
- if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+ if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
/*
* Get the APIC frequency.
@@ -296,7 +296,7 @@ static void __init ms_hyperv_init_platform(void)
machine_ops.shutdown = hv_machine_shutdown;
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
- if (ms_hyperv.features & HV_X64_ACCESS_TSC_INVARIANT) {
+ if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1);
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
} else {
@@ -330,7 +330,7 @@ static void __init ms_hyperv_init_platform(void)
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_hyperv_callback);
/* Setup the IDT for reenlightenment notifications */
- if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT) {
+ if (ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT) {
alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
asm_sysvec_hyperv_reenlightenment);
}
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 72182809b333..ca670919b561 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -98,7 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
case 7:
if (size < 0x40)
break;
- /* Else, fall through */
+ fallthrough;
case 6:
case 5:
case 4:
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 6a9df71c1b9e..e5f4ee8f4c3b 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -168,6 +168,7 @@ struct rdt_resource rdt_resources_all[] = {
.name = "MB",
.domains = domain_init(RDT_RESOURCE_MBA),
.cache_level = 3,
+ .parse_ctrlval = parse_bw,
.format_str = "%d=%*u",
.fflags = RFTYPE_RES_MB,
},
@@ -254,22 +255,30 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
{
union cpuid_0x10_3_eax eax;
union cpuid_0x10_x_edx edx;
- u32 ebx, ecx;
+ u32 ebx, ecx, max_delay;
cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
r->num_closid = edx.split.cos_max + 1;
- r->membw.max_delay = eax.split.max_delay + 1;
+ max_delay = eax.split.max_delay + 1;
r->default_ctrl = MAX_MBA_BW;
+ r->membw.arch_needs_linear = true;
if (ecx & MBA_IS_LINEAR) {
r->membw.delay_linear = true;
- r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
- r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
+ r->membw.min_bw = MAX_MBA_BW - max_delay;
+ r->membw.bw_gran = MAX_MBA_BW - max_delay;
} else {
if (!rdt_get_mb_table(r))
return false;
+ r->membw.arch_needs_linear = false;
}
r->data_width = 3;
+ if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
+ r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
+ else
+ r->membw.throttle_mode = THREAD_THROTTLE_MAX;
+ thread_throttle_mode_init();
+
r->alloc_capable = true;
r->alloc_enabled = true;
@@ -288,7 +297,13 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
/* AMD does not use delay */
r->membw.delay_linear = false;
+ r->membw.arch_needs_linear = false;
+ /*
+ * AMD does not use the memory delay throttle model to control
+ * the allocation as Intel does.
+ */
+ r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
r->membw.min_bw = 0;
r->membw.bw_gran = 1;
/* Max value is 2048, Data width should be 4 in decimal */
@@ -346,19 +361,6 @@ static void rdt_get_cdp_l2_config(void)
rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}
-static int get_cache_id(int cpu, int level)
-{
- struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
- int i;
-
- for (i = 0; i < ci->num_leaves; i++) {
- if (ci->info_list[i].level == level)
- return ci->info_list[i].id;
- }
-
- return -1;
-}
-
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
@@ -556,13 +558,13 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
*/
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
- int id = get_cache_id(cpu, r->cache_level);
+ int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
struct list_head *add_pos = NULL;
struct rdt_domain *d;
d = rdt_find_domain(r, id, &add_pos);
if (IS_ERR(d)) {
- pr_warn("Could't find cache id for cpu %d\n", cpu);
+ pr_warn("Couldn't find cache id for CPU %d\n", cpu);
return;
}
@@ -602,12 +604,12 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
- int id = get_cache_id(cpu, r->cache_level);
+ int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
struct rdt_domain *d;
d = rdt_find_domain(r, id, NULL);
if (IS_ERR_OR_NULL(d)) {
- pr_warn("Could't find cache id for cpu %d\n", cpu);
+ pr_warn("Couldn't find cache id for CPU %d\n", cpu);
return;
}
@@ -918,12 +920,12 @@ static __init void rdt_init_res_defs_intel(void)
r->rid == RDT_RESOURCE_L3CODE ||
r->rid == RDT_RESOURCE_L2 ||
r->rid == RDT_RESOURCE_L2DATA ||
- r->rid == RDT_RESOURCE_L2CODE)
- r->cbm_validate = cbm_validate_intel;
- else if (r->rid == RDT_RESOURCE_MBA) {
+ r->rid == RDT_RESOURCE_L2CODE) {
+ r->cache.arch_has_sparse_bitmaps = false;
+ r->cache.arch_has_empty_bitmaps = false;
+ } else if (r->rid == RDT_RESOURCE_MBA) {
r->msr_base = MSR_IA32_MBA_THRTL_BASE;
r->msr_update = mba_wrmsr_intel;
- r->parse_ctrlval = parse_bw_intel;
}
}
}
@@ -938,12 +940,12 @@ static __init void rdt_init_res_defs_amd(void)
r->rid == RDT_RESOURCE_L3CODE ||
r->rid == RDT_RESOURCE_L2 ||
r->rid == RDT_RESOURCE_L2DATA ||
- r->rid == RDT_RESOURCE_L2CODE)
- r->cbm_validate = cbm_validate_amd;
- else if (r->rid == RDT_RESOURCE_MBA) {
+ r->rid == RDT_RESOURCE_L2CODE) {
+ r->cache.arch_has_sparse_bitmaps = true;
+ r->cache.arch_has_empty_bitmaps = true;
+ } else if (r->rid == RDT_RESOURCE_MBA) {
r->msr_base = MSR_IA32_MBA_BW_BASE;
r->msr_update = mba_wrmsr_amd;
- r->parse_ctrlval = parse_bw_amd;
}
}
}
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 934c8fb8a64a..c877642e8a14 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -23,53 +23,6 @@
/*
* Check whether MBA bandwidth percentage value is correct. The value is
- * checked against the minimum and maximum bandwidth values specified by
- * the hardware. The allocated bandwidth percentage is rounded to the next
- * control step available on the hardware.
- */
-static bool bw_validate_amd(char *buf, unsigned long *data,
- struct rdt_resource *r)
-{
- unsigned long bw;
- int ret;
-
- ret = kstrtoul(buf, 10, &bw);
- if (ret) {
- rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
- return false;
- }
-
- if (bw < r->membw.min_bw || bw > r->default_ctrl) {
- rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
- r->membw.min_bw, r->default_ctrl);
- return false;
- }
-
- *data = roundup(bw, (unsigned long)r->membw.bw_gran);
- return true;
-}
-
-int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
- struct rdt_domain *d)
-{
- unsigned long bw_val;
-
- if (d->have_new_ctrl) {
- rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
- return -EINVAL;
- }
-
- if (!bw_validate_amd(data->buf, &bw_val, r))
- return -EINVAL;
-
- d->new_ctrl = bw_val;
- d->have_new_ctrl = true;
-
- return 0;
-}
-
-/*
- * Check whether MBA bandwidth percentage value is correct. The value is
* checked against the minimum and max bandwidth values specified by the
* hardware. The allocated bandwidth percentage is rounded to the next
* control step available on the hardware.
@@ -82,7 +35,7 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
/*
* Only linear delay values are supported for current Intel SKUs.
*/
- if (!r->membw.delay_linear) {
+ if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
rdt_last_cmd_puts("No support for non-linear MB domains\n");
return false;
}
@@ -104,8 +57,8 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
- struct rdt_domain *d)
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
unsigned long bw_val;
@@ -123,12 +76,14 @@ int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
}
/*
- * Check whether a cache bit mask is valid. The SDM says:
+ * Check whether a cache bit mask is valid.
+ * For Intel the SDM says:
* Please note that all (and only) contiguous '1' combinations
* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
* Additionally Haswell requires at least two bits set.
+ * AMD allows non-contiguous bitmasks.
*/
-bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
+static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
@@ -140,7 +95,8 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
return false;
}
- if (val == 0 || val > r->default_ctrl) {
+ if ((!r->cache.arch_has_empty_bitmaps && val == 0) ||
+ val > r->default_ctrl) {
rdt_last_cmd_puts("Mask out of range\n");
return false;
}
@@ -148,7 +104,9 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
first_bit = find_first_bit(&val, cbm_len);
zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
- if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
+ /* Are non-contiguous bitmaps allowed? */
+ if (!r->cache.arch_has_sparse_bitmaps &&
+ (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
return false;
}
@@ -164,30 +122,6 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
}
/*
- * Check whether a cache bit mask is valid. AMD allows non-contiguous
- * bitmasks
- */
-bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
-{
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 16, &val);
- if (ret) {
- rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
- return false;
- }
-
- if (val > r->default_ctrl) {
- rdt_last_cmd_puts("Mask out of range\n");
- return false;
- }
-
- *data = val;
- return true;
-}
-
-/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
@@ -212,7 +146,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
return -EINVAL;
}
- if (!r->cbm_validate(data->buf, &cbm_val, r))
+ if (!cbm_validate(data->buf, &cbm_val, r))
return -EINVAL;
if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 5ffa32256b3b..80fa997fae60 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -283,7 +283,6 @@ struct rftype {
* struct mbm_state - status for each MBM counter in each domain
* @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
* @prev_msr Value of IA32_QM_CTR for this RMID last time we read it
- * @chunks_bw Total local data moved. Used for bandwidth calculation
* @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
* @prev_bw The most recent bandwidth in MBps
* @delta_bw Difference between the current and previous bandwidth
@@ -292,7 +291,6 @@ struct rftype {
struct mbm_state {
u64 chunks;
u64 prev_msr;
- u64 chunks_bw;
u64 prev_bw_msr;
u32 prev_bw;
u32 delta_bw;
@@ -360,6 +358,8 @@ struct msr_param {
* in a cache bit mask
* @shareable_bits: Bitmask of shareable resource with other
* executing entities
+ * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
+ * @arch_has_empty_bitmaps: True if the '0' bitmap is valid.
*/
struct rdt_cache {
unsigned int cbm_len;
@@ -367,25 +367,43 @@ struct rdt_cache {
unsigned int cbm_idx_mult;
unsigned int cbm_idx_offset;
unsigned int shareable_bits;
+ bool arch_has_sparse_bitmaps;
+ bool arch_has_empty_bitmaps;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core
+ * always using smallest bandwidth percentage
+ * assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread
+ */
+enum membw_throttle_mode {
+ THREAD_THROTTLE_UNDEFINED = 0,
+ THREAD_THROTTLE_MAX,
+ THREAD_THROTTLE_PER_THREAD,
};
/**
* struct rdt_membw - Memory bandwidth allocation related data
- * @max_delay: Max throttle delay. Delay is the hardware
- * representation for memory bandwidth.
* @min_bw: Minimum memory bandwidth percentage user can request
* @bw_gran: Granularity at which the memory bandwidth is allocated
* @delay_linear: True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode: Bandwidth throttling mode when threads request
+ * different memory bandwidths
* @mba_sc: True if MBA software controller(mba_sc) is enabled
* @mb_map: Mapping of memory B/W percentage to memory B/W delay
*/
struct rdt_membw {
- u32 max_delay;
- u32 min_bw;
- u32 bw_gran;
- u32 delay_linear;
- bool mba_sc;
- u32 *mb_map;
+ u32 min_bw;
+ u32 bw_gran;
+ u32 delay_linear;
+ bool arch_needs_linear;
+ enum membw_throttle_mode throttle_mode;
+ bool mba_sc;
+ u32 *mb_map;
};
static inline bool is_llc_occupancy_enabled(void)
@@ -437,7 +455,6 @@ struct rdt_parse_data {
* @cache: Cache allocation related data
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
- * @cbm_validate Cache bitmask validate function
* @evt_list: List of monitoring events
* @num_rmid: Number of RMIDs available
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
@@ -464,7 +481,6 @@ struct rdt_resource {
int (*parse_ctrlval)(struct rdt_parse_data *data,
struct rdt_resource *r,
struct rdt_domain *d);
- bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r);
struct list_head evt_list;
int num_rmid;
unsigned int mon_scale;
@@ -474,10 +490,8 @@ struct rdt_resource {
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
-int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
- struct rdt_domain *d);
-int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
- struct rdt_domain *d);
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -609,8 +623,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
void cqm_handle_limbo(struct work_struct *work);
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
-bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
-bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
+void __init thread_throttle_mode_init(void);
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
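The two cache flags documented above are what remains of the old per-vendor validators:
with them a single cbm_validate() can accept AMD's sparse and empty bitmasks while still
enforcing Intel's contiguous-ones rule. A minimal standalone sketch of that unified check
follows; cbm_ok() is a hypothetical name and the bit scanning uses plain loops instead of
the kernel's find_*_bit() helpers:

#include <stdbool.h>
#include <stdio.h>

/* Reject masks wider than cbm_len and, unless the architecture allows sparse
 * bitmaps, masks whose set bits are not one contiguous run. */
static bool cbm_ok(unsigned long long val, unsigned int cbm_len,
		   bool arch_has_sparse_bitmaps, bool arch_has_empty_bitmaps)
{
	unsigned long long max = (cbm_len < 64) ? (1ULL << cbm_len) - 1 : ~0ULL;
	unsigned int first_bit, zero_bit, bit;

	if (!arch_has_empty_bitmaps && val == 0)
		return false;
	if (val > max)
		return false;
	if (arch_has_sparse_bitmaps || val == 0)
		return true;

	/* Find the first run of 1s, then make sure no 1 follows the first 0. */
	for (first_bit = 0; !(val & (1ULL << first_bit)); first_bit++)
		;
	for (zero_bit = first_bit; zero_bit < cbm_len && (val & (1ULL << zero_bit)); zero_bit++)
		;
	for (bit = zero_bit; bit < cbm_len; bit++)
		if (val & (1ULL << bit))
			return false;	/* a second run of 1s: non-contiguous */

	return true;
}

int main(void)
{
	printf("0x00f0 -> %d\n", cbm_ok(0x00f0, 20, false, false)); /* ok everywhere   */
	printf("0xf00f -> %d\n", cbm_ok(0xf00f, 20, false, false)); /* rejected: Intel */
	printf("0xf00f -> %d\n", cbm_ok(0xf00f, 20, true, true));   /* accepted: AMD   */
	return 0;
}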
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 837d7d012b7b..54dffe574e67 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -279,8 +279,7 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
return;
chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
- m->chunks_bw += chunks;
- m->chunks = m->chunks_bw;
+ m->chunks += chunks;
cur_bw = (chunks * r->mon_scale) >> 20;
if (m->delta_comp)
@@ -478,19 +477,13 @@ void cqm_handle_limbo(struct work_struct *work)
mutex_lock(&rdtgroup_mutex);
r = &rdt_resources_all[RDT_RESOURCE_L3];
- d = get_domain_from_cpu(cpu, r);
-
- if (!d) {
- pr_warn_once("Failure to get domain for limbo worker\n");
- goto out_unlock;
- }
+ d = container_of(work, struct rdt_domain, cqm_limbo.work);
__check_limbo(d, false);
if (has_busy_rmid(r, d))
schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
-out_unlock:
mutex_unlock(&rdtgroup_mutex);
}
@@ -520,10 +513,7 @@ void mbm_handle_overflow(struct work_struct *work)
goto out_unlock;
r = &rdt_resources_all[RDT_RESOURCE_L3];
-
- d = get_domain_from_cpu(cpu, r);
- if (!d)
- goto out_unlock;
+ d = container_of(work, struct rdt_domain, mbm_over.work);
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
mbm_update(r, d, prgrp->mon.rmid);
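After this change mbm_bw_count() adds the same wrap-corrected chunk delta to m->chunks that
it uses for the bandwidth estimate, instead of maintaining a separate chunks_bw. The wrap
handling itself (mbm_overflow_count()) is untouched; below is a small standalone sketch of
that style of delta computation, with a hypothetical counter_delta() taking the counter
width as a parameter:

#include <stdint.h>
#include <stdio.h>

/* Delta between two reads of a 'width'-bit hardware counter that may have
 * wrapped at most once between the reads. */
static uint64_t counter_delta(uint64_t prev, uint64_t cur, unsigned int width)
{
	unsigned int shift = 64 - width;

	/* Shift both values up so the wrap lands at 2^64, let unsigned
	 * subtraction do the modular arithmetic, then shift back down. */
	return ((cur << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	/* 24-bit counter that wrapped: 0xfffff0 -> 0x000010 is 0x20 chunks */
	printf("0x%llx\n",
	       (unsigned long long)counter_delta(0xfffff0, 0x000010, 24));
	return 0;
}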
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 3f844f14fc0a..b494187632b2 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -592,6 +592,18 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
return ret;
}
+static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_alloc_capable &&
+ (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
+}
+
+static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_mon_capable &&
+ (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
+}
+
/**
* rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
* @r: Resource group
@@ -607,8 +619,7 @@ int rdtgroup_tasks_assigned(struct rdtgroup *r)
rcu_read_lock();
for_each_process_thread(p, t) {
- if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
- (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
+ if (is_closid_match(t, r) || is_rmid_match(t, r)) {
ret = 1;
break;
}
@@ -706,8 +717,7 @@ static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
rcu_read_lock();
for_each_process_thread(p, t) {
- if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
- (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
+ if (is_closid_match(t, r) || is_rmid_match(t, r))
seq_printf(s, "%d\n", t->pid);
}
rcu_read_unlock();
@@ -1017,6 +1027,19 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
return 0;
}
+static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
+ seq_puts(seq, "per-thread\n");
+ else
+ seq_puts(seq, "max\n");
+
+ return 0;
+}
+
static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
@@ -1513,6 +1536,17 @@ static struct rftype res_common_files[] = {
.seq_show = rdt_delay_linear_show,
.fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
},
+ /*
+ * It is platform specific which (if any) capabilities are provided by
+ * thread_throttle_mode. Defer "fflags" initialization to platform
+ * discovery.
+ */
+ {
+ .name = "thread_throttle_mode",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_thread_throttle_mode_show,
+ },
{
.name = "max_threshold_occupancy",
.mode = 0644,
@@ -1583,7 +1617,7 @@ static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
lockdep_assert_held(&rdtgroup_mutex);
for (rft = rfts; rft < rfts + len; rft++) {
- if ((fflags & rft->fflags) == rft->fflags) {
+ if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
ret = rdtgroup_add_file(kn, rft);
if (ret)
goto error;
@@ -1600,6 +1634,33 @@ error:
return ret;
}
+static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
+{
+ struct rftype *rfts, *rft;
+ int len;
+
+ rfts = res_common_files;
+ len = ARRAY_SIZE(res_common_files);
+
+ for (rft = rfts; rft < rfts + len; rft++) {
+ if (!strcmp(rft->name, name))
+ return rft;
+ }
+
+ return NULL;
+}
+
+void __init thread_throttle_mode_init(void)
+{
+ struct rftype *rft;
+
+ rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
+ if (!rft)
+ return;
+
+ rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
+}
+
/**
* rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
* @r: The resource group with which the file is associated.
@@ -2245,18 +2306,6 @@ static int reset_all_ctrls(struct rdt_resource *r)
return 0;
}
-static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
-{
- return (rdt_alloc_capable &&
- (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
-}
-
-static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
-{
- return (rdt_mon_capable &&
- (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
-}
-
/*
* Move tasks from one to the other group. If @from is NULL, then all tasks
* in the systems are moved unconditionally (used for teardown).
@@ -3196,7 +3245,7 @@ int __init rdtgroup_init(void)
* It may also be ok since that would enable debugging of RDT before
* resctrl is mounted.
* The reason why the debugfs directory is created here and not in
- * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and
+ * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
* during the debugfs directory creation also &sb->s_type->i_mutex_key
* (the lockdep class of inode->i_rwsem). Other filesystem
* interactions (eg. SyS_getdents) have the lock ordering:
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 62b137c3c97a..866c9a9bcdee 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -35,12 +35,15 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
{ X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 },
{ X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 },
+ { X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 },
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
{ X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
+ { X86_FEATURE_SEV_ES, CPUID_EAX, 3, 0x8000001f, 0 },
+ { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 9b6fafa69be9..924571fe5864 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -33,6 +33,7 @@
#include <asm/timer.h>
#include <asm/apic.h>
#include <asm/vmware.h>
+#include <asm/svm.h>
#undef pr_fmt
#define pr_fmt(fmt) "vmware: " fmt
@@ -476,10 +477,49 @@ static bool __init vmware_legacy_x2apic_available(void)
(eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0;
}
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
+ struct pt_regs *regs)
+{
+ /* Copy VMWARE specific Hypercall parameters to the GHCB */
+ ghcb_set_rip(ghcb, regs->ip);
+ ghcb_set_rbx(ghcb, regs->bx);
+ ghcb_set_rcx(ghcb, regs->cx);
+ ghcb_set_rdx(ghcb, regs->dx);
+ ghcb_set_rsi(ghcb, regs->si);
+ ghcb_set_rdi(ghcb, regs->di);
+ ghcb_set_rbp(ghcb, regs->bp);
+}
+
+static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
+{
+ if (!(ghcb_rbx_is_valid(ghcb) &&
+ ghcb_rcx_is_valid(ghcb) &&
+ ghcb_rdx_is_valid(ghcb) &&
+ ghcb_rsi_is_valid(ghcb) &&
+ ghcb_rdi_is_valid(ghcb) &&
+ ghcb_rbp_is_valid(ghcb)))
+ return false;
+
+ regs->bx = ghcb->save.rbx;
+ regs->cx = ghcb->save.rcx;
+ regs->dx = ghcb->save.rdx;
+ regs->si = ghcb->save.rsi;
+ regs->di = ghcb->save.rdi;
+ regs->bp = ghcb->save.rbp;
+
+ return true;
+}
+#endif
+
const __initconst struct hypervisor_x86 x86_hyper_vmware = {
- .name = "VMware",
- .detect = vmware_platform,
- .type = X86_HYPER_VMWARE,
- .init.init_platform = vmware_platform_setup,
- .init.x2apic_available = vmware_legacy_x2apic_available,
+ .name = "VMware",
+ .detect = vmware_platform,
+ .type = X86_HYPER_VMWARE,
+ .init.init_platform = vmware_platform_setup,
+ .init.x2apic_available = vmware_legacy_x2apic_available,
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ .runtime.sev_es_hcall_prepare = vmware_sev_es_hcall_prepare,
+ .runtime.sev_es_hcall_finish = vmware_sev_es_hcall_finish,
+#endif
};
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index a0e8fc7d85f1..ddffd80f5c52 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -229,8 +229,8 @@ static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
it = &of_ioapic_type[type_index];
ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
- tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
- tmp.ioapic_pin = fwspec->param[0];
+ tmp.devid = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
+ tmp.ioapic.pin = fwspec->param[0];
return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 48ce44576947..25c06b67e7e0 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -29,8 +29,8 @@ static int die_counter;
static struct pt_regs exec_summary_regs;
-bool in_task_stack(unsigned long *stack, struct task_struct *task,
- struct stack_info *info)
+bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,
+ struct stack_info *info)
{
unsigned long *begin = task_stack_page(task);
unsigned long *end = task_stack_page(task) + THREAD_SIZE;
@@ -46,7 +46,8 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
return true;
}
-bool in_entry_stack(unsigned long *stack, struct stack_info *info)
+/* Called from get_stack_info_noinstr - so must be noinstr too */
+bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)
{
struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
@@ -115,7 +116,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
- printk("%sCode: Bad RIP value.\n", loglvl);
+ printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
+ loglvl, prologue);
} else {
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
__stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 4a94d38cd141..1dd851397bd9 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -24,11 +24,13 @@ static const char * const exception_stack_names[] = {
[ ESTACK_NMI ] = "NMI",
[ ESTACK_DB ] = "#DB",
[ ESTACK_MCE ] = "#MC",
+ [ ESTACK_VC ] = "#VC",
+ [ ESTACK_VC2 ] = "#VC2",
};
const char *stack_type_name(enum stack_type type)
{
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
if (type == STACK_TYPE_IRQ)
return "IRQ";
@@ -79,16 +81,18 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
EPAGERANGE(NMI),
EPAGERANGE(DB),
EPAGERANGE(MCE),
+ EPAGERANGE(VC),
+ EPAGERANGE(VC2),
};
-static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
+static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
unsigned long begin, end, stk = (unsigned long)stack;
const struct estack_pages *ep;
struct pt_regs *regs;
unsigned int k;
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
/*
@@ -122,7 +126,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
return true;
}
-static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
+static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
@@ -147,32 +151,38 @@ static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
return true;
}
-int get_stack_info(unsigned long *stack, struct task_struct *task,
- struct stack_info *info, unsigned long *visit_mask)
+bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
+ struct stack_info *info)
{
- if (!stack)
- goto unknown;
-
- task = task ? : current;
-
if (in_task_stack(stack, task, info))
- goto recursion_check;
+ return true;
if (task != current)
- goto unknown;
+ return false;
if (in_exception_stack(stack, info))
- goto recursion_check;
+ return true;
if (in_irq_stack(stack, info))
- goto recursion_check;
+ return true;
if (in_entry_stack(stack, info))
- goto recursion_check;
+ return true;
+
+ return false;
+}
+
+int get_stack_info(unsigned long *stack, struct task_struct *task,
+ struct stack_info *info, unsigned long *visit_mask)
+{
+ task = task ? : current;
- goto unknown;
+ if (!stack)
+ goto unknown;
+
+ if (!get_stack_info_noinstr(stack, task, info))
+ goto unknown;
-recursion_check:
/*
* Make sure we don't iterate through any given stack more than once.
* If it comes up a second time then there's something wrong going on:
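The noinstr split leaves get_stack_info()'s recursion guard in place: each stack type may
be entered at most once per unwind, tracked in visit_mask. A toy standalone version of that
guard, with hypothetical enum values and a visit_once() helper:

#include <stdbool.h>
#include <stdio.h>

enum stack_type { STACK_TASK = 1, STACK_IRQ, STACK_EXCEPTION, STACK_ENTRY };

/* Return false if this stack type was already seen during the current unwind. */
static bool visit_once(enum stack_type type, unsigned long *visit_mask)
{
	if (*visit_mask & (1UL << type))
		return false;
	*visit_mask |= 1UL << type;
	return true;
}

int main(void)
{
	unsigned long mask = 0;

	printf("%d\n", visit_once(STACK_IRQ, &mask));	/* 1: first visit */
	printf("%d\n", visit_once(STACK_IRQ, &mask));	/* 0: would mean a cycle */
	return 0;
}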
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 983cd53ed4c9..22aad412f965 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -305,6 +305,20 @@ static int __init cpcompare(const void *a, const void *b)
return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr);
}
+static bool e820_nomerge(enum e820_type type)
+{
+ /*
+ * These types may indicate distinct platform ranges aligned to
+ * numa node, protection domain, performance domain, or other
+ * boundaries. Do not merge them.
+ */
+ if (type == E820_TYPE_PRAM)
+ return true;
+ if (type == E820_TYPE_SOFT_RESERVED)
+ return true;
+ return false;
+}
+
int __init e820__update_table(struct e820_table *table)
{
struct e820_entry *entries = table->entries;
@@ -380,7 +394,7 @@ int __init e820__update_table(struct e820_table *table)
}
/* Continue building up new map based on this information: */
- if (current_type != last_type || current_type == E820_TYPE_PRAM) {
+ if (current_type != last_type || e820_nomerge(current_type)) {
if (last_type != 0) {
new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr;
/* Move forward only if the new size was non-zero: */
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 61ddc3a5e5c2..701f196d7c68 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -5,7 +5,6 @@
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
-#include <asm/cmdline.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -238,51 +237,11 @@ static void __init fpu__init_system_ctx_switch(void)
}
/*
- * We parse fpu parameters early because fpu__init_system() is executed
- * before parse_early_param().
- */
-static void __init fpu__init_parse_early_param(void)
-{
- char arg[32];
- char *argptr = arg;
- int bit;
-
-#ifdef CONFIG_X86_32
- if (cmdline_find_option_bool(boot_command_line, "no387"))
-#ifdef CONFIG_MATH_EMULATION
- setup_clear_cpu_cap(X86_FEATURE_FPU);
-#else
- pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n");
-#endif
-
- if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
- setup_clear_cpu_cap(X86_FEATURE_FXSR);
-#endif
-
- if (cmdline_find_option_bool(boot_command_line, "noxsave"))
- setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-
- if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
- setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-
- if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
- setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-
- if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
- sizeof(arg)) &&
- get_option(&argptr, &bit) &&
- bit >= 0 &&
- bit < NCAPINTS * 32)
- setup_clear_cpu_cap(bit);
-}
-
-/*
* Called on the boot CPU once per system bootup, to set up the initial
* FPU state that is later cloned into all processes:
*/
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
- fpu__init_parse_early_param();
fpu__init_system_early_generic(c);
/*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 038e19c0019e..5d8047441a0a 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -37,6 +37,7 @@ static const char *xfeature_names[] =
"AVX-512 ZMM_Hi256" ,
"Processor Trace (unused)" ,
"Protection Keys User registers",
+ "PASID state",
"unknown xstate feature" ,
};
@@ -51,6 +52,7 @@ static short xsave_cpuid_features[] __initdata = {
X86_FEATURE_AVX512F,
X86_FEATURE_INTEL_PT,
X86_FEATURE_PKU,
+ X86_FEATURE_ENQCMD,
};
/*
@@ -318,6 +320,7 @@ static void __init print_xstate_features(void)
print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
print_xstate_feature(XFEATURE_MASK_PKRU);
+ print_xstate_feature(XFEATURE_MASK_PASID);
}
/*
@@ -592,6 +595,7 @@ static void check_xstate_against_struct(int nr)
XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM, struct avx_512_hi16_state);
XCHECK_SZ(sz, nr, XFEATURE_PKRU, struct pkru_state);
+ XCHECK_SZ(sz, nr, XFEATURE_PASID, struct ia32_pasid_state);
/*
* Make *SURE* to add any feature numbers in below if
@@ -601,7 +605,7 @@ static void check_xstate_against_struct(int nr)
if ((nr < XFEATURE_YMM) ||
(nr >= XFEATURE_MAX) ||
(nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
- ((nr >= XFEATURE_RSRVD_COMP_10) && (nr <= XFEATURE_LBR))) {
+ ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) {
WARN_ONCE(1, "no structure for xstate: %d\n", nr);
XSTATE_WARN_ON(1);
}
@@ -1398,3 +1402,60 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+
+#ifdef CONFIG_IOMMU_SUPPORT
+void update_pasid(void)
+{
+ u64 pasid_state;
+ u32 pasid;
+
+ if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+ return;
+
+ if (!current->mm)
+ return;
+
+ pasid = READ_ONCE(current->mm->pasid);
+ /* Set the valid bit in the PASID MSR/state only for valid pasid. */
+ pasid_state = pasid == PASID_DISABLED ?
+ pasid : pasid | MSR_IA32_PASID_VALID;
+
+ /*
+ * No need to hold fregs_lock() since the task's fpstate won't
+ * be changed by others (e.g. ptrace) while the task is being
+ * switched to or is in IPI.
+ */
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+ /* The MSR is active and can be directly updated. */
+ wrmsrl(MSR_IA32_PASID, pasid_state);
+ } else {
+ struct fpu *fpu = &current->thread.fpu;
+ struct ia32_pasid_state *ppasid_state;
+ struct xregs_state *xsave;
+
+ /*
+ * The CPU's xstate registers are not currently active. Just
+ * update the PASID state in the memory buffer here. The
+ * PASID MSR will be loaded when returning to user mode.
+ */
+ xsave = &fpu->state.xsave;
+ xsave->header.xfeatures |= XFEATURE_MASK_PASID;
+ ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
+ /*
+ * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
+ * won't be NULL and no need to check its value.
+ *
+ * Only update the task's PASID state when it's different
+ * from the mm's pasid.
+ */
+ if (ppasid_state->pasid != pasid_state) {
+ /*
+ * Invalidate fpregs so that state restoring will pick up
+ * the PASID state.
+ */
+ __fpu_invalidate_fpregs_state(fpu);
+ ppasid_state->pasid = pasid_state;
+ }
+ }
+}
+#endif /* CONFIG_IOMMU_SUPPORT */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index cbb71c1b574f..4199f25c0063 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -36,6 +36,11 @@
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
+#include <asm/realmode.h>
+#include <asm/desc.h>
+#include <asm/extable.h>
+#include <asm/trapnr.h>
+#include <asm/sev-es.h>
/*
* Manage page tables very early on.
@@ -61,6 +66,24 @@ unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif
+/*
+ * GDT used on the boot CPU before switching to virtual addresses.
+ */
+static struct desc_struct startup_gdt[GDT_ENTRIES] = {
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+};
+
+/*
+ * Address needs to be set at runtime because it references the startup_gdt
+ * while the kernel still uses a direct mapping.
+ */
+static struct desc_ptr startup_gdt_descr = {
+ .size = sizeof(startup_gdt),
+ .address = 0,
+};
+
#define __head __section(.head.text)
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
@@ -297,7 +320,7 @@ static void __init reset_early_page_tables(void)
}
/* Create a new PMD entry */
-int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
+bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
unsigned long physaddr = address - __PAGE_OFFSET;
pgdval_t pgd, *pgd_p;
@@ -307,7 +330,7 @@ int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
/* Invalid address or early pgt is done ? */
if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
- return -1;
+ return false;
again:
pgd_p = &early_top_pgt[pgd_index(address)].pgd;
@@ -364,10 +387,10 @@ again:
}
pmd_p[pmd_index(address)] = pmd;
- return 0;
+ return true;
}
-int __init early_make_pgtable(unsigned long address)
+static bool __init early_make_pgtable(unsigned long address)
{
unsigned long physaddr = address - __PAGE_OFFSET;
pmdval_t pmd;
@@ -377,6 +400,19 @@ int __init early_make_pgtable(unsigned long address)
return __early_make_pgtable(address, pmd);
}
+void __init do_early_exception(struct pt_regs *regs, int trapnr)
+{
+ if (trapnr == X86_TRAP_PF &&
+ early_make_pgtable(native_read_cr2()))
+ return;
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
+ trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
+ return;
+
+ early_fixup_exception(regs, trapnr);
+}
+
/* Don't add a printk in there. printk relies on the PDA which is not initialized
yet. */
static void __init clear_bss(void)
@@ -489,3 +525,81 @@ void __init x86_64_start_reservations(char *real_mode_data)
start_kernel();
}
+
+/*
+ * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
+ * used until the idt_table takes over. On the boot CPU this happens in
+ * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
+ * this happens in the functions called from head_64.S.
+ *
+ * The idt_table can't be used that early because all the code modifying it is
+ * in idt.c and can be instrumented by tracing or KASAN, neither of which works
+ * during early CPU bringup. Also the idt_table has the runtime vectors
+ * configured, which requires certain CPU state to be set up already (like TSS),
+ * which also hasn't happened yet in early CPU bringup.
+ */
+static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
+
+static struct desc_ptr bringup_idt_descr = {
+ .size = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
+ .address = 0, /* Set at runtime */
+};
+
+static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
+{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ struct idt_data data;
+ gate_desc desc;
+
+ init_idt_data(&data, n, handler);
+ idt_init_desc(&desc, &data);
+ native_write_idt_entry(idt, n, &desc);
+#endif
+}
+
+/* This runs while still in the direct mapping */
+static void startup_64_load_idt(unsigned long physbase)
+{
+ struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
+ gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);
+
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+ void *handler;
+
+ /* VMM Communication Exception */
+ handler = fixup_pointer(vc_no_ghcb, physbase);
+ set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
+ }
+
+ desc->address = (unsigned long)idt;
+ native_load_idt(desc);
+}
+
+/* This is used when running on kernel addresses */
+void early_setup_idt(void)
+{
+ /* VMM Communication Exception */
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+ set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);
+
+ bringup_idt_descr.address = (unsigned long)bringup_idt_table;
+ native_load_idt(&bringup_idt_descr);
+}
+
+/*
+ * Setup boot CPU state needed before kernel switches to virtual addresses.
+ */
+void __head startup_64_setup_env(unsigned long physbase)
+{
+ /* Load GDT */
+ startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
+ native_load_gdt(&startup_gdt_descr);
+
+ /* New GDT is live - reload data segment registers */
+ asm volatile("movl %%eax, %%ds\n"
+ "movl %%eax, %%ss\n"
+ "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
+
+ startup_64_load_idt(physbase);
+}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 16da4ac01597..7eb2a1c87969 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -73,6 +73,20 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Set up the stack for verify_cpu(), similar to initial_stack below */
leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp
+ leaq _text(%rip), %rdi
+ pushq %rsi
+ call startup_64_setup_env
+ popq %rsi
+
+ /* Now switch to __KERNEL_CS so IRET works reliably */
+ pushq $__KERNEL_CS
+ leaq .Lon_kernel_cs(%rip), %rax
+ pushq %rax
+ lretq
+
+.Lon_kernel_cs:
+ UNWIND_HINT_EMPTY
+
/* Sanitize CPU configuration */
call verify_cpu
@@ -112,6 +126,18 @@ SYM_CODE_START(secondary_startup_64)
call verify_cpu
/*
+ * The secondary_startup_64_no_verify entry point is only used by
+ * SEV-ES guests. In those guests the call to verify_cpu() would cause
+ * #VC exceptions which can not be handled at this stage of secondary
+ * CPU bringup.
+ *
+ * All non SEV-ES systems, especially Intel systems, need to execute
+ * verify_cpu() above to make sure NX is enabled.
+ */
+SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
+ UNWIND_HINT_EMPTY
+
+ /*
* Retrieve the modifier (SME encryption mask if SME is active) to be
* added to the initial pgdir entry that will be programmed into CR3.
*/
@@ -144,33 +170,6 @@ SYM_CODE_START(secondary_startup_64)
1:
UNWIND_HINT_EMPTY
- /* Check if nx is implemented */
- movl $0x80000001, %eax
- cpuid
- movl %edx,%edi
-
- /* Setup EFER (Extended Feature Enable Register) */
- movl $MSR_EFER, %ecx
- rdmsr
- btsl $_EFER_SCE, %eax /* Enable System Call */
- btl $20,%edi /* No Execute supported? */
- jnc 1f
- btsl $_EFER_NX, %eax
- btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
-1: wrmsr /* Make changes effective */
-
- /* Setup cr0 */
- movl $CR0_STATE, %eax
- /* Make changes effective */
- movq %rax, %cr0
-
- /* Setup a boot time stack */
- movq initial_stack(%rip), %rsp
-
- /* zero EFLAGS after setting rsp */
- pushq $0
- popfq
-
/*
* We must switch to a new descriptor in kernel space for the GDT
* because soon the kernel won't have access anymore to the userspace
@@ -205,6 +204,41 @@ SYM_CODE_START(secondary_startup_64)
movl initial_gs+4(%rip),%edx
wrmsr
+ /*
+ * Set up a boot time stack - any secondary CPU will have lost its stack
+ * by now because the cr3-switch above unmaps the real-mode stack.
+ */
+ movq initial_stack(%rip), %rsp
+
+ /* Setup and Load IDT */
+ pushq %rsi
+ call early_setup_idt
+ popq %rsi
+
+ /* Check if nx is implemented */
+ movl $0x80000001, %eax
+ cpuid
+ movl %edx,%edi
+
+ /* Setup EFER (Extended Feature Enable Register) */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_SCE, %eax /* Enable System Call */
+ btl $20,%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
+ btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
+1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+ movl $CR0_STATE, %eax
+ /* Make changes effective */
+ movq %rax, %cr0
+
+ /* zero EFLAGS after setting rsp */
+ pushq $0
+ popfq
+
/* rsi is pointer to real mode structure with interesting info.
pass it to C */
movq %rsi, %rdi
@@ -259,11 +293,47 @@ SYM_CODE_START(start_cpu0)
SYM_CODE_END(start_cpu0)
#endif
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/*
+ * VC Exception handler used during early boot when running on kernel
+ * addresses, but before the switch to the idt_table can be made.
+ * The early_idt_handler_array can't be used here because it calls into a lot
+ * of __init code and this handler is also used during CPU offlining/onlining.
+ * Therefore this handler ends up in the .text section so that it stays around
+ * when .init.text is freed.
+ */
+SYM_CODE_START_NOALIGN(vc_boot_ghcb)
+ UNWIND_HINT_IRET_REGS offset=8
+
+ /* Build pt_regs */
+ PUSH_AND_CLEAR_REGS
+
+ /* Call C handler */
+ movq %rsp, %rdi
+ movq ORIG_RAX(%rsp), %rsi
+ movq initial_vc_handler(%rip), %rax
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rax
+
+ /* Unwind pt_regs */
+ POP_REGS
+
+ /* Remove Error Code */
+ addq $8, %rsp
+
+ /* Pure iret required here - don't use INTERRUPT_RETURN */
+ iretq
+SYM_CODE_END(vc_boot_ghcb)
+#endif
+
/* Both SMP bootup and ACPI suspend change these variables */
__REFDATA
.balign 8
SYM_DATA(initial_code, .quad x86_64_start_kernel)
SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data))
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
+#endif
/*
* The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
@@ -319,22 +389,43 @@ SYM_CODE_START_LOCAL(early_idt_handler_common)
pushq %r15 /* pt_regs->r15 */
UNWIND_HINT_REGS
- cmpq $14,%rsi /* Page fault? */
- jnz 10f
- GET_CR2_INTO(%rdi) /* can clobber %rax if pv */
- call early_make_pgtable
- andl %eax,%eax
- jz 20f /* All good */
-
-10:
movq %rsp,%rdi /* RDI = pt_regs; RSI is already trapnr */
- call early_fixup_exception
+ call do_early_exception
-20:
decl early_recursion_flag(%rip)
jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/*
+ * VC Exception handler used during very early boot. The
+ * early_idt_handler_array can't be used because it returns via the
+ * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
+ *
+ * This handler will end up in the .init.text section and not be
+ * available to boot secondary CPUs.
+ */
+SYM_CODE_START_NOALIGN(vc_no_ghcb)
+ UNWIND_HINT_IRET_REGS offset=8
+
+ /* Build pt_regs */
+ PUSH_AND_CLEAR_REGS
+
+ /* Call C handler */
+ movq %rsp, %rdi
+ movq ORIG_RAX(%rsp), %rsi
+ call do_vc_no_ghcb
+
+ /* Unwind pt_regs */
+ POP_REGS
+
+ /* Remove Error Code */
+ addq $8, %rsp
+
+ /* Pure iret required here - don't use INTERRUPT_RETURN */
+ iretq
+SYM_CODE_END(vc_no_ghcb)
+#endif
#define SYM_DATA_START_PAGE_ALIGNED(name) \
SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8cdf29ffd95f..03aa33b58165 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -349,7 +349,7 @@ static int arch_build_bp_info(struct perf_event *bp,
hw->len = X86_BREAKPOINT_LEN_X;
return 0;
}
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -442,42 +442,6 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
}
/*
- * Dump the debug register contents to the user.
- * We can't dump our per cpu values because it
- * may contain cpu wide breakpoint, something that
- * doesn't belong to the current task.
- *
- * TODO: include non-ptrace user breakpoints (perf)
- */
-void aout_dump_debugregs(struct user *dump)
-{
- int i;
- int dr7 = 0;
- struct perf_event *bp;
- struct arch_hw_breakpoint *info;
- struct thread_struct *thread = &current->thread;
-
- for (i = 0; i < HBP_NUM; i++) {
- bp = thread->ptrace_bps[i];
-
- if (bp && !bp->attr.disabled) {
- dump->u_debugreg[i] = bp->attr.bp_addr;
- info = counter_arch_bp(bp);
- dr7 |= encode_dr7(i, info->len, info->type);
- } else {
- dump->u_debugreg[i] = 0;
- }
- }
-
- dump->u_debugreg[4] = 0;
- dump->u_debugreg[5] = 0;
- dump->u_debugreg[6] = current->thread.debugreg6;
-
- dump->u_debugreg[7] = dr7;
-}
-EXPORT_SYMBOL_GPL(aout_dump_debugregs);
-
-/*
* Release the user breakpoints used by ptrace
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
@@ -490,7 +454,7 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
t->ptrace_bps[i] = NULL;
}
- t->debugreg6 = 0;
+ t->virtual_dr6 = 0;
t->ptrace_dr7 = 0;
}
@@ -500,7 +464,7 @@ void hw_breakpoint_restore(void)
set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
- set_debugreg(current->thread.debugreg6, 6);
+ set_debugreg(DR6_RESERVED, 6);
set_debugreg(__this_cpu_read(cpu_dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
@@ -523,10 +487,10 @@ EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
*/
static int hw_breakpoint_handler(struct die_args *args)
{
- int i, cpu, rc = NOTIFY_STOP;
+ int i, rc = NOTIFY_STOP;
struct perf_event *bp;
- unsigned long dr6;
unsigned long *dr6_p;
+ unsigned long dr6;
/* The DR6 value is pointed by args->err */
dr6_p = (unsigned long *)ERR_PTR(args->err);
@@ -540,14 +504,6 @@ static int hw_breakpoint_handler(struct die_args *args)
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
- /*
- * Assert that local interrupts are disabled
- * Reset the DRn bits in the virtualized register value.
- * The ptrace trigger routine will add in whatever is needed.
- */
- current->thread.debugreg6 &= ~DR_TRAP_BITS;
- cpu = get_cpu();
-
/* Handle all the breakpoints that were triggered */
for (i = 0; i < HBP_NUM; ++i) {
if (likely(!(dr6 & (DR_TRAP0 << i))))
@@ -561,7 +517,7 @@ static int hw_breakpoint_handler(struct die_args *args)
*/
rcu_read_lock();
- bp = per_cpu(bp_per_reg[i], cpu);
+ bp = this_cpu_read(bp_per_reg[i]);
/*
* Reset the 'i'th TRAP bit in dr6 to denote completion of
* exception handling
@@ -592,12 +548,10 @@ static int hw_breakpoint_handler(struct die_args *args)
* breakpoints (to generate signals) and b) when the system has
* taken exception due to multiple causes
*/
- if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
+ if ((current->thread.virtual_dr6 & DR_TRAP_BITS) ||
(dr6 & (~DR_TRAP_BITS)))
rc = NOTIFY_DONE;
- put_cpu();
-
return rc;
}
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 7ecf9babf0cb..ee1a283f8e96 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -11,13 +11,6 @@
#include <asm/desc.h>
#include <asm/hw_irq.h>
-struct idt_data {
- unsigned int vector;
- unsigned int segment;
- struct idt_bits bits;
- const void *addr;
-};
-
#define DPL0 0x0
#define DPL3 0x3
@@ -149,9 +142,6 @@ static const __initconst struct idt_data apic_idts[] = {
# ifdef CONFIG_IRQ_WORK
INTG(IRQ_WORK_VECTOR, asm_sysvec_irq_work),
# endif
-# ifdef CONFIG_X86_UV
- INTG(UV_BAU_MESSAGE, asm_sysvec_uv_bau_message),
-# endif
INTG(SPURIOUS_APIC_VECTOR, asm_sysvec_spurious_apic_interrupt),
INTG(ERROR_APIC_VECTOR, asm_sysvec_error_interrupt),
#endif
@@ -178,20 +168,6 @@ bool idt_is_f00f_address(unsigned long address)
}
#endif
-static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
-{
- unsigned long addr = (unsigned long) d->addr;
-
- gate->offset_low = (u16) addr;
- gate->segment = (u16) d->segment;
- gate->bits = d->bits;
- gate->offset_middle = (u16) (addr >> 16);
-#ifdef CONFIG_X86_64
- gate->offset_high = (u32) (addr >> 32);
- gate->reserved = 0;
-#endif
-}
-
static __init void
idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
{
@@ -209,14 +185,7 @@ static __init void set_intr_gate(unsigned int n, const void *addr)
{
struct idt_data data;
- BUG_ON(n > 0xFF);
-
- memset(&data, 0, sizeof(data));
- data.vector = n;
- data.addr = addr;
- data.segment = __KERNEL_CS;
- data.bits.type = GATE_INTERRUPT;
- data.bits.p = 1;
+ init_idt_data(&data, n, addr);
idt_setup_from_table(idt_table, &data, 1, false);
}
@@ -257,11 +226,14 @@ static const __initconst struct idt_data early_pf_idts[] = {
* cpu_init() when the TSS has been initialized.
*/
static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
- ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
- ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
+ ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
+ ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
+ ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
+ ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
+#endif
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC),
#endif
};
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 181060247e3c..c5dd50369e2f 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_X86_64))
- run_on_irqstack_cond(desc->handle_irq, desc, regs);
+ run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
else
__handle_irq(desc, regs);
}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1b4fe93a86c5..440eed558558 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
void do_softirq_own_stack(void)
{
- run_on_irqstack_cond(__do_softirq, NULL, NULL);
+ run_on_irqstack_cond(__do_softirq, NULL);
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 68acd30c6b87..ff7878df96b4 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -450,7 +450,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->ip = addr;
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
/* clear the trace bit */
@@ -539,7 +539,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
* a system call which should be ignored
*/
return NOTIFY_DONE;
- /* fall through */
+ fallthrough;
default:
if (user_mode(regs))
return NOTIFY_DONE;
@@ -629,9 +629,10 @@ static void kgdb_hw_overflow_handler(struct perf_event *event,
struct task_struct *tsk = current;
int i;
- for (i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++) {
if (breakinfo[i].enabled)
- tsk->thread.debugreg6 |= (DR_TRAP0 << i);
+ tsk->thread.virtual_dr6 |= (DR_TRAP0 << i);
+ }
}
void kgdb_arch_late(void)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index fdadc37d72af..547c7abb39f5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -38,9 +38,9 @@
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
-#include <linux/frame.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
+#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
@@ -767,124 +767,21 @@ asm(
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
+
/*
* Called from kretprobe_trampoline
*/
__used __visible void *trampoline_handler(struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
- kprobe_opcode_t *correct_ret_addr = NULL;
- void *frame_pointer;
- bool skipped = false;
-
- /*
- * Set a dummy kprobe for avoiding kretprobe recursion.
- * Since kretprobe never run in kprobe handler, kprobe must not
- * be running at this point.
- */
- kprobe_busy_begin();
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
/* fixup registers */
regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
- regs->cs |= get_kernel_rpl();
regs->gs = 0;
#endif
- /* We use pt_regs->sp for return address holder. */
- frame_pointer = &regs->sp;
- regs->ip = trampoline_address;
+ regs->ip = (unsigned long)&kretprobe_trampoline;
regs->orig_ax = ~0UL;
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * return probes installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always pushed into the head of the list
- * - when multiple return probes are registered for the same
- * function, the (chronologically) first instance's ret_addr
- * will be the real return address, and all the rest will
- * point to kretprobe_trampoline.
- */
- hlist_for_each_entry(ri, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
- /*
- * Return probes must be pushed on this hash list correct
- * order (same as return order) so that it can be popped
- * correctly. However, if we find it is pushed it incorrect
- * order, this means we find a function which should not be
- * probed, because the wrong order entry is pushed on the
- * path of processing other kretprobe itself.
- */
- if (ri->fp != frame_pointer) {
- if (!skipped)
- pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
- skipped = true;
- continue;
- }
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (skipped)
- pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
- ri->rp->kp.addr);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
- if (ri->fp != frame_pointer)
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (ri->rp && ri->rp->handler) {
- __this_cpu_write(current_kprobe, &ri->rp->kp);
- ri->ret_addr = correct_ret_addr;
- ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, &kprobe_busy);
- }
-
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_hash_unlock(current, &flags);
-
- kprobe_busy_end();
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
- return (void *)orig_ret_address;
+ return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, &regs->sp);
}
NOKPROBE_SYMBOL(trampoline_handler);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 40f380461e6d..041f0b50bc27 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -16,8 +16,9 @@
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
-#include <linux/frame.h>
+#include <linux/objtool.h>
#include <linux/pgtable.h>
+#include <linux/static_call.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -181,7 +182,6 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
/* Save skipped registers */
regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
- regs->cs |= get_kernel_rpl();
regs->gs = 0;
#endif
regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
@@ -210,7 +210,8 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
/* Check whether the address range is reserved */
if (ftrace_text_reserved(src, src + len - 1) ||
alternatives_text_reserved(src, src + len - 1) ||
- jump_label_text_reserved(src, src + len - 1))
+ jump_label_text_reserved(src, src + len - 1) ||
+ static_call_text_reserved(src, src + len - 1))
return -EBUSY;
return len;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08320b0b2b27..1c0f2560a41c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -36,6 +36,8 @@
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
+#include <asm/ptrace.h>
+#include <asm/svm.h>
DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -270,9 +272,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
u32 token;
- irqentry_state_t state;
- state = irqentry_enter(regs);
+ ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
@@ -283,7 +284,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
}
- irqentry_exit(regs, state);
set_irq_regs(old_regs);
}
@@ -746,13 +746,34 @@ static void __init kvm_init_platform(void)
x86_platform.apic_post_init = kvm_apic_init;
}
+#if defined(CONFIG_AMD_MEM_ENCRYPT)
+static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
+{
+ /* RAX and CPL are already in the GHCB */
+ ghcb_set_rbx(ghcb, regs->bx);
+ ghcb_set_rcx(ghcb, regs->cx);
+ ghcb_set_rdx(ghcb, regs->dx);
+ ghcb_set_rsi(ghcb, regs->si);
+}
+
+static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
+{
+ /* No checking of the return state needed */
+ return true;
+}
+#endif
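
The ghcb_set_rbx()/ghcb_set_rcx()/... calls above pair each register value with
a bit in the GHCB's valid bitmap, so the hypervisor can tell which fields the
guest actually filled in. A minimal user-space model of that accessor pattern
(an aside, with illustrative names and bit positions, not the real GHCB layout):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct demo_ghcb {
	uint64_t rax, rbx;
	uint8_t  valid_bitmap[16];
};

enum { DEMO_RAX = 0, DEMO_RBX = 1 };	/* illustrative bit positions */

static void demo_set(struct demo_ghcb *g, int bit, uint64_t *field, uint64_t val)
{
	*field = val;					/* store the register value */
	g->valid_bitmap[bit / 8] |= 1u << (bit % 8);	/* mark it as provided */
}

static bool demo_is_valid(const struct demo_ghcb *g, int bit)
{
	return g->valid_bitmap[bit / 8] & (1u << (bit % 8));
}

int main(void)
{
	struct demo_ghcb g = { 0 };

	demo_set(&g, DEMO_RBX, &g.rbx, 0x1234);
	printf("rbx valid=%d rax valid=%d\n",
	       demo_is_valid(&g, DEMO_RBX), demo_is_valid(&g, DEMO_RAX));
	return 0;
}
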
+
const __initconst struct hypervisor_x86 x86_hyper_kvm = {
- .name = "KVM",
- .detect = kvm_detect,
- .type = X86_HYPER_KVM,
- .init.guest_late_init = kvm_guest_init,
- .init.x2apic_available = kvm_para_available,
- .init.init_platform = kvm_init_platform,
+ .name = "KVM",
+ .detect = kvm_detect,
+ .type = X86_HYPER_KVM,
+ .init.guest_late_init = kvm_guest_init,
+ .init.x2apic_available = kvm_para_available,
+ .init.init_platform = kvm_init_platform,
+#if defined(CONFIG_AMD_MEM_ENCRYPT)
+ .runtime.sev_es_hcall_prepare = kvm_sev_es_hcall_prepare,
+ .runtime.sev_es_hcall_finish = kvm_sev_es_hcall_finish,
+#endif
};
static __init int activate_jump_labels(void)
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 411af4aa7b51..8f06449aab27 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -24,7 +24,6 @@
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
-#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
@@ -46,11 +45,6 @@ static int __init mpf_checksum(unsigned char *mp, int len)
return sum & 0xFF;
}
-int __init default_mpc_apic_id(struct mpc_cpu *m)
-{
- return m->apicid;
-}
-
static void __init MP_processor_info(struct mpc_cpu *m)
{
int apicid;
@@ -61,7 +55,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
return;
}
- apicid = x86_init.mpparse.mpc_apic_id(m);
+ apicid = m->apicid;
if (m->cpuflag & CPU_BOOTPROCESSOR) {
bootup_cpu = " (Bootup-CPU)";
@@ -73,7 +67,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
}
#ifdef CONFIG_X86_IO_APIC
-void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
+static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
memcpy(str, m->bustype, 6);
str[6] = 0;
@@ -84,7 +78,7 @@ static void __init MP_bus_info(struct mpc_bus *m)
{
char str[7];
- x86_init.mpparse.mpc_oem_bus_info(m, str);
+ mpc_oem_bus_info(m, str);
#if MAX_MP_BUSSES < 256
if (m->busid >= MAX_MP_BUSSES) {
@@ -100,9 +94,6 @@ static void __init MP_bus_info(struct mpc_bus *m)
mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
- if (x86_init.mpparse.mpc_oem_pci_bus)
- x86_init.mpparse.mpc_oem_pci_bus(m);
-
clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
@@ -198,8 +189,6 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
1, mpc, mpc->length, 1);
}
-void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
-
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
char str[16];
@@ -218,14 +207,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
if (early)
return 1;
- if (mpc->oemptr)
- x86_init.mpparse.smp_read_mpc_oem(mpc);
-
- /*
- * Now process the configuration blocks.
- */
- x86_init.mpparse.mpc_record(0);
-
+ /* Now process the configuration blocks. */
while (count < mpc->length) {
switch (*mpt) {
case MP_PROCESSOR:
@@ -256,7 +238,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
count = mpc->length;
break;
}
- x86_init.mpparse.mpc_record(1);
}
if (!num_processors)
@@ -312,7 +293,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
case 2:
if (i == 0 || i == 13)
continue; /* IRQ0 & IRQ13 not connected */
- /* fall through */
+ fallthrough;
default:
if (i == 2)
continue; /* IRQ2 is never connected */
@@ -356,7 +337,7 @@ static void __init construct_ioapic_table(int mpc_default_type)
default:
pr_err("???\nUnknown standard configuration %d\n",
mpc_default_type);
- /* fall through */
+ fallthrough;
case 1:
case 5:
memcpy(bus.bustype, "ISA ", 6);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 49dcfb85e773..c0d409810658 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -80,18 +80,30 @@ static ssize_t msr_read(struct file *file, char __user *buf,
static int filter_write(u32 reg)
{
+ /*
+ * MSR writes usually happen all at once, and can easily saturate kmsg.
+ * Only allow one message every 30 seconds.
+ *
+ * It's possible to be smarter here and do it (for example) per-MSR, but
+ * it would certainly be more complex, and this is enough at least to
+ * avoid saturating the ring buffer.
+ */
+ static DEFINE_RATELIMIT_STATE(fw_rs, 30 * HZ, 1);
+
switch (allow_writes) {
case MSR_WRITES_ON: return 0;
case MSR_WRITES_OFF: return -EPERM;
default: break;
}
+ if (!__ratelimit(&fw_rs))
+ return 0;
+
if (reg == MSR_IA32_ENERGY_PERF_BIAS)
return 0;
- pr_err_ratelimited("Write to unrecognized MSR 0x%x by %s\n"
- "Please report to x86@kernel.org\n",
- reg, current->comm);
+ pr_err("Write to unrecognized MSR 0x%x by %s (pid: %d). Please report to x86@kernel.org.\n",
+ reg, current->comm, current->pid);
return 0;
}
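
The DEFINE_RATELIMIT_STATE()/__ratelimit() pair used above is the generic
kernel rate-limit idiom. As an aside, here is a minimal sketch of the same
pattern in a hypothetical caller (kernel context assumed; noisy_event() is
made up for illustration):

#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void noisy_event(int id)
{
	/* allow at most one message per 30 seconds */
	static DEFINE_RATELIMIT_STATE(rs, 30 * HZ, 1);

	if (!__ratelimit(&rs))
		return;		/* over the limit - stay silent */

	pr_err("event %d happened\n", id);
}
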
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 4fc9954a9560..4bc77aaf1303 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
+#include <asm/sev-es.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -102,7 +103,6 @@ fs_initcall(nmi_warning_debugfs);
static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
- u64 whole_msecs = READ_ONCE(action->max_duration);
int remainder_ns, decimal_msecs;
if (duration < nmi_longest_ns || duration < action->max_duration)
@@ -110,12 +110,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
action->max_duration = duration;
- remainder_ns = do_div(whole_msecs, (1000 * 1000));
+ remainder_ns = do_div(duration, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
printk_ratelimited(KERN_INFO
"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
- action->handler, whole_msecs, decimal_msecs);
+ action->handler, duration, decimal_msecs);
}
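
The do_div() call above splits the nanosecond duration into whole milliseconds
plus a three-digit fraction for the "%lld.%03d msecs" message. The same
arithmetic as a stand-alone user-space check (an aside, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t duration = 12345678;		/* nanoseconds */
	uint32_t remainder_ns, decimal_msecs;

	remainder_ns  = duration % (1000 * 1000);	/* ns below one millisecond */
	duration     /= 1000 * 1000;			/* now whole milliseconds */
	decimal_msecs = remainder_ns / 1000;		/* microseconds -> 3 digits */

	/* prints "12.345 msecs" */
	printf("%llu.%03u msecs\n",
	       (unsigned long long)duration, (unsigned)decimal_msecs);
	return 0;
}
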
static int nmi_handle(unsigned int type, struct pt_regs *regs)
@@ -477,6 +477,12 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
{
bool irq_state;
+ /*
+ * Re-enable NMIs right here when running as an SEV-ES guest. This might
+ * cause nested NMIs, but those can be handled safely.
+ */
+ sev_es_nmi_complete();
+
if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
return;
@@ -488,6 +494,12 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:
+ /*
+ * Needs to happen before DR7 is accessed, because the hypervisor can
+ * intercept DR7 reads/writes, turning those into #VC exceptions.
+ */
+ sev_es_ist_enter(regs);
+
this_cpu_write(nmi_dr7, local_db_save());
irq_state = idtentry_enter_nmi(regs);
@@ -501,6 +513,8 @@ nmi_restart:
local_db_restore(this_cpu_read(nmi_dr7));
+ sev_es_ist_exit();
+
if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
write_cr2(this_cpu_read(nmi_cr2));
if (this_cpu_dec_return(nmi_state))
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index de2138ba38e5..6c3407ba6ee9 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -263,13 +263,8 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
struct pv_info pv_info = {
.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
- .kernel_rpl = 0,
- .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
-
-#ifdef CONFIG_X86_64
.extra_user_64bit_cs = __USER_CS,
#endif
-#endif
};
/* 64-bit pagetable entries */
@@ -305,9 +300,7 @@ struct paravirt_patch_template pv_ops = {
.cpu.load_idt = native_load_idt,
.cpu.store_tr = native_store_tr,
.cpu.load_tls = native_load_tls,
-#ifdef CONFIG_X86_64
.cpu.load_gs_index = native_load_gs_index,
-#endif
.cpu.write_ldt_entry = native_write_ldt_entry,
.cpu.write_gdt_entry = native_write_gdt_entry,
.cpu.write_idt_entry = native_write_idt_entry,
@@ -317,9 +310,7 @@ struct paravirt_patch_template pv_ops = {
.cpu.load_sp0 = native_load_sp0,
-#ifdef CONFIG_X86_64
.cpu.usergs_sysret64 = native_usergs_sysret64,
-#endif
.cpu.iret = native_iret,
.cpu.swapgs = native_swapgs,
@@ -369,24 +360,16 @@ struct paravirt_patch_template pv_ops = {
.mmu.release_p4d = paravirt_nop,
.mmu.set_pte = native_set_pte,
- .mmu.set_pte_at = native_set_pte_at,
.mmu.set_pmd = native_set_pmd,
.mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
.mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-#if CONFIG_PGTABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
- .mmu.set_pte_atomic = native_set_pte_atomic,
- .mmu.pte_clear = native_pte_clear,
- .mmu.pmd_clear = native_pmd_clear,
-#endif
.mmu.set_pud = native_set_pud,
.mmu.pmd_val = PTE_IDENT,
.mmu.make_pmd = PTE_IDENT,
-#if CONFIG_PGTABLE_LEVELS >= 4
.mmu.pud_val = PTE_IDENT,
.mmu.make_pud = PTE_IDENT,
@@ -398,8 +381,6 @@ struct paravirt_patch_template pv_ops = {
.mmu.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
-#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
-#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
.mmu.pte_val = PTE_IDENT,
.mmu.pgd_val = PTE_IDENT,
diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
index 3eff63c090d2..ace6e334cb39 100644
--- a/arch/x86/kernel/paravirt_patch.c
+++ b/arch/x86/kernel/paravirt_patch.c
@@ -26,14 +26,10 @@ struct patch_xxl {
const unsigned char mmu_read_cr3[3];
const unsigned char mmu_write_cr3[3];
const unsigned char irq_restore_fl[2];
-# ifdef CONFIG_X86_64
const unsigned char cpu_wbinvd[2];
const unsigned char cpu_usergs_sysret64[6];
const unsigned char cpu_swapgs[3];
const unsigned char mov64[3];
-# else
- const unsigned char cpu_iret[1];
-# endif
};
static const struct patch_xxl patch_data_xxl = {
@@ -42,7 +38,6 @@ static const struct patch_xxl patch_data_xxl = {
.irq_save_fl = { 0x9c, 0x58 }, // pushf; pop %[re]ax
.mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax
.mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax
-# ifdef CONFIG_X86_64
.mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
.irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq
.cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
@@ -50,19 +45,11 @@ static const struct patch_xxl patch_data_xxl = {
0x48, 0x0f, 0x07 }, // swapgs; sysretq
.cpu_swapgs = { 0x0f, 0x01, 0xf8 }, // swapgs
.mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
-# else
- .mmu_write_cr3 = { 0x0f, 0x22, 0xd8 }, // mov %eax, %cr3
- .irq_restore_fl = { 0x50, 0x9d }, // push %eax; popf
- .cpu_iret = { 0xcf }, // iret
-# endif
};
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
-#ifdef CONFIG_X86_64
return PATCH(xxl, mov64, insn_buff, len);
-#endif
- return 0;
}
# endif /* CONFIG_PARAVIRT_XXL */
@@ -98,13 +85,9 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
-# ifdef CONFIG_X86_64
PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
-# else
- PATCH_CASE(cpu, iret, xxl, insn_buff, len);
-# endif
#endif
#ifdef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 994d8393f2f7..ba4593a913fa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -42,6 +42,7 @@
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
+#include <asm/frame.h>
#include "process.h"
@@ -133,7 +134,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
- frame->bp = 0;
+ frame->bp = encode_frame_pointer(childregs);
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
p->thread.io_bitmap = NULL;
@@ -684,9 +685,7 @@ void arch_cpu_idle(void)
*/
void __cpuidle default_idle(void)
{
- trace_cpu_idle_rcuidle(1, smp_processor_id());
safe_halt();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
@@ -792,7 +791,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
- trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -804,7 +802,6 @@ static __cpuidle void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9afefe325acb..df342bedea88 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -407,7 +407,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void)
{
unsigned long gsbase;
- if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
unsigned long flags;
local_irq_save(flags);
@@ -422,7 +422,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void)
void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
- if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
unsigned long flags;
local_irq_save(flags);
@@ -439,7 +439,7 @@ unsigned long x86_fsbase_read_task(struct task_struct *task)
if (task == current)
fsbase = x86_fsbase_read_cpu();
- else if (static_cpu_has(X86_FEATURE_FSGSBASE) ||
+ else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
(task->thread.fsindex == 0))
fsbase = task->thread.fsbase;
else
@@ -454,7 +454,7 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
if (task == current)
gsbase = x86_gsbase_read_cpu_inactive();
- else if (static_cpu_has(X86_FEATURE_FSGSBASE) ||
+ else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
(task->thread.gsindex == 0))
gsbase = task->thread.gsbase;
else
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 5679aa3fdcb8..bedca011459c 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -204,7 +204,7 @@ static int set_segment_reg(struct task_struct *task,
case offsetof(struct user_regs_struct, ss):
if (unlikely(value == 0))
return -EIO;
- /* Else, fall through */
+ fallthrough;
default:
*pt_regs_access(task_pt_regs(task), offset) = value;
@@ -465,7 +465,7 @@ static void ptrace_triggered(struct perf_event *bp,
break;
}
- thread->debugreg6 |= (DR_TRAP0 << i);
+ thread->virtual_dr6 |= (DR_TRAP0 << i);
}
/*
@@ -601,7 +601,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
if (bp)
val = bp->hw.info.address;
} else if (n == 6) {
- val = thread->debugreg6;
+ val = thread->virtual_dr6 ^ DR6_RESERVED; /* Flip back to arch polarity */
} else if (n == 7) {
val = thread->ptrace_dr7;
}
@@ -657,7 +657,7 @@ static int ptrace_set_debugreg(struct task_struct *tsk, int n,
if (n < HBP_NUM) {
rc = ptrace_set_breakpoint_addr(tsk, n, val);
} else if (n == 6) {
- thread->debugreg6 = val;
+ thread->virtual_dr6 = val ^ DR6_RESERVED; /* Flip to positive polarity */
rc = 0;
} else if (n == 7) {
rc = ptrace_write_dr7(tsk, val);
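
The "^ DR6_RESERVED" in the two hunks above converts between the kernel's
positive-polarity virtual_dr6 (a set bit means the event happened) and the
architectural DR6 format, in which the reserved bits read as ones. A
stand-alone sketch of that flip, assuming the kernel's 0xFFFF0FF0 reserved
mask (an aside, not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define DEMO_DR6_RESERVED	0xFFFF0FF0u	/* kernel's DR6_RESERVED mask */
#define DEMO_DR_TRAP0		0x1u		/* breakpoint 0 hit */

int main(void)
{
	uint32_t virtual_dr6 = DEMO_DR_TRAP0;			/* positive polarity */
	uint32_t arch_dr6    = virtual_dr6 ^ DEMO_DR6_RESERVED;	/* what ptrace reports */

	printf("virtual_dr6=0x%08x arch dr6=0x%08x\n",
	       (unsigned)virtual_dr6, (unsigned)arch_dr6);
	return 0;
}
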
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 1b10717c9321..6d0df6a58873 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -8,6 +8,7 @@
#include <asm/hpet.h>
#include <asm/setup.h>
+#include <asm/mce.h>
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
@@ -624,10 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
amd_disable_seq_and_redirect_scrub);
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
-#include <linux/jump_label.h>
-#include <asm/string_64.h>
-
/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
@@ -636,7 +633,7 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
pci_read_config_dword(pdev, 0x84, &capid0);
if (capid0 & 0x10)
- static_branch_inc(&mcsafe_key);
+ enable_copy_mc_fragile();
}
/* Skylake */
@@ -653,7 +650,7 @@ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
* enabled, so memory machine check recovery is also enabled.
*/
if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
- static_branch_inc(&mcsafe_key);
+ enable_copy_mc_fragile();
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
@@ -661,7 +658,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
-#endif
bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0ec7ced727fe..db115943e8bd 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -10,7 +10,7 @@
#include <linux/sched.h>
#include <linux/tboot.h>
#include <linux/delay.h>
-#include <linux/frame.h>
+#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <acpi/reboot.h>
#include <asm/io.h>
@@ -654,7 +654,7 @@ static void native_machine_emergency_restart(void)
case BOOT_CF9_FORCE:
port_cf9_safe = true;
- /* Fall through */
+ fallthrough;
case BOOT_CF9_SAFE:
if (port_cf9_safe) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3511736fbc74..b16caee53bea 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -19,6 +19,7 @@
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
+#include <linux/static_call.h>
#include <uapi/linux/mount.h>
@@ -263,16 +264,12 @@ static void __init relocate_initrd(void)
u64 area_size = PAGE_ALIGN(ramdisk_size);
/* We need to move the initrd down into directly mapped mem */
- relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
- area_size, PAGE_SIZE);
-
+ relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
+ PFN_PHYS(max_pfn_mapped));
if (!relocated_ramdisk)
panic("Cannot find place for new RAMDISK of size %lld\n",
ramdisk_size);
- /* Note: this includes all the mem currently occupied by
- the initrd, we rely on that fact to keep the data intact. */
- memblock_reserve(relocated_ramdisk, area_size);
initrd_start = relocated_ramdisk + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
@@ -299,13 +296,13 @@ static void __init early_reserve_initrd(void)
memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
+
static void __init reserve_initrd(void)
{
/* Assume only end is not page aligned */
u64 ramdisk_image = get_ramdisk_image();
u64 ramdisk_size = get_ramdisk_size();
u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
- u64 mapped_size;
if (!boot_params.hdr.type_of_loader ||
!ramdisk_image || !ramdisk_size)
@@ -313,12 +310,6 @@ static void __init reserve_initrd(void)
initrd_start = 0;
- mapped_size = memblock_mem_size(max_pfn_mapped);
- if (ramdisk_size >= (mapped_size>>1))
- panic("initrd too large to handle, "
- "disabling initrd (%lld needed, %lld available)\n",
- ramdisk_size, mapped_size>>1);
-
printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
ramdisk_end - 1);
@@ -430,13 +421,13 @@ static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
unsigned long long base, low_base = 0, low_size = 0;
- unsigned long total_low_mem;
+ unsigned long low_mem_limit;
int ret;
- total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));
+ low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX);
/* crashkernel=Y,low */
- ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
+ ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base);
if (ret) {
/*
* two parts from kernel/dma/swiotlb.c:
@@ -454,23 +445,17 @@ static int __init reserve_crashkernel_low(void)
return 0;
}
- low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN);
+ low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
if (!low_base) {
pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
(unsigned long)(low_size >> 20));
return -ENOMEM;
}
- ret = memblock_reserve(low_base, low_size);
- if (ret) {
- pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
- return ret;
- }
-
- pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
+ pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n",
(unsigned long)(low_size >> 20),
(unsigned long)(low_base >> 20),
- (unsigned long)(total_low_mem >> 20));
+ (unsigned long)(low_mem_limit >> 20));
crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1;
@@ -514,13 +499,13 @@ static void __init reserve_crashkernel(void)
* unless "crashkernel=size[KMG],high" is specified.
*/
if (!high)
- crash_base = memblock_find_in_range(CRASH_ALIGN,
- CRASH_ADDR_LOW_MAX,
- crash_size, CRASH_ALIGN);
+ crash_base = memblock_phys_alloc_range(crash_size,
+ CRASH_ALIGN, CRASH_ALIGN,
+ CRASH_ADDR_LOW_MAX);
if (!crash_base)
- crash_base = memblock_find_in_range(CRASH_ALIGN,
- CRASH_ADDR_HIGH_MAX,
- crash_size, CRASH_ALIGN);
+ crash_base = memblock_phys_alloc_range(crash_size,
+ CRASH_ALIGN, CRASH_ALIGN,
+ CRASH_ADDR_HIGH_MAX);
if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
return;
@@ -528,19 +513,13 @@ static void __init reserve_crashkernel(void)
} else {
unsigned long long start;
- start = memblock_find_in_range(crash_base,
- crash_base + crash_size,
- crash_size, 1 << 20);
+ start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
+ crash_base + crash_size);
if (start != crash_base) {
pr_info("crashkernel reservation failed - memory is in use.\n");
return;
}
}
- ret = memblock_reserve(crash_base, crash_size);
- if (ret) {
- pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
- return;
- }
if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
memblock_free(crash_base, crash_size);
@@ -849,6 +828,7 @@ void __init setup_arch(char **cmdline_p)
early_cpu_init();
arch_init_ideal_nops();
jump_label_init();
+ static_call_init();
early_ioremap_init();
setup_olpc_ofw_pgd();
@@ -1077,6 +1057,7 @@ void __init setup_arch(char **cmdline_p)
efi_fake_memmap();
efi_find_mirror();
efi_esrt_init();
+ efi_mokvar_table_init();
/*
* The EFI specification says that boot service code won't be
@@ -1218,6 +1199,7 @@ void __init setup_arch(char **cmdline_p)
prefill_possible_map();
init_cpu_to_node();
+ init_gi_nodes();
io_apic_init_mappings();
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
new file mode 100644
index 000000000000..5f83ccaab877
--- /dev/null
+++ b/arch/x86/kernel/sev-es-shared.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Encrypted Register State Support
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ *
+ * This file is not compiled stand-alone. It contains code shared
+ * between the pre-decompression boot code and the running Linux kernel
+ * and is included directly into both code-bases.
+ */
+
+#ifndef __BOOT_COMPRESSED
+#define error(v) pr_err(v)
+#define has_cpuflag(f) boot_cpu_has(f)
+#endif
+
+static bool __init sev_es_check_cpu_features(void)
+{
+ if (!has_cpuflag(X86_FEATURE_RDRAND)) {
+ error("RDRAND instruction not supported - no trusted source of randomness available\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void sev_es_terminate(unsigned int reason)
+{
+ u64 val = GHCB_SEV_TERMINATE;
+
+ /*
+ * Tell the hypervisor what went wrong - only reason-set 0 is
+ * currently supported.
+ */
+ val |= GHCB_SEV_TERMINATE_REASON(0, reason);
+
+ /* Request Guest Termination from Hypervisor */
+ sev_es_wr_ghcb_msr(val);
+ VMGEXIT();
+
+ while (true)
+ asm volatile("hlt\n" : : : "memory");
+}
+
+static bool sev_es_negotiate_protocol(void)
+{
+ u64 val;
+
+ /* Do the GHCB protocol version negotiation */
+ sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
+ VMGEXIT();
+ val = sev_es_rd_ghcb_msr();
+
+ if (GHCB_INFO(val) != GHCB_SEV_INFO)
+ return false;
+
+ if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
+ GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
+ return false;
+
+ return true;
+}
+
+static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
+{
+ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+}
+
+static bool vc_decoding_needed(unsigned long exit_code)
+{
+ /* Exceptions don't require decoding the instruction */
+ return !(exit_code >= SVM_EXIT_EXCP_BASE &&
+ exit_code <= SVM_EXIT_LAST_EXCP);
+}
+
+static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
+ struct pt_regs *regs,
+ unsigned long exit_code)
+{
+ enum es_result ret = ES_OK;
+
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->regs = regs;
+
+ if (vc_decoding_needed(exit_code))
+ ret = vc_decode_insn(ctxt);
+
+ return ret;
+}
+
+static void vc_finish_insn(struct es_em_ctxt *ctxt)
+{
+ ctxt->regs->ip += ctxt->insn.length;
+}
+
+static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ u64 exit_code, u64 exit_info_1,
+ u64 exit_info_2)
+{
+ enum es_result ret;
+
+ /* Fill in protocol and format specifiers */
+ ghcb->protocol_version = GHCB_PROTOCOL_MAX;
+ ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
+
+ ghcb_set_sw_exit_code(ghcb, exit_code);
+ ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
+ ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
+
+ sev_es_wr_ghcb_msr(__pa(ghcb));
+ VMGEXIT();
+
+ if ((ghcb->save.sw_exit_info_1 & 0xffffffff) == 1) {
+ u64 info = ghcb->save.sw_exit_info_2;
+ unsigned long v;
+
+ v = info & SVM_EVTINJ_VEC_MASK;
+
+ /* Check if exception information from hypervisor is sane. */
+ if ((info & SVM_EVTINJ_VALID) &&
+ ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
+ ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
+ ctxt->fi.vector = v;
+ if (info & SVM_EVTINJ_VALID_ERR)
+ ctxt->fi.error_code = info >> 32;
+ ret = ES_EXCEPTION;
+ } else {
+ ret = ES_VMM_ERROR;
+ }
+ } else {
+ ret = ES_OK;
+ }
+
+ return ret;
+}
+
+/*
+ * Boot VC Handler - This is the first VC handler during boot. There is no GHCB
+ * page yet, so it only supports the MSR-based communication with the
+ * hypervisor and only the CPUID exit-code.
+ */
+void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+{
+ unsigned int fn = lower_bits(regs->ax, 32);
+ unsigned long val;
+
+ /* Only CPUID is supported via MSR protocol */
+ if (exit_code != SVM_EXIT_CPUID)
+ goto fail;
+
+ sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
+ VMGEXIT();
+ val = sev_es_rd_ghcb_msr();
+ if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ goto fail;
+ regs->ax = val >> 32;
+
+ sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
+ VMGEXIT();
+ val = sev_es_rd_ghcb_msr();
+ if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ goto fail;
+ regs->bx = val >> 32;
+
+ sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
+ VMGEXIT();
+ val = sev_es_rd_ghcb_msr();
+ if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ goto fail;
+ regs->cx = val >> 32;
+
+ sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
+ VMGEXIT();
+ val = sev_es_rd_ghcb_msr();
+ if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ goto fail;
+ regs->dx = val >> 32;
+
+ /* Skip over the CPUID two-byte opcode */
+ regs->ip += 2;
+
+ return;
+
+fail:
+ sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE);
+ VMGEXIT();
+
+ /* Shouldn't get here - if we do, halt the machine */
+ while (true)
+ asm volatile("hlt\n");
+}
+
+static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ void *src, char *buf,
+ unsigned int data_size,
+ unsigned int count,
+ bool backwards)
+{
+ int i, b = backwards ? -1 : 1;
+ enum es_result ret = ES_OK;
+
+ for (i = 0; i < count; i++) {
+ void *s = src + (i * data_size * b);
+ char *d = buf + (i * data_size);
+
+ ret = vc_read_mem(ctxt, s, d, data_size);
+ if (ret != ES_OK)
+ break;
+ }
+
+ return ret;
+}
+
+static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
+ void *dst, char *buf,
+ unsigned int data_size,
+ unsigned int count,
+ bool backwards)
+{
+ int i, s = backwards ? -1 : 1;
+ enum es_result ret = ES_OK;
+
+ for (i = 0; i < count; i++) {
+ void *d = dst + (i * data_size * s);
+ char *b = buf + (i * data_size);
+
+ ret = vc_write_mem(ctxt, d, b, data_size);
+ if (ret != ES_OK)
+ break;
+ }
+
+ return ret;
+}
+
+#define IOIO_TYPE_STR BIT(2)
+#define IOIO_TYPE_IN 1
+#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR)
+#define IOIO_TYPE_OUT 0
+#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)
+
+#define IOIO_REP BIT(3)
+
+#define IOIO_ADDR_64 BIT(9)
+#define IOIO_ADDR_32 BIT(8)
+#define IOIO_ADDR_16 BIT(7)
+
+#define IOIO_DATA_32 BIT(6)
+#define IOIO_DATA_16 BIT(5)
+#define IOIO_DATA_8 BIT(4)
+
+#define IOIO_SEG_ES (0 << 10)
+#define IOIO_SEG_DS (3 << 10)
+
+static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+{
+ struct insn *insn = &ctxt->insn;
+ *exitinfo = 0;
+
+ switch (insn->opcode.bytes[0]) {
+ /* INS opcodes */
+ case 0x6c:
+ case 0x6d:
+ *exitinfo |= IOIO_TYPE_INS;
+ *exitinfo |= IOIO_SEG_ES;
+ *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ break;
+
+ /* OUTS opcodes */
+ case 0x6e:
+ case 0x6f:
+ *exitinfo |= IOIO_TYPE_OUTS;
+ *exitinfo |= IOIO_SEG_DS;
+ *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ break;
+
+ /* IN immediate opcodes */
+ case 0xe4:
+ case 0xe5:
+ *exitinfo |= IOIO_TYPE_IN;
+ *exitinfo |= (u64)insn->immediate.value << 16;
+ break;
+
+ /* OUT immediate opcodes */
+ case 0xe6:
+ case 0xe7:
+ *exitinfo |= IOIO_TYPE_OUT;
+ *exitinfo |= (u64)insn->immediate.value << 16;
+ break;
+
+ /* IN register opcodes */
+ case 0xec:
+ case 0xed:
+ *exitinfo |= IOIO_TYPE_IN;
+ *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ break;
+
+ /* OUT register opcodes */
+ case 0xee:
+ case 0xef:
+ *exitinfo |= IOIO_TYPE_OUT;
+ *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ break;
+
+ default:
+ return ES_DECODE_FAILED;
+ }
+
+ switch (insn->opcode.bytes[0]) {
+ case 0x6c:
+ case 0x6e:
+ case 0xe4:
+ case 0xe6:
+ case 0xec:
+ case 0xee:
+ /* Single byte opcodes */
+ *exitinfo |= IOIO_DATA_8;
+ break;
+ default:
+ /* Length determined by instruction parsing */
+ *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
+ : IOIO_DATA_32;
+ }
+ switch (insn->addr_bytes) {
+ case 2:
+ *exitinfo |= IOIO_ADDR_16;
+ break;
+ case 4:
+ *exitinfo |= IOIO_ADDR_32;
+ break;
+ case 8:
+ *exitinfo |= IOIO_ADDR_64;
+ break;
+ }
+
+ if (insn_has_rep_prefix(insn))
+ *exitinfo |= IOIO_REP;
+
+ return ES_OK;
+}
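
As a concrete check of the encoding built by vc_ioio_exitinfo() above, here is
a stand-alone calculation (an aside, not part of the patch) for "out %al, $0x80"
- opcode 0xe6, imm8 0x80, 64-bit address size - using the same bit definitions;
it yields exit_info_1 = 0x800210:

#include <stdio.h>
#include <stdint.h>

#define DEMO_BIT(n)		(1ull << (n))
#define DEMO_IOIO_TYPE_OUT	0
#define DEMO_IOIO_DATA_8	DEMO_BIT(4)
#define DEMO_IOIO_ADDR_64	DEMO_BIT(9)

int main(void)
{
	uint64_t exitinfo = 0;

	exitinfo |= DEMO_IOIO_TYPE_OUT;		/* direction: out */
	exitinfo |= (uint64_t)0x80 << 16;	/* port number from the immediate */
	exitinfo |= DEMO_IOIO_DATA_8;		/* single-byte data */
	exitinfo |= DEMO_IOIO_ADDR_64;		/* 64-bit address size */

	printf("exit_info_1 = 0x%llx\n", (unsigned long long)exitinfo);
	return 0;
}
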
+
+static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ u64 exit_info_1, exit_info_2;
+ enum es_result ret;
+
+ ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
+ if (ret != ES_OK)
+ return ret;
+
+ if (exit_info_1 & IOIO_TYPE_STR) {
+
+ /* (REP) INS/OUTS */
+
+ bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
+ unsigned int io_bytes, exit_bytes;
+ unsigned int ghcb_count, op_count;
+ unsigned long es_base;
+ u64 sw_scratch;
+
+ /*
+ * For the string variants with rep prefix the number of in/out
+ * operations per #VC exception is limited so that the kernel
+ * has a chance to take interrupts and re-schedule while the
+ * instruction is emulated.
+ */
+ io_bytes = (exit_info_1 >> 4) & 0x7;
+ ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;
+
+ op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
+ exit_info_2 = min(op_count, ghcb_count);
+ exit_bytes = exit_info_2 * io_bytes;
+
+ es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
+
+ /* Read bytes of OUTS into the shared buffer */
+ if (!(exit_info_1 & IOIO_TYPE_IN)) {
+ ret = vc_insn_string_read(ctxt,
+ (void *)(es_base + regs->si),
+ ghcb->shared_buffer, io_bytes,
+ exit_info_2, df);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Issue a VMGEXIT to the HV to consume the bytes from the
+ * shared buffer or to have it write them into the shared buffer
+ * depending on the instruction: OUTS or INS.
+ */
+ sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
+ ghcb_set_sw_scratch(ghcb, sw_scratch);
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
+ exit_info_1, exit_info_2);
+ if (ret != ES_OK)
+ return ret;
+
+ /* Read bytes from shared buffer into the guest's destination. */
+ if (exit_info_1 & IOIO_TYPE_IN) {
+ ret = vc_insn_string_write(ctxt,
+ (void *)(es_base + regs->di),
+ ghcb->shared_buffer, io_bytes,
+ exit_info_2, df);
+ if (ret)
+ return ret;
+
+ if (df)
+ regs->di -= exit_bytes;
+ else
+ regs->di += exit_bytes;
+ } else {
+ if (df)
+ regs->si -= exit_bytes;
+ else
+ regs->si += exit_bytes;
+ }
+
+ if (exit_info_1 & IOIO_REP)
+ regs->cx -= exit_info_2;
+
+ ret = regs->cx ? ES_RETRY : ES_OK;
+
+ } else {
+
+ /* IN/OUT into/from rAX */
+
+ int bits = (exit_info_1 & 0x70) >> 1;
+ u64 rax = 0;
+
+ if (!(exit_info_1 & IOIO_TYPE_IN))
+ rax = lower_bits(regs->ax, bits);
+
+ ghcb_set_rax(ghcb, rax);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (exit_info_1 & IOIO_TYPE_IN) {
+ if (!ghcb_rax_is_valid(ghcb))
+ return ES_VMM_ERROR;
+ regs->ax = lower_bits(ghcb->save.rax, bits);
+ }
+ }
+
+ return ret;
+}
+
+static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ u32 cr4 = native_read_cr4();
+ enum es_result ret;
+
+ ghcb_set_rax(ghcb, regs->ax);
+ ghcb_set_rcx(ghcb, regs->cx);
+
+ if (cr4 & X86_CR4_OSXSAVE)
+ /* Safe to read xcr0 */
+ ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
+ else
+ /* xgetbv will cause #GP - use reset value for xcr0 */
+ ghcb_set_xcr0(ghcb, 1);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) &&
+ ghcb_rbx_is_valid(ghcb) &&
+ ghcb_rcx_is_valid(ghcb) &&
+ ghcb_rdx_is_valid(ghcb)))
+ return ES_VMM_ERROR;
+
+ regs->ax = ghcb->save.rax;
+ regs->bx = ghcb->save.rbx;
+ regs->cx = ghcb->save.rcx;
+ regs->dx = ghcb->save.rdx;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ unsigned long exit_code)
+{
+ bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
+ enum es_result ret;
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
+ (!rdtscp || ghcb_rcx_is_valid(ghcb))))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+ ctxt->regs->dx = ghcb->save.rdx;
+ if (rdtscp)
+ ctxt->regs->cx = ghcb->save.rcx;
+
+ return ES_OK;
+}
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
new file mode 100644
index 000000000000..4a96726fbaf8
--- /dev/null
+++ b/arch/x86/kernel/sev-es.c
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "SEV-ES: " fmt
+
+#include <linux/sched/debug.h> /* For show_regs() */
+#include <linux/percpu-defs.h>
+#include <linux/mem_encrypt.h>
+#include <linux/lockdep.h>
+#include <linux/printk.h>
+#include <linux/mm_types.h>
+#include <linux/set_memory.h>
+#include <linux/memblock.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/stacktrace.h>
+#include <asm/sev-es.h>
+#include <asm/insn-eval.h>
+#include <asm/fpu/internal.h>
+#include <asm/processor.h>
+#include <asm/realmode.h>
+#include <asm/traps.h>
+#include <asm/svm.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+
+#define DR7_RESET_VALUE 0x400
+
+/* For early boot hypervisor communication in SEV-ES enabled guests */
+static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
+
+/*
+ * Needs to be in the .data section because we need it to be NULL before the
+ * bss section is cleared.
+ */
+static struct ghcb __initdata *boot_ghcb;
+
+/* #VC handler runtime per-CPU data */
+struct sev_es_runtime_data {
+ struct ghcb ghcb_page;
+
+ /* Physical storage for the per-CPU IST stack of the #VC handler */
+ char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
+
+ /*
+ * Physical storage for the per-CPU fall-back stack of the #VC handler.
+ * The fall-back stack is used when it is not safe to switch back to the
+ * interrupted stack in the #VC entry code.
+ */
+ char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
+
+ /*
+ * Reserve one page per CPU as backup storage for the unencrypted GHCB.
+ * It is needed when an NMI happens while the #VC handler uses the real
+ * GHCB, and the NMI handler itself causes another #VC exception. In
+ * that case the GHCB content of the first handler needs to be backed up
+ * and restored.
+ */
+ struct ghcb backup_ghcb;
+
+ /*
+ * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
+ * There is no need for it to be atomic, because nothing is written to
+ * the GHCB between the read and the write of ghcb_active. So it is safe
+ * to use it when a nested #VC exception happens before the write.
+ *
+ * This is necessary for example in the #VC->NMI->#VC case when the NMI
+ * happens while the first #VC handler uses the GHCB. When the NMI code
+ * raises a second #VC exception, its handler might overwrite the contents of the
+ * GHCB written by the first handler. To avoid this the content of the
+ * GHCB is saved and restored when the GHCB is detected to be in use
+ * already.
+ */
+ bool ghcb_active;
+ bool backup_ghcb_active;
+
+ /*
+ * Cached DR7 value - write it on DR7 writes and return it on reads.
+ * That value will never make it to the real hardware DR7 as debugging
+ * is currently unsupported in SEV-ES guests.
+ */
+ unsigned long dr7;
+};
+
+struct ghcb_state {
+ struct ghcb *ghcb;
+};
+
+static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
+DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
+
+/* Needed in vc_early_forward_exception */
+void do_early_exception(struct pt_regs *regs, int trapnr);
+
+static void __init setup_vc_stacks(int cpu)
+{
+ struct sev_es_runtime_data *data;
+ struct cpu_entry_area *cea;
+ unsigned long vaddr;
+ phys_addr_t pa;
+
+ data = per_cpu(runtime_data, cpu);
+ cea = get_cpu_entry_area(cpu);
+
+ /* Map #VC IST stack */
+ vaddr = CEA_ESTACK_BOT(&cea->estacks, VC);
+ pa = __pa(data->ist_stack);
+ cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
+
+ /* Map VC fall-back stack */
+ vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2);
+ pa = __pa(data->fallback_stack);
+ cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
+}
+
+static __always_inline bool on_vc_stack(unsigned long sp)
+{
+ return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
+}
+
+/*
+ * This function handles the case when an NMI is raised in the #VC exception
+ * handler entry code. In this case, the IST entry for #VC must be adjusted, so
+ * that any subsequent #VC exception will not overwrite the stack contents of the
+ * interrupted #VC handler.
+ *
+ * The IST entry is adjusted unconditionally so that it can also be
+ * unconditionally adjusted back in sev_es_ist_exit(). Otherwise a nested
+ * sev_es_ist_exit() call may adjust back the IST entry too early.
+ */
+void noinstr __sev_es_ist_enter(struct pt_regs *regs)
+{
+ unsigned long old_ist, new_ist;
+
+ /* Read old IST entry */
+ old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ /* Make room on the IST stack */
+ if (on_vc_stack(regs->sp))
+ new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
+ else
+ new_ist = old_ist - sizeof(old_ist);
+
+ /* Store old IST entry */
+ *(unsigned long *)new_ist = old_ist;
+
+ /* Set new IST entry */
+ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
+}
+
+void noinstr __sev_es_ist_exit(void)
+{
+ unsigned long ist;
+
+ /* Read IST entry */
+ ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
+ return;
+
+ /* Read back old IST entry and write it to the TSS */
+ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+}
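
A minimal user-space sketch of the shifting-IST scheme above (illustration only: the TSS slot is modeled as a plain variable and the #VC IST stack as a char array, so the LIFO save/restore of the entry can be followed outside the kernel):

/*
 * Model of __sev_es_ist_enter()/__sev_es_ist_exit(): the previous IST value
 * is stored at the new stack top, so nested adjustments unwind in LIFO order.
 */
#include <assert.h>
#include <stdint.h>

#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))

static uintptr_t ist;	/* models cpu_tss_rw.x86_tss.ist[IST_INDEX_VC] */

static void ist_enter(uintptr_t sp, int sp_on_vc_stack)
{
	uintptr_t old = ist;
	uintptr_t new = sp_on_vc_stack ? ALIGN_DOWN(sp, 8) - sizeof(old)
				       : old - sizeof(old);

	*(uintptr_t *)new = old;	/* save the previous entry on the new top */
	ist = new;
}

static void ist_exit(void)
{
	ist = *(uintptr_t *)ist;	/* read back the saved entry */
}

int main(void)
{
	static char stack[4096] __attribute__((aligned(16)));
	uintptr_t top = (uintptr_t)stack + sizeof(stack);

	ist = top;
	ist_enter(top - 128, 1);	/* NMI while the #VC handler runs on its stack */
	ist_enter(ist, 0);		/* a further nested adjustment */
	ist_exit();
	ist_exit();
	assert(ist == top);		/* fully unwound */
	return 0;
}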
+
+static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (unlikely(data->ghcb_active)) {
+ /* GHCB is already in use - save its contents */
+
+ if (unlikely(data->backup_ghcb_active))
+ return NULL;
+
+ /* Mark backup_ghcb active before writing to it */
+ data->backup_ghcb_active = true;
+
+ state->ghcb = &data->backup_ghcb;
+
+ /* Backup GHCB content */
+ *state->ghcb = *ghcb;
+ } else {
+ state->ghcb = NULL;
+ data->ghcb_active = true;
+ }
+
+ return ghcb;
+}
+
+static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (state->ghcb) {
+ /* Restore GHCB from Backup */
+ *ghcb = *state->ghcb;
+ data->backup_ghcb_active = false;
+ state->ghcb = NULL;
+ } else {
+ data->ghcb_active = false;
+ }
+}
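
The get/put pair above allows exactly one level of nesting. A small user-space model (the GHCB shrunk to a single int, names hypothetical) shows why a second nested user is refused while the first nested user transparently saves and restores the interrupted contents:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct state { int *saved; };		/* models struct ghcb_state */

static int ghcb, backup;		/* models ghcb_page and backup_ghcb */
static bool ghcb_active, backup_active;

static int *get_ghcb(struct state *st)
{
	if (ghcb_active) {
		if (backup_active)
			return NULL;	/* already nested once - give up */
		backup_active = true;
		backup = ghcb;		/* back up the live contents */
		st->saved = &backup;
	} else {
		st->saved = NULL;
		ghcb_active = true;
	}
	return &ghcb;
}

static void put_ghcb(struct state *st)
{
	if (st->saved) {
		ghcb = *st->saved;	/* restore the interrupted user's data */
		backup_active = false;
	} else {
		ghcb_active = false;
	}
}

int main(void)
{
	struct state outer, nested, third;

	*get_ghcb(&outer) = 1;			/* first #VC fills the GHCB */
	*get_ghcb(&nested) = 2;			/* NMI-raised #VC overwrites it */
	assert(get_ghcb(&third) == NULL);	/* a third concurrent user is refused */
	put_ghcb(&nested);
	assert(ghcb == 1);			/* the outer handler's data is back */
	put_ghcb(&outer);
	return 0;
}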
+
+/* Needed in vc_early_forward_exception */
+void do_early_exception(struct pt_regs *regs, int trapnr);
+
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+ return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+}
+
+static inline void sev_es_wr_ghcb_msr(u64 val)
+{
+ u32 low, high;
+
+ low = (u32)(val);
+ high = (u32)(val >> 32);
+
+ native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
+}
+
+static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
+ unsigned char *buffer)
+{
+ return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+}
+
+static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ enum es_result ret;
+ int res;
+
+ if (user_mode(ctxt->regs)) {
+ res = insn_fetch_from_user(ctxt->regs, buffer);
+ if (!res) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
+ }
+
+ if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res))
+ return ES_DECODE_FAILED;
+ } else {
+ res = vc_fetch_insn_kernel(ctxt, buffer);
+ if (res) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
+ }
+
+ insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1);
+ insn_get_length(&ctxt->insn);
+ }
+
+ ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
+
+ return ret;
+}
+
+static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ char *dst, char *buf, size_t size)
+{
+ unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
+ char __user *target = (char __user *)dst;
+ u64 d8;
+ u32 d4;
+ u16 d2;
+ u8 d1;
+
+ switch (size) {
+ case 1:
+ memcpy(&d1, buf, 1);
+ if (put_user(d1, target))
+ goto fault;
+ break;
+ case 2:
+ memcpy(&d2, buf, 2);
+ if (put_user(d2, target))
+ goto fault;
+ break;
+ case 4:
+ memcpy(&d4, buf, 4);
+ if (put_user(d4, target))
+ goto fault;
+ break;
+ case 8:
+ memcpy(&d8, buf, 8);
+ if (put_user(d8, target))
+ goto fault;
+ break;
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return ES_UNSUPPORTED;
+ }
+
+ return ES_OK;
+
+fault:
+ if (user_mode(ctxt->regs))
+ error_code |= X86_PF_USER;
+
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = error_code;
+ ctxt->fi.cr2 = (unsigned long)dst;
+
+ return ES_EXCEPTION;
+}
+
+static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ char *src, char *buf, size_t size)
+{
+ unsigned long error_code = X86_PF_PROT;
+ char __user *s = (char __user *)src;
+ u64 d8;
+ u32 d4;
+ u16 d2;
+ u8 d1;
+
+ switch (size) {
+ case 1:
+ if (get_user(d1, s))
+ goto fault;
+ memcpy(buf, &d1, 1);
+ break;
+ case 2:
+ if (get_user(d2, s))
+ goto fault;
+ memcpy(buf, &d2, 2);
+ break;
+ case 4:
+ if (get_user(d4, s))
+ goto fault;
+ memcpy(buf, &d4, 4);
+ break;
+ case 8:
+ if (get_user(d8, s))
+ goto fault;
+ memcpy(buf, &d8, 8);
+ break;
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return ES_UNSUPPORTED;
+ }
+
+ return ES_OK;
+
+fault:
+ if (user_mode(ctxt->regs))
+ error_code |= X86_PF_USER;
+
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = error_code;
+ ctxt->fi.cr2 = (unsigned long)src;
+
+ return ES_EXCEPTION;
+}
+
+static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ unsigned long vaddr, phys_addr_t *paddr)
+{
+ unsigned long va = (unsigned long)vaddr;
+ unsigned int level;
+ phys_addr_t pa;
+ pgd_t *pgd;
+ pte_t *pte;
+
+ pgd = __va(read_cr3_pa());
+ pgd = &pgd[pgd_index(va)];
+ pte = lookup_address_in_pgd(pgd, va, &level);
+ if (!pte) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.cr2 = vaddr;
+ ctxt->fi.error_code = 0;
+
+ if (user_mode(ctxt->regs))
+ ctxt->fi.error_code |= X86_PF_USER;
+
+ return false;
+ }
+
+ pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ pa |= va & ~page_level_mask(level);
+
+ *paddr = pa;
+
+ return true;
+}
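
The physical address above is assembled from the PTE's PFN plus whatever offset the page-level mask leaves over. A worked example with made-up numbers for a 2 MiB mapping:

#include <assert.h>

#define PAGE_SHIFT	12
#define PMD_MASK	(~((1UL << 21) - 1))	/* page_level_mask() for a 2 MiB mapping */

int main(void)
{
	unsigned long va  = 0xffffc90000234567UL;	/* hypothetical kernel virtual address */
	unsigned long pfn = 0x12200UL;			/* PFN stored in the huge-page PTE */
	unsigned long pa  = (pfn << PAGE_SHIFT) | (va & ~PMD_MASK);

	assert(pa == 0x12234567UL);	/* 2 MiB frame base plus the 21-bit offset */
	return 0;
}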
+
+/* Include code shared with pre-decompression boot stage */
+#include "sev-es-shared.c"
+
+void noinstr __sev_es_nmi_complete(void)
+{
+ struct ghcb_state state;
+ struct ghcb *ghcb;
+
+ ghcb = sev_es_get_ghcb(&state);
+
+ vc_ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+ sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
+ VMGEXIT();
+
+ sev_es_put_ghcb(&state);
+}
+
+static u64 get_jump_table_addr(void)
+{
+ struct ghcb_state state;
+ unsigned long flags;
+ struct ghcb *ghcb;
+ u64 ret = 0;
+
+ local_irq_save(flags);
+
+ ghcb = sev_es_get_ghcb(&state);
+
+ vc_ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
+ ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+ sev_es_wr_ghcb_msr(__pa(ghcb));
+ VMGEXIT();
+
+ if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
+ ghcb_sw_exit_info_2_is_valid(ghcb))
+ ret = ghcb->save.sw_exit_info_2;
+
+ sev_es_put_ghcb(&state);
+
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
+{
+ u16 startup_cs, startup_ip;
+ phys_addr_t jump_table_pa;
+ u64 jump_table_addr;
+ u16 __iomem *jump_table;
+
+ jump_table_addr = get_jump_table_addr();
+
+ /* On UP guests there is no jump table so this is not a failure */
+ if (!jump_table_addr)
+ return 0;
+
+ /* Check if AP Jump Table is page-aligned */
+ if (jump_table_addr & ~PAGE_MASK)
+ return -EINVAL;
+
+ jump_table_pa = jump_table_addr & PAGE_MASK;
+
+ startup_cs = (u16)(rmh->trampoline_start >> 4);
+ startup_ip = (u16)(rmh->sev_es_trampoline_start -
+ rmh->trampoline_start);
+
+ jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
+ if (!jump_table)
+ return -EIO;
+
+ writew(startup_ip, &jump_table[0]);
+ writew(startup_cs, &jump_table[1]);
+
+ iounmap(jump_table);
+
+ return 0;
+}
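
The two 16-bit values written into the jump table form a real-mode CS:IP pair. A quick check with made-up addresses shows how the shift-by-4 segment and the offset recombine into the physical entry point:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t trampoline_start        = 0x9A000;	/* hypothetical real-mode base */
	uint32_t sev_es_trampoline_start = 0x9A040;	/* hypothetical SEV-ES AP entry */

	uint16_t cs = trampoline_start >> 4;				/* 0x9A00 */
	uint16_t ip = sev_es_trampoline_start - trampoline_start;	/* 0x0040 */

	/* real-mode CS:IP resolves back to the physical entry point */
	assert(((uint32_t)cs << 4) + ip == sev_es_trampoline_start);
	return 0;
}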
+
+/*
+ * This is needed by the OVMF UEFI firmware which will use whatever it finds in
+ * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
+ * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
+ */
+int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
+{
+ struct sev_es_runtime_data *data;
+ unsigned long address, pflags;
+ int cpu;
+ u64 pfn;
+
+ if (!sev_es_active())
+ return 0;
+
+ pflags = _PAGE_NX | _PAGE_RW;
+
+ for_each_possible_cpu(cpu) {
+ data = per_cpu(runtime_data, cpu);
+
+ address = __pa(&data->ghcb_page);
+ pfn = address >> PAGE_SHIFT;
+
+ if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
+ return 1;
+ }
+
+ return 0;
+}
+
+static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ enum es_result ret;
+ u64 exit_info_1;
+
+ /* Is it a WRMSR? */
+ exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
+
+ ghcb_set_rcx(ghcb, regs->cx);
+ if (exit_info_1) {
+ ghcb_set_rax(ghcb, regs->ax);
+ ghcb_set_rdx(ghcb, regs->dx);
+ }
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
+
+ if ((ret == ES_OK) && (!exit_info_1)) {
+ regs->ax = ghcb->save.rax;
+ regs->dx = ghcb->save.rdx;
+ }
+
+ return ret;
+}
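
RDMSR and WRMSR share the 0x0f escape byte and differ only in the second opcode byte, which is what the exit_info_1 computation above relies on. A stand-alone sanity check (helper name is illustrative, not kernel code):

#include <assert.h>

static unsigned long msr_exit_info_1(unsigned char opcode_byte1)
{
	return (opcode_byte1 == 0x30) ? 1 : 0;	/* 1 = WRMSR, 0 = RDMSR */
}

int main(void)
{
	assert(msr_exit_info_1(0x30) == 1);	/* wrmsr is 0f 30 */
	assert(msr_exit_info_1(0x32) == 0);	/* rdmsr is 0f 32 */
	return 0;
}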
+
+/*
+ * This function runs on the first #VC exception after the kernel
+ * switched to virtual addresses.
+ */
+static bool __init sev_es_setup_ghcb(void)
+{
+ /* First make sure the hypervisor talks a supported protocol. */
+ if (!sev_es_negotiate_protocol())
+ return false;
+
+ /*
+ * Clear the boot_ghcb. The first exception comes in before the bss
+ * section is cleared.
+ */
+ memset(&boot_ghcb_page, 0, PAGE_SIZE);
+
+ /* Alright - Make the boot-ghcb public */
+ boot_ghcb = &boot_ghcb_page;
+
+ return true;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void sev_es_ap_hlt_loop(void)
+{
+ struct ghcb_state state;
+ struct ghcb *ghcb;
+
+ ghcb = sev_es_get_ghcb(&state);
+
+ while (true) {
+ vc_ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+ sev_es_wr_ghcb_msr(__pa(ghcb));
+ VMGEXIT();
+
+ /* Wakeup signal? */
+ if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
+ ghcb->save.sw_exit_info_2)
+ break;
+ }
+
+ sev_es_put_ghcb(&state);
+}
+
+/*
+ * Play_dead handler when running under SEV-ES. This is needed because
+ * the hypervisor can't deliver an SIPI request to restart the AP.
+ * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
+ * hypervisor wakes it up again.
+ */
+static void sev_es_play_dead(void)
+{
+ play_dead_common();
+
+ /* IRQs now disabled */
+
+ sev_es_ap_hlt_loop();
+
+ /*
+ * If we get here, the VCPU was woken up again. Jump to CPU
+ * startup code to get it back online.
+ */
+ start_cpu0();
+}
+#else /* CONFIG_HOTPLUG_CPU */
+#define sev_es_play_dead native_play_dead
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_SMP
+static void __init sev_es_setup_play_dead(void)
+{
+ smp_ops.play_dead = sev_es_play_dead;
+}
+#else
+static inline void sev_es_setup_play_dead(void) { }
+#endif
+
+static void __init alloc_runtime_data(int cpu)
+{
+ struct sev_es_runtime_data *data;
+
+ data = memblock_alloc(sizeof(*data), PAGE_SIZE);
+ if (!data)
+ panic("Can't allocate SEV-ES runtime data");
+
+ per_cpu(runtime_data, cpu) = data;
+}
+
+static void __init init_ghcb(int cpu)
+{
+ struct sev_es_runtime_data *data;
+ int err;
+
+ data = per_cpu(runtime_data, cpu);
+
+ err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
+ sizeof(data->ghcb_page));
+ if (err)
+ panic("Can't map GHCBs unencrypted");
+
+ memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
+
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+}
+
+void __init sev_es_init_vc_handling(void)
+{
+ int cpu;
+
+ BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
+
+ if (!sev_es_active())
+ return;
+
+ if (!sev_es_check_cpu_features())
+ panic("SEV-ES CPU Features missing");
+
+ /* Enable SEV-ES special handling */
+ static_branch_enable(&sev_es_enable_key);
+
+ /* Initialize per-cpu GHCB pages */
+ for_each_possible_cpu(cpu) {
+ alloc_runtime_data(cpu);
+ init_ghcb(cpu);
+ setup_vc_stacks(cpu);
+ }
+
+ sev_es_setup_play_dead();
+
+ /* Secondary CPUs use the runtime #VC handler */
+ initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
+}
+
+static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
+{
+ int trapnr = ctxt->fi.vector;
+
+ if (trapnr == X86_TRAP_PF)
+ native_write_cr2(ctxt->fi.cr2);
+
+ ctxt->regs->orig_ax = ctxt->fi.error_code;
+ do_early_exception(ctxt->regs, trapnr);
+}
+
+static long *vc_insn_get_reg(struct es_em_ctxt *ctxt)
+{
+ long *reg_array;
+ int offset;
+
+ reg_array = (long *)ctxt->regs;
+ offset = insn_get_modrm_reg_off(&ctxt->insn, ctxt->regs);
+
+ if (offset < 0)
+ return NULL;
+
+ offset /= sizeof(long);
+
+ return reg_array + offset;
+}
+
+static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
+{
+ long *reg_array;
+ int offset;
+
+ reg_array = (long *)ctxt->regs;
+ offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
+
+ if (offset < 0)
+ return NULL;
+
+ offset /= sizeof(long);
+
+ return reg_array + offset;
+}
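
Both helpers treat pt_regs as an array of longs and turn the byte offset returned by the instruction decoder into an index. A toy illustration with a hypothetical, shortened register layout:

#include <assert.h>
#include <stddef.h>

struct mini_regs { long r15, r14, bp, bx, ax; };	/* order illustrative only */

int main(void)
{
	struct mini_regs regs = { .ax = 42 };
	long *reg_array = (long *)&regs;
	size_t offset = offsetof(struct mini_regs, ax);	/* what the decoder would hand back */

	assert(reg_array[offset / sizeof(long)] == 42);
	return 0;
}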
+static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ unsigned int bytes, bool read)
+{
+ u64 exit_code, exit_info_1, exit_info_2;
+ unsigned long ghcb_pa = __pa(ghcb);
+ phys_addr_t paddr;
+ void __user *ref;
+
+ ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
+ if (ref == (void __user *)-1L)
+ return ES_UNSUPPORTED;
+
+ exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
+
+ if (!vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr)) {
+ if (!read)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return ES_EXCEPTION;
+ }
+
+ exit_info_1 = paddr;
+ /* Can never be greater than 8 */
+ exit_info_2 = bytes;
+
+ ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
+
+ return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
+}
+
+static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct insn *insn = &ctxt->insn;
+ unsigned int bytes = 0;
+ enum es_result ret;
+ int sign_byte;
+ long *reg_data;
+
+ switch (insn->opcode.bytes[1]) {
+ /* MMIO Read w/ zero-extension */
+ case 0xb6:
+ bytes = 1;
+ fallthrough;
+ case 0xb7:
+ if (!bytes)
+ bytes = 2;
+
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ /* Zero extend based on operand size */
+ reg_data = vc_insn_get_reg(ctxt);
+ if (!reg_data)
+ return ES_DECODE_FAILED;
+
+ memset(reg_data, 0, insn->opnd_bytes);
+
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+
+ /* MMIO Read w/ sign-extension */
+ case 0xbe:
+ bytes = 1;
+ fallthrough;
+ case 0xbf:
+ if (!bytes)
+ bytes = 2;
+
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ /* Sign extend based on operand size */
+ reg_data = vc_insn_get_reg(ctxt);
+ if (!reg_data)
+ return ES_DECODE_FAILED;
+
+ if (bytes == 1) {
+ u8 *val = (u8 *)ghcb->shared_buffer;
+
+ sign_byte = (*val & 0x80) ? 0xff : 0x00;
+ } else {
+ u16 *val = (u16 *)ghcb->shared_buffer;
+
+ sign_byte = (*val & 0x8000) ? 0xff : 0x00;
+ }
+ memset(reg_data, sign_byte, insn->opnd_bytes);
+
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+
+ default:
+ ret = ES_UNSUPPORTED;
+ }
+
+ return ret;
+}
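
The memset()-with-sign-byte trick used for the movsx cases is just a portable way (on little-endian x86) of widening a narrow MMIO value with its sign. A quick stand-alone check:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t mmio_val = 0x80;		/* byte read from the device */
	long reg = 0;
	int sign_byte = (mmio_val & 0x80) ? 0xff : 0x00;

	memset(&reg, sign_byte, sizeof(reg));	/* fill the register with the sign */
	memcpy(&reg, &mmio_val, 1);		/* low byte = MMIO data (little-endian) */

	assert(reg == (long)(int8_t)0x80);	/* i.e. -128, same as movsx would give */
	return 0;
}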
+
+/*
+ * The MOVS instruction has two memory operands, which raises the
+ * problem that it is not known whether the access to the source or the
+ * destination caused the #VC exception (and hence whether an MMIO read
+ * or write operation needs to be emulated).
+ *
+ * Instead of playing games with walking page-tables and trying to guess
+ * whether the source or destination is an MMIO range, split the move
+ * into two operations, a read and a write with only one memory operand.
+ * This will cause a nested #VC exception on the MMIO address which can
+ * then be handled.
+ *
+ * This implementation has the benefit that it also supports MOVS where
+ * source _and_ destination are MMIO regions.
+ *
+ * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
+ * rare operation. If it turns out to be a performance problem the split
+ * operations can be moved to memcpy_fromio() and memcpy_toio().
+ */
+static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
+ unsigned int bytes)
+{
+ unsigned long ds_base, es_base;
+ unsigned char *src, *dst;
+ unsigned char buffer[8];
+ enum es_result ret;
+ bool rep;
+ int off;
+
+ ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
+ es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
+
+ if (ds_base == -1L || es_base == -1L) {
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+ }
+
+ src = ds_base + (unsigned char *)ctxt->regs->si;
+ dst = es_base + (unsigned char *)ctxt->regs->di;
+
+ ret = vc_read_mem(ctxt, src, buffer, bytes);
+ if (ret != ES_OK)
+ return ret;
+
+ ret = vc_write_mem(ctxt, dst, buffer, bytes);
+ if (ret != ES_OK)
+ return ret;
+
+ if (ctxt->regs->flags & X86_EFLAGS_DF)
+ off = -bytes;
+ else
+ off = bytes;
+
+ ctxt->regs->si += off;
+ ctxt->regs->di += off;
+
+ rep = insn_has_rep_prefix(&ctxt->insn);
+ if (rep)
+ ctxt->regs->cx -= 1;
+
+ if (!rep || ctxt->regs->cx == 0)
+ return ES_OK;
+ else
+ return ES_RETRY;
+}
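
A user-space model (flat memory, no faults, made-up register struct) of how a 'rep movsb' is consumed one element per #VC, with ES_RETRY making the CPU re-execute the instruction until CX reaches zero:

#include <assert.h>
#include <stdbool.h>
#include <string.h>

enum res { RES_OK, RES_RETRY };

struct regs { unsigned long si, di, cx, df; };

static enum res emulate_movs(struct regs *r, char *mem, unsigned int bytes, bool rep)
{
	memcpy(mem + r->di, mem + r->si, bytes);	/* the split read+write */

	long off = r->df ? -(long)bytes : (long)bytes;
	r->si += off;
	r->di += off;

	if (rep)
		r->cx--;

	return (!rep || r->cx == 0) ? RES_OK : RES_RETRY;
}

int main(void)
{
	char mem[16] = "abc";
	struct regs r = { .si = 0, .di = 8, .cx = 3, .df = 0 };

	/* three "exceptions" for a 'rep movsb' with CX=3 */
	assert(emulate_movs(&r, mem, 1, true) == RES_RETRY);
	assert(emulate_movs(&r, mem, 1, true) == RES_RETRY);
	assert(emulate_movs(&r, mem, 1, true) == RES_OK);
	assert(memcmp(mem + 8, "abc", 3) == 0);
	return 0;
}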
+
+static enum es_result vc_handle_mmio(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct insn *insn = &ctxt->insn;
+ unsigned int bytes = 0;
+ enum es_result ret;
+ long *reg_data;
+
+ switch (insn->opcode.bytes[0]) {
+ /* MMIO Write */
+ case 0x88:
+ bytes = 1;
+ fallthrough;
+ case 0x89:
+ if (!bytes)
+ bytes = insn->opnd_bytes;
+
+ reg_data = vc_insn_get_reg(ctxt);
+ if (!reg_data)
+ return ES_DECODE_FAILED;
+
+ memcpy(ghcb->shared_buffer, reg_data, bytes);
+
+ ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+ break;
+
+ case 0xc6:
+ bytes = 1;
+ fallthrough;
+ case 0xc7:
+ if (!bytes)
+ bytes = insn->opnd_bytes;
+
+ memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
+
+ ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+ break;
+
+ /* MMIO Read */
+ case 0x8a:
+ bytes = 1;
+ fallthrough;
+ case 0x8b:
+ if (!bytes)
+ bytes = insn->opnd_bytes;
+
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ reg_data = vc_insn_get_reg(ctxt);
+ if (!reg_data)
+ return ES_DECODE_FAILED;
+
+ /* Zero-extend for 32-bit operation */
+ if (bytes == 4)
+ *reg_data = 0;
+
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+
+ /* MOVS instruction */
+ case 0xa4:
+ bytes = 1;
+ fallthrough;
+ case 0xa5:
+ if (!bytes)
+ bytes = insn->opnd_bytes;
+
+ ret = vc_handle_mmio_movs(ctxt, bytes);
+ break;
+ /* Two-Byte Opcodes */
+ case 0x0f:
+ ret = vc_handle_mmio_twobyte_ops(ghcb, ctxt);
+ break;
+ default:
+ ret = ES_UNSUPPORTED;
+ }
+
+ return ret;
+}
+
+static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ long val, *reg = vc_insn_get_rm(ctxt);
+ enum es_result ret;
+
+ if (!reg)
+ return ES_DECODE_FAILED;
+
+ val = *reg;
+
+ /* Upper 32 bits must be written as zeroes */
+ if (val >> 32) {
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+ }
+
+ /* Clear out other reserved bits and set bit 10 */
+ val = (val & 0xffff23ffL) | BIT(10);
+
+ /* Early non-zero writes to DR7 are not supported */
+ if (!data && (val & ~DR7_RESET_VALUE))
+ return ES_UNSUPPORTED;
+
+ /* Using a value of 0 for ExitInfo1 means RAX holds the value */
+ ghcb_set_rax(ghcb, val);
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (data)
+ data->dr7 = val;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ long *reg = vc_insn_get_rm(ctxt);
+
+ if (!reg)
+ return ES_DECODE_FAILED;
+
+ if (data)
+ *reg = data->dr7;
+ else
+ *reg = DR7_RESET_VALUE;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
+}
+
+static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ enum es_result ret;
+
+ ghcb_set_rcx(ghcb, ctxt->regs->cx);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+ ctxt->regs->dx = ghcb->save.rdx;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_monitor(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /*
+ * Treat it as a NOP and do not leak a physical address to the
+ * hypervisor.
+ */
+ return ES_OK;
+}
+
+static enum es_result vc_handle_mwait(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /* Treat the same as MONITOR/MONITORX */
+ return ES_OK;
+}
+
+static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ enum es_result ret;
+
+ ghcb_set_rax(ghcb, ctxt->regs->ax);
+ ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
+
+ if (x86_platform.hyper.sev_es_hcall_prepare)
+ x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!ghcb_rax_is_valid(ghcb))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+
+ /*
+ * Call sev_es_hcall_finish() after regs->ax is already set.
+ * This allows the hypervisor handler to overwrite it again if
+ * necessary.
+ */
+ if (x86_platform.hyper.sev_es_hcall_finish &&
+ !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
+ return ES_VMM_ERROR;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /*
+ * Calling exc_alignment_check() directly does not work, because it
+ * enables IRQs and the GHCB is active. Forward the exception and call
+ * it later from vc_forward_exception().
+ */
+ ctxt->fi.vector = X86_TRAP_AC;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+}
+
+static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ noist_exc_debug(regs);
+ else
+ exc_debug(regs);
+}
+
+static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
+ struct ghcb *ghcb,
+ unsigned long exit_code)
+{
+ enum es_result result;
+
+ switch (exit_code) {
+ case SVM_EXIT_READ_DR7:
+ result = vc_handle_dr7_read(ghcb, ctxt);
+ break;
+ case SVM_EXIT_WRITE_DR7:
+ result = vc_handle_dr7_write(ghcb, ctxt);
+ break;
+ case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
+ result = vc_handle_trap_ac(ghcb, ctxt);
+ break;
+ case SVM_EXIT_RDTSC:
+ case SVM_EXIT_RDTSCP:
+ result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
+ break;
+ case SVM_EXIT_RDPMC:
+ result = vc_handle_rdpmc(ghcb, ctxt);
+ break;
+ case SVM_EXIT_INVD:
+ pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
+ result = ES_UNSUPPORTED;
+ break;
+ case SVM_EXIT_CPUID:
+ result = vc_handle_cpuid(ghcb, ctxt);
+ break;
+ case SVM_EXIT_IOIO:
+ result = vc_handle_ioio(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MSR:
+ result = vc_handle_msr(ghcb, ctxt);
+ break;
+ case SVM_EXIT_VMMCALL:
+ result = vc_handle_vmmcall(ghcb, ctxt);
+ break;
+ case SVM_EXIT_WBINVD:
+ result = vc_handle_wbinvd(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MONITOR:
+ result = vc_handle_monitor(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MWAIT:
+ result = vc_handle_mwait(ghcb, ctxt);
+ break;
+ case SVM_EXIT_NPF:
+ result = vc_handle_mmio(ghcb, ctxt);
+ break;
+ default:
+ /*
+ * Unexpected #VC exception
+ */
+ result = ES_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
+{
+ long error_code = ctxt->fi.error_code;
+ int trapnr = ctxt->fi.vector;
+
+ ctxt->regs->orig_ax = ctxt->fi.error_code;
+
+ switch (trapnr) {
+ case X86_TRAP_GP:
+ exc_general_protection(ctxt->regs, error_code);
+ break;
+ case X86_TRAP_UD:
+ exc_invalid_op(ctxt->regs);
+ break;
+ case X86_TRAP_AC:
+ exc_alignment_check(ctxt->regs, error_code);
+ break;
+ default:
+ pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
+ BUG();
+ }
+}
+
+static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
+{
+ unsigned long sp = (unsigned long)regs;
+
+ return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
+}
+
+/*
+ * Main #VC exception handler. It is called when the entry code was able to
+ * switch off the IST to a safe kernel stack.
+ *
+ * With the current implementation it is always possible to switch to a safe
+ * stack because #VC exceptions only happen at known places, like intercepted
+ * instructions or accesses to MMIO areas/IO ports. They can also happen with
+ * code instrumentation when the hypervisor intercepts #DB, but the critical
+ * paths are forbidden to be instrumented, so #DB exceptions currently also
+ * only happen in safe places.
+ */
+DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ struct ghcb_state state;
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+ struct ghcb *ghcb;
+
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
+ vc_handle_trap_db(regs);
+ return;
+ }
+
+ instrumentation_begin();
+
+ /*
+ * This is invoked through an interrupt gate, so IRQs are disabled. The
+ * code below might walk page-tables for user or kernel addresses, so
+ * keep the IRQs disabled to protect us against concurrent TLB flushes.
+ */
+
+ ghcb = sev_es_get_ghcb(&state);
+ if (!ghcb) {
+ /*
+ * Mark GHCBs inactive so that panic() is able to print the
+ * message.
+ */
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+
+ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ }
+
+ vc_ghcb_invalidate(ghcb);
+ result = vc_init_em_ctxt(&ctxt, regs, error_code);
+
+ if (result == ES_OK)
+ result = vc_handle_exitcode(&ctxt, ghcb, error_code);
+
+ sev_es_put_ghcb(&state);
+
+ /* Done - now check the result */
+ switch (result) {
+ case ES_OK:
+ vc_finish_insn(&ctxt);
+ break;
+ case ES_UNSUPPORTED:
+ pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ error_code, regs->ip);
+ goto fail;
+ case ES_VMM_ERROR:
+ pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
+ error_code, regs->ip);
+ goto fail;
+ case ES_DECODE_FAILED:
+ pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
+ error_code, regs->ip);
+ goto fail;
+ case ES_EXCEPTION:
+ vc_forward_exception(&ctxt);
+ break;
+ case ES_RETRY:
+ /* Nothing to do */
+ break;
+ default:
+ pr_emerg("Unknown result in %s():%d\n", __func__, result);
+ /*
+ * Emulating the instruction which caused the #VC exception
+ * failed - can't continue so print debug information
+ */
+ BUG();
+ }
+
+out:
+ instrumentation_end();
+
+ return;
+
+fail:
+ if (user_mode(regs)) {
+ /*
+ * Do not kill the machine if user-space triggered the
+ * exception. Send SIGBUS instead and let user-space deal with
+ * it.
+ */
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+ } else {
+ pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
+ result);
+
+ /* Show some debug info */
+ show_regs(regs);
+
+ /* Ask hypervisor to sev_es_terminate */
+ sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
+
+ /* If that fails and we get here - just panic */
+ panic("Returned from Terminate-Request to Hypervisor\n");
+ }
+
+ goto out;
+}
+
+/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
+DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
+{
+ instrumentation_begin();
+ panic("Can't handle #VC exception from unsupported context\n");
+ instrumentation_end();
+}
+
+DEFINE_IDTENTRY_VC(exc_vmm_communication)
+{
+ if (likely(!on_vc_fallback_stack(regs)))
+ safe_stack_exc_vmm_communication(regs, error_code);
+ else
+ ist_exc_vmm_communication(regs, error_code);
+}
+
+bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
+{
+ unsigned long exit_code = regs->orig_ax;
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+
+ /* Do initial setup or terminate the guest */
+ if (unlikely(boot_ghcb == NULL && !sev_es_setup_ghcb()))
+ sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
+
+ vc_ghcb_invalidate(boot_ghcb);
+
+ result = vc_init_em_ctxt(&ctxt, regs, exit_code);
+ if (result == ES_OK)
+ result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
+
+ /* Done - now check the result */
+ switch (result) {
+ case ES_OK:
+ vc_finish_insn(&ctxt);
+ break;
+ case ES_UNSUPPORTED:
+ early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_VMM_ERROR:
+ early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_DECODE_FAILED:
+ early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_EXCEPTION:
+ vc_early_forward_exception(&ctxt);
+ break;
+ case ES_RETRY:
+ /* Nothing to do */
+ break;
+ default:
+ BUG();
+ }
+
+ return true;
+
+fail:
+ show_regs(regs);
+
+ while (true)
+ halt();
+}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index d5fa494c2304..be0d7d4152ec 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -726,7 +726,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->ax = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->ax = regs->orig_ax;
regs->ip -= 2;
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 9ccbf0576cd0..a7f3e12cfbdb 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -27,7 +27,7 @@ static inline void signal_compat_build_tests(void)
*/
BUILD_BUG_ON(NSIGILL != 11);
BUILD_BUG_ON(NSIGFPE != 15);
- BUILD_BUG_ON(NSIGSEGV != 7);
+ BUILD_BUG_ON(NSIGSEGV != 9);
BUILD_BUG_ON(NSIGBUS != 5);
BUILD_BUG_ON(NSIGTRAP != 5);
BUILD_BUG_ON(NSIGCHLD != 6);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 27aa04a95702..de776b2e6046 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -227,7 +227,7 @@ static void notrace start_secondary(void *unused)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
#endif
- load_current_idt();
+ cpu_init_exception_handling();
cpu_init();
x86_cpuinit.early_percpu_clock_init();
preempt_disable();
@@ -1594,14 +1594,28 @@ int native_cpu_disable(void)
if (ret)
return ret;
- /*
- * Disable the local APIC. Otherwise IPI broadcasts will reach
- * it. It still responds normally to INIT, NMI, SMI, and SIPI
- * messages.
- */
- apic_soft_disable();
cpu_disable_common();
+ /*
+ * Disable the local APIC. Otherwise IPI broadcasts will reach
+ * it. It still responds normally to INIT, NMI, SMI, and SIPI
+ * messages.
+ *
+ * Disabling the APIC must happen after cpu_disable_common()
+ * which invokes fixup_irqs().
+ *
+ * Disabling the APIC preserves already set bits in IRR, but
+ * an interrupt arriving after disabling the local APIC does not
+ * set the corresponding IRR bit.
+ *
+ * fixup_irqs() scans IRR for set bits so it can raise a not
+ * yet handled interrupt on the new destination CPU via an IPI
+ * but obviously it can't do so for IRR bits which are not set.
+ * IOW, interrupts arriving after disabling the local APIC will
+ * be lost.
+ */
+ apic_soft_disable();
+
return 0;
}
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 2fd698e28e4d..8627fda8d993 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -18,13 +18,13 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct unwind_state state;
unsigned long addr;
- if (regs && !consume_entry(cookie, regs->ip, false))
+ if (regs && !consume_entry(cookie, regs->ip))
return;
for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
- if (!addr || !consume_entry(cookie, addr, false))
+ if (!addr || !consume_entry(cookie, addr))
break;
}
}
@@ -72,7 +72,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (!addr)
return -EINVAL;
- if (!consume_entry(cookie, addr, false))
+ if (!consume_entry(cookie, addr))
return -EINVAL;
}
@@ -114,7 +114,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
{
const void __user *fp = (const void __user *)regs->bp;
- if (!consume_entry(cookie, regs->ip, false))
+ if (!consume_entry(cookie, regs->ip))
return;
while (1) {
@@ -128,7 +128,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
break;
if (!frame.ret_addr)
break;
- if (!consume_entry(cookie, frame.ret_addr, false))
+ if (!consume_entry(cookie, frame.ret_addr))
break;
fp = frame.next_fp;
}
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
new file mode 100644
index 000000000000..ca9a380d9c0b
--- /dev/null
+++ b/arch/x86/kernel/static_call.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/static_call.h>
+#include <linux/memory.h>
+#include <linux/bug.h>
+#include <asm/text-patching.h>
+
+enum insn_type {
+ CALL = 0, /* site call */
+ NOP = 1, /* site cond-call */
+ JMP = 2, /* tramp / site tail-call */
+ RET = 3, /* tramp / site cond-tail-call */
+};
+
+static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
+{
+ int size = CALL_INSN_SIZE;
+ const void *code;
+
+ switch (type) {
+ case CALL:
+ code = text_gen_insn(CALL_INSN_OPCODE, insn, func);
+ break;
+
+ case NOP:
+ code = ideal_nops[NOP_ATOMIC5];
+ break;
+
+ case JMP:
+ code = text_gen_insn(JMP32_INSN_OPCODE, insn, func);
+ break;
+
+ case RET:
+ code = text_gen_insn(RET_INSN_OPCODE, insn, func);
+ size = RET_INSN_SIZE;
+ break;
+ }
+
+ if (memcmp(insn, code, size) == 0)
+ return;
+
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return text_poke_early(insn, code, size);
+
+ text_poke_bp(insn, code, size, NULL);
+}
+
+static void __static_call_validate(void *insn, bool tail)
+{
+ u8 opcode = *(u8 *)insn;
+
+ if (tail) {
+ if (opcode == JMP32_INSN_OPCODE ||
+ opcode == RET_INSN_OPCODE)
+ return;
+ } else {
+ if (opcode == CALL_INSN_OPCODE ||
+ !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5))
+ return;
+ }
+
+ /*
+ * If we ever trigger this, our text is corrupt, we'll probably not live long.
+ */
+ WARN_ONCE(1, "unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
+}
+
+static inline enum insn_type __sc_insn(bool null, bool tail)
+{
+ /*
+ * Encode the following table without branches:
+ *
+ * tail null insn
+ * -----+-------+------
+ * 0 | 0 | CALL
+ * 0 | 1 | NOP
+ * 1 | 0 | JMP
+ * 1 | 1 | RET
+ */
+ return 2*tail + null;
+}
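
A stand-alone check (mirroring, not replacing, the code above) that the branchless 2*tail + null expression reproduces the CALL/NOP/JMP/RET table from the comment:

#include <assert.h>
#include <stdbool.h>

enum insn_type { CALL = 0, NOP = 1, JMP = 2, RET = 3 };

static enum insn_type sc_insn(bool null, bool tail)
{
	return 2 * tail + null;
}

int main(void)
{
	assert(sc_insn(false, false) == CALL);	/* site call */
	assert(sc_insn(true,  false) == NOP);	/* cond-call with NULL target */
	assert(sc_insn(false, true)  == JMP);	/* tail-call */
	assert(sc_insn(true,  true)  == RET);	/* cond-tail-call with NULL target */
	return 0;
}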
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+ mutex_lock(&text_mutex);
+
+ if (tramp) {
+ __static_call_validate(tramp, true);
+ __static_call_transform(tramp, __sc_insn(!func, true), func);
+ }
+
+ if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
+ __static_call_validate(site, tail);
+ __static_call_transform(site, __sc_insn(!func, tail), func);
+ }
+
+ mutex_unlock(&text_mutex);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c
index 720cde885042..6cf65397d225 100644
--- a/arch/x86/kernel/sys_ia32.c
+++ b/arch/x86/kernel/sys_ia32.c
@@ -251,6 +251,6 @@ COMPAT_SYSCALL_DEFINE5(ia32_clone, unsigned long, clone_flags,
.tls = tls_val,
};
- return _do_fork(&args);
+ return kernel_clone(&args);
}
#endif /* CONFIG_IA32_EMULATION */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1f66d2d1e998..3c70fb34028b 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -43,6 +43,7 @@
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
+#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
@@ -195,7 +196,7 @@ static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
DEFINE_IDTENTRY(exc_divide_error)
{
- do_error_trap(regs, 0, "divide_error", X86_TRAP_DE, SIGFPE,
+ do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
FPE_INTDIV, error_get_trap_addr(regs));
}
@@ -673,6 +674,50 @@ asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
return regs;
}
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
+{
+ unsigned long sp, *stack;
+ struct stack_info info;
+ struct pt_regs *regs_ret;
+
+ /*
+ * In the SYSCALL entry path the RSP value comes from user-space - don't
+ * trust it and switch to the current kernel stack
+ */
+ if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
+ regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) {
+ sp = this_cpu_read(cpu_current_top_of_stack);
+ goto sync;
+ }
+
+ /*
+ * From here on the RSP value is trusted. Now check whether entry
+ * happened from a safe stack. Not safe are the entry or unknown stacks,
+ * use the fall-back stack instead in this case.
+ */
+ sp = regs->sp;
+ stack = (unsigned long *)sp;
+
+ if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
+ info.type >= STACK_TYPE_EXCEPTION_LAST)
+ sp = __this_cpu_ist_top_va(VC2);
+
+sync:
+ /*
+ * Found a safe stack - switch to it as if the entry didn't happen via
+ * IST stack. The code below only copies pt_regs, the real switch happens
+ * in assembly code.
+ */
+ sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);
+
+ regs_ret = (struct pt_regs *)sp;
+ *regs_ret = *regs;
+
+ return regs_ret;
+}
+#endif
+
struct bad_iret_stack {
void *error_entry_ret;
struct pt_regs regs;
@@ -729,20 +774,9 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
-static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+static __always_inline unsigned long debug_read_clear_dr6(void)
{
- /*
- * Disable breakpoints during exception handling; recursive exceptions
- * are exceedingly 'fun'.
- *
- * Since this function is NOKPROBE, and that also applies to
- * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
- * HW_BREAKPOINT_W on our stack)
- *
- * Entry text is excluded for HW_BP_X and cpu_entry_area, which
- * includes the entry stack is excluded for everything.
- */
- *dr7 = local_db_save();
+ unsigned long dr6;
/*
* The Intel SDM says:
@@ -755,15 +789,24 @@ static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
*
* Keep it simple: clear DR6 immediately.
*/
- get_debugreg(*dr6, 6);
- set_debugreg(0, 6);
- /* Filter out all the reserved bits which are preset to 1 */
- *dr6 &= ~DR6_RESERVED;
-}
+ get_debugreg(dr6, 6);
+ set_debugreg(DR6_RESERVED, 6);
+ dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
-static __always_inline void debug_exit(unsigned long dr7)
-{
- local_db_restore(dr7);
+ /*
+ * Clear the virtual DR6 value, ptrace routines will set bits here for
+ * things we want signals for.
+ */
+ current->thread.virtual_dr6 = 0;
+
+ /*
+ * The SDM says "The processor clears the BTF flag when it
+ * generates a debug exception." Clear TIF_BLOCKSTEP to keep
+ * TIF_BLOCKSTEP in sync with the hardware BTF flag.
+ */
+ clear_thread_flag(TIF_BLOCKSTEP);
+
+ return dr6;
}
/*
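
Worked example (plain arithmetic, no hardware access) of the DR6 handling introduced above: the reserved and active-low bits read back as ones, so writing DR6_RESERVED and XOR-ing the read value with it leaves only the events that actually fired, in positive polarity:

#include <assert.h>

#define DR6_RESERVED	0xFFFF0FF0UL	/* reserved/active-low bits, as defined by the kernel */
#define DR_TRAP0	0x1UL		/* breakpoint 0 hit */

int main(void)
{
	unsigned long raw = DR6_RESERVED | DR_TRAP0;	/* what get_debugreg(dr6, 6) would return */
	unsigned long dr6 = raw ^ DR6_RESERVED;

	assert(dr6 == DR_TRAP0);	/* only the real event remains */
	return 0;
}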
@@ -790,79 +833,37 @@ static __always_inline void debug_exit(unsigned long dr7)
*
* May run on IST stack.
*/
-static void handle_debug(struct pt_regs *regs, unsigned long dr6, bool user)
-{
- struct task_struct *tsk = current;
- bool user_icebp;
- int si_code;
-
- /*
- * The SDM says "The processor clears the BTF flag when it
- * generates a debug exception." Clear TIF_BLOCKSTEP to keep
- * TIF_BLOCKSTEP in sync with the hardware BTF flag.
- */
- clear_thread_flag(TIF_BLOCKSTEP);
-
- /*
- * If DR6 is zero, no point in trying to handle it. The kernel is
- * not using INT1.
- */
- if (!user && !dr6)
- return;
+static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
+{
/*
- * If dr6 has no reason to give us about the origin of this trap,
- * then it's very likely the result of an icebp/int01 trap.
- * User wants a sigtrap for that.
+ * Notifiers will clear bits in @dr6 to indicate the event has been
+ * consumed - hw_breakpoint_handler(), single_stop_cont().
+ *
+ * Notifiers will set bits in @virtual_dr6 to indicate the desire
+ * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
*/
- user_icebp = user && !dr6;
-
- /* Store the virtualized DR6 value */
- tsk->thread.debugreg6 = dr6;
-
-#ifdef CONFIG_KPROBES
- if (kprobe_debug_handler(regs)) {
- return;
- }
-#endif
-
- if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, 0,
- SIGTRAP) == NOTIFY_STOP) {
- return;
- }
-
- /* It's safe to allow irq's after DR6 has been saved */
- cond_local_irq_enable(regs);
-
- if (v8086_mode(regs)) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs, 0,
- X86_TRAP_DB);
- goto out;
- }
-
- if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
- /*
- * Historical junk that used to handle SYSENTER single-stepping.
- * This should be unreachable now. If we survive for a while
- * without anyone hitting this warning, we'll turn this into
- * an oops.
- */
- tsk->thread.debugreg6 &= ~DR_STEP;
- set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
- regs->flags &= ~X86_EFLAGS_TF;
- }
-
- si_code = get_si_code(tsk->thread.debugreg6);
- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
- send_sigtrap(regs, 0, si_code);
+ if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
+ return true;
-out:
- cond_local_irq_disable(regs);
+ return false;
}
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
+ /*
+ * Disable breakpoints during exception handling; recursive exceptions
+ * are exceedingly 'fun'.
+ *
+ * Since this function is NOKPROBE, and that also applies to
+ * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+ * HW_BREAKPOINT_W on our stack)
+ *
+ * Entry text is excluded for HW_BP_X and cpu_entry_area, which
+ * includes the entry stack is excluded for everything.
+ */
+ unsigned long dr7 = local_db_save();
bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
@@ -879,26 +880,87 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
dr6 &= ~DR_STEP;
- handle_debug(regs, dr6, false);
+ if (kprobe_debug_handler(regs))
+ goto out;
+
+ /*
+ * The kernel doesn't use INT1
+ */
+ if (!dr6)
+ goto out;
+
+ if (notify_debug(regs, &dr6))
+ goto out;
+ /*
+ * The kernel doesn't use TF single-step outside of:
+ *
+ * - Kprobes, consumed through kprobe_debug_handler()
+ * - KGDB, consumed through notify_debug()
+ *
+ * So if we get here with DR_STEP set, something is wonky.
+ *
+ * A known way to trigger this is through QEMU's GDB stub,
+ * which leaks #DB into the guest and causes IST recursion.
+ */
+ if (WARN_ON_ONCE(dr6 & DR_STEP))
+ regs->flags &= ~X86_EFLAGS_TF;
+out:
instrumentation_end();
idtentry_exit_nmi(regs, irq_state);
+
+ local_db_restore(dr7);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
unsigned long dr6)
{
+ bool icebp;
+
/*
* If something gets miswired and we end up here for a kernel mode
* #DB, we will malfunction.
*/
WARN_ON_ONCE(!user_mode(regs));
+ /*
+ * NB: We can't easily clear DR7 here because
+ * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+ * user memory, etc. This means that a recursive #DB is possible. If
+ * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
+ * Since we're not on the IST stack right now, everything will be
+ * fine.
+ */
+
irqentry_enter_from_user_mode(regs);
instrumentation_begin();
- handle_debug(regs, dr6, true);
+ /*
+ * If dr6 has no reason to give us about the origin of this trap,
+ * then it's very likely the result of an icebp/int01 trap.
+ * User wants a sigtrap for that.
+ */
+ icebp = !dr6;
+
+ if (notify_debug(regs, &dr6))
+ goto out;
+
+ /* It's safe to allow irq's after DR6 has been saved */
+ local_irq_enable();
+
+ if (v8086_mode(regs)) {
+ handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
+ goto out_irq;
+ }
+ /* Add the virtual_dr6 bits for signals. */
+ dr6 |= current->thread.virtual_dr6;
+ if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
+ send_sigtrap(regs, 0, get_si_code(dr6));
+
+out_irq:
+ local_irq_disable();
+out:
instrumentation_end();
irqentry_exit_to_user_mode(regs);
}
@@ -907,36 +969,24 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_kernel(regs, dr6);
- debug_exit(dr7);
+ exc_debug_kernel(regs, debug_read_clear_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_user(regs, dr6);
- debug_exit(dr7);
+ exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
+ unsigned long dr6 = debug_read_clear_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);
else
exc_debug_kernel(regs, dr6);
-
- debug_exit(dr7);
}
#endif
@@ -1077,6 +1127,9 @@ void __init trap_init(void)
/* Init cpu_entry_area before IST entries are set up */
setup_cpu_entry_areas();
+ /* Init GHCB memory pages when running as an SEV-ES guest */
+ sev_es_init_vc_handling();
+
idt_setup_traps();
/*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 49d925043171..f70dffc2771f 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -54,7 +54,7 @@ struct clocksource *art_related_clocksource;
struct cyc2ns {
struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
- seqcount_t seq; /* 32 + 4 = 36 */
+ seqcount_latch_t seq; /* 32 + 4 = 36 */
}; /* fits one cacheline */
@@ -73,14 +73,14 @@ __always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
preempt_disable_notrace();
do {
- seq = this_cpu_read(cyc2ns.seq.sequence);
+ seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
idx = seq & 1;
data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
- } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
+ } while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
}
__always_inline void cyc2ns_read_end(void)
@@ -186,7 +186,7 @@ static void __init cyc2ns_init_boot_cpu(void)
{
struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
- seqcount_init(&c2n->seq);
+ seqcount_latch_init(&c2n->seq);
__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}
@@ -203,7 +203,7 @@ static void __init cyc2ns_init_secondary_cpus(void)
for_each_possible_cpu(cpu) {
if (cpu != this_cpu) {
- seqcount_init(&c2n->seq);
+ seqcount_latch_init(&c2n->seq);
c2n = per_cpu_ptr(&cyc2ns, cpu);
c2n->data[0] = data[0];
c2n->data[1] = data[1];
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 8d5cbe1bbb3b..f6225bf22c02 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -45,11 +45,12 @@
* value that, lies close to the top of the kernel memory. The limit for the GDT
* and the IDT are set to zero.
*
- * Given that SLDT and STR are not commonly used in programs that run on WineHQ
- * or DOSEMU2, they are not emulated.
- *
- * The instruction smsw is emulated to return the value that the register CR0
+ * The instruction SMSW is emulated to return the value that the register CR0
* has at boot time as set in the head_32.
+ * SLDT and STR are emulated to return the values that the kernel programmatically
+ * assigns:
+ * - SLDT returns (GDT_ENTRY_LDT * 8) if an LDT has been set, 0 if not.
+ * - STR returns (GDT_ENTRY_TSS * 8).
*
* Emulation is provided for both 32-bit and 64-bit processes.
*
@@ -244,16 +245,34 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
*data_size += UMIP_GDT_IDT_LIMIT_SIZE;
memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE);
- } else if (umip_inst == UMIP_INST_SMSW) {
- unsigned long dummy_value = CR0_STATE;
+ } else if (umip_inst == UMIP_INST_SMSW || umip_inst == UMIP_INST_SLDT ||
+ umip_inst == UMIP_INST_STR) {
+ unsigned long dummy_value;
+
+ if (umip_inst == UMIP_INST_SMSW) {
+ dummy_value = CR0_STATE;
+ } else if (umip_inst == UMIP_INST_STR) {
+ dummy_value = GDT_ENTRY_TSS * 8;
+ } else if (umip_inst == UMIP_INST_SLDT) {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ down_read(&current->mm->context.ldt_usr_sem);
+ if (current->mm->context.ldt)
+ dummy_value = GDT_ENTRY_LDT * 8;
+ else
+ dummy_value = 0;
+ up_read(&current->mm->context.ldt_usr_sem);
+#else
+ dummy_value = 0;
+#endif
+ }
/*
- * Even though the CR0 register has 4 bytes, the number
+ * For these 3 instructions, the number
* of bytes to be copied in the result buffer is determined
* by whether the operand is a register or a memory location.
* If operand is a register, return as many bytes as the operand
* size. If operand is memory, return only the two least
- * siginificant bytes of CR0.
+ * significant bytes.
*/
if (X86_MODRM_MOD(insn->modrm.value) == 3)
*data_size = insn->opnd_bytes;
@@ -261,7 +280,6 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
*data_size = 2;
memcpy(data, &dummy_value, *data_size);
- /* STR and SLDT are not emulated */
} else {
return -EINVAL;
}
@@ -317,63 +335,28 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs)
*/
bool fixup_umip_exception(struct pt_regs *regs)
{
- int not_copied, nr_copied, reg_offset, dummy_data_size, umip_inst;
- unsigned long seg_base = 0, *reg_addr;
+ int nr_copied, reg_offset, dummy_data_size, umip_inst;
/* 10 bytes is the maximum size of the result of UMIP instructions */
unsigned char dummy_data[10] = { 0 };
unsigned char buf[MAX_INSN_SIZE];
+ unsigned long *reg_addr;
void __user *uaddr;
struct insn insn;
- int seg_defs;
if (!regs)
return false;
- /*
- * If not in user-space long mode, a custom code segment could be in
- * use. This is true in protected mode (if the process defined a local
- * descriptor table), or virtual-8086 mode. In most of the cases
- * seg_base will be zero as in USER_CS.
- */
- if (!user_64bit_mode(regs))
- seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
-
- if (seg_base == -1L)
- return false;
-
- not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
- sizeof(buf));
- nr_copied = sizeof(buf) - not_copied;
+ nr_copied = insn_fetch_from_user(regs, buf);
/*
- * The copy_from_user above could have failed if user code is protected
- * by a memory protection key. Give up on emulation in such a case.
- * Should we issue a page fault?
+ * The insn_fetch_from_user above could have failed if user code
+ * is protected by a memory protection key. Give up on emulation
+ * in such a case. Should we issue a page fault?
*/
if (!nr_copied)
return false;
- insn_init(&insn, buf, nr_copied, user_64bit_mode(regs));
-
- /*
- * Override the default operand and address sizes with what is specified
- * in the code segment descriptor. The instruction decoder only sets
- * the address size it to either 4 or 8 address bytes and does nothing
- * for the operand bytes. This OK for most of the cases, but we could
- * have special cases where, for instance, a 16-bit code segment
- * descriptor is used.
- * If there is an address override prefix, the instruction decoder
- * correctly updates these values, even for 16-bit defaults.
- */
- seg_defs = insn_get_code_seg_params(regs);
- if (seg_defs == -EINVAL)
- return false;
-
- insn.addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs);
- insn.opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs);
-
- insn_get_length(&insn);
- if (nr_copied < insn.length)
+ if (!insn_decode(&insn, regs, buf, nr_copied))
return false;
umip_inst = identify_insn(&insn);
@@ -383,10 +366,6 @@ bool fixup_umip_exception(struct pt_regs *regs)
umip_pr_warn(regs, "%s instruction cannot be used by applications.\n",
umip_insns[umip_inst]);
- /* Do not emulate (spoof) SLDT or STR. */
- if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT)
- return false;
-
umip_pr_warn(regs, "For now, expensive software emulation returns the result.\n");
if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size,
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index ec88bbe08a32..6a339ce328e0 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
@@ -127,12 +128,12 @@ static struct orc_entry null_orc_entry = {
.sp_offset = sizeof(long),
.sp_reg = ORC_REG_SP,
.bp_reg = ORC_REG_UNDEFINED,
- .type = ORC_TYPE_CALL
+ .type = UNWIND_HINT_TYPE_CALL
};
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
- .type = ORC_TYPE_CALL,
+ .type = UNWIND_HINT_TYPE_CALL,
.sp_reg = ORC_REG_BP,
.sp_offset = 16,
.bp_reg = ORC_REG_PREV_SP,
@@ -531,7 +532,7 @@ bool unwind_next_frame(struct unwind_state *state)
/* Find IP, SP and possibly regs: */
switch (orc->type) {
- case ORC_TYPE_CALL:
+ case UNWIND_HINT_TYPE_CALL:
ip_p = sp - sizeof(long);
if (!deref_stack_reg(state, ip_p, &state->ip))
@@ -546,7 +547,7 @@ bool unwind_next_frame(struct unwind_state *state)
state->signal = false;
break;
- case ORC_TYPE_REGS:
+ case UNWIND_HINT_TYPE_REGS:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
orc_warn_current("can't access registers at %pB\n",
(void *)orig_ip);
@@ -559,7 +560,7 @@ bool unwind_next_frame(struct unwind_state *state)
state->signal = true;
break;
- case ORC_TYPE_REGS_IRET:
+ case UNWIND_HINT_TYPE_REGS_PARTIAL:
if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
orc_warn_current("can't access iret registers at %pB\n",
(void *)orig_ip);
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 15e5aad8ac2c..3fdaa042823d 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -735,7 +735,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
* OPCODE1() of the "short" jmp which checks the same condition.
*/
opc1 = OPCODE2(insn) - 0x10;
- /* fall through */
+ fallthrough;
default:
if (!is_cond_jmp_opcode(opc1))
return -ENOSYS;
@@ -892,7 +892,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
fix_ip_or_call = 0;
break;
}
- /* fall through */
+ fallthrough;
default:
riprel_analyze(auprobe, &insn);
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9a03e5b23135..bf9e0adb5b7e 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -136,6 +136,7 @@ SECTIONS
ENTRY_TEXT
ALIGN_ENTRY_TEXT_END
SOFTIRQENTRY_TEXT
+ STATIC_CALL_TEXT
*(.fixup)
*(.gnu.warning)
@@ -411,10 +412,47 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
-}
+ /*
+ * Make sure that the .got.plt is either completely empty or it
+ * contains only the lazy dispatch entries.
+ */
+ .got.plt (INFO) : { *(.got.plt) }
+ ASSERT(SIZEOF(.got.plt) == 0 ||
+#ifdef CONFIG_X86_64
+ SIZEOF(.got.plt) == 0x18,
+#else
+ SIZEOF(.got.plt) == 0xc,
+#endif
+ "Unexpected GOT/PLT entries detected!")
+
+ /*
+ * Sections that should stay zero sized, which is safer to
+ * explicitly check instead of blindly discarding.
+ */
+ .got : {
+ *(.got) *(.igot.*)
+ }
+ ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
+
+ .plt : {
+ *(.plt) *(.plt.*) *(.iplt)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .rel.dyn : {
+ *(.rel.*) *(.rel_*)
+ }
+ ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
+
+ .rela.dyn : {
+ *(.rela.*) *(.rela_*)
+ }
+ ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
+}
#ifdef CONFIG_X86_32
/*
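
The magic numbers in the .got.plt assertion above follow from the psABI layout: the linker always emits three reserved slots at the start of .got.plt (the _DYNAMIC address plus two entries used by a lazy-binding loader), so an otherwise empty table measures 3 * 8 = 0x18 bytes on x86_64 and 3 * 4 = 0xc bytes on 32-bit. Any other size means real GOT/PLT entries crept into the kernel image, which is exactly what these assertions are meant to catch.
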
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 123f1c1f1788..a3038d8deb6a 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -24,6 +24,7 @@
#include <asm/tsc.h>
#include <asm/iommu.h>
#include <asm/mach_traps.h>
+#include <asm/irqdomain.h>
void x86_init_noop(void) { }
void __init x86_init_uint_noop(unsigned int unused) { }
@@ -67,11 +68,7 @@ struct x86_init_ops x86_init __initdata = {
},
.mpparse = {
- .mpc_record = x86_init_uint_noop,
.setup_ioapic_ids = x86_init_noop,
- .mpc_apic_id = default_mpc_apic_id,
- .smp_read_mpc_oem = default_smp_read_mpc_oem,
- .mpc_oem_bus_info = default_mpc_oem_bus_info,
.find_smp_config = default_find_smp_config,
.get_smp_config = default_get_smp_config,
},
@@ -80,7 +77,8 @@ struct x86_init_ops x86_init __initdata = {
.pre_vector_init = init_ISA_irqs,
.intr_init = native_init_IRQ,
.intr_mode_select = apic_intr_mode_select,
- .intr_mode_init = apic_intr_mode_init
+ .intr_mode_init = apic_intr_mode_init,
+ .create_pci_msi_domain = native_create_pci_msi_domain,
},
.oem = {
@@ -148,28 +146,10 @@ EXPORT_SYMBOL_GPL(x86_platform);
#if defined(CONFIG_PCI_MSI)
struct x86_msi_ops x86_msi __ro_after_init = {
- .setup_msi_irqs = native_setup_msi_irqs,
- .teardown_msi_irq = native_teardown_msi_irq,
- .teardown_msi_irqs = default_teardown_msi_irqs,
.restore_msi_irqs = default_restore_msi_irqs,
};
/* MSI arch specific hooks */
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- return x86_msi.setup_msi_irqs(dev, nvec, type);
-}
-
-void arch_teardown_msi_irqs(struct pci_dev *dev)
-{
- x86_msi.teardown_msi_irqs(dev);
-}
-
-void arch_teardown_msi_irq(unsigned int irq)
-{
- x86_msi.teardown_msi_irq(irq);
-}
-
void arch_restore_msi_irqs(struct pci_dev *dev)
{
x86_msi.restore_msi_irqs(dev);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3fd6eec202d7..7456f9ad424b 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -371,7 +371,7 @@ void kvm_set_cpu_caps(void)
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
- F(SERIALIZE)
+ F(SERIALIZE) | F(TSXLDTRK)
);
/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d0e2825ae617..2f6510de6b0c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2505,9 +2505,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7fc8);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
@@ -2560,16 +2565,23 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smstate, 0x7f68);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7f60);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
val = GET_SMSTATE(u64, smstate, 0x7ed0);
- ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
+
+ if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
@@ -3016,7 +3028,7 @@ static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
- /* fall through */
+ fallthrough;
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 814d3aee5cef..8c1e8334eff0 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1779,7 +1779,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
- /* fall through - maybe userspace knows this conn_id. */
+ fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -2000,20 +2000,20 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
break;
case HYPERV_CPUID_FEATURES:
- ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+ ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
- ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+ ent->eax |= HV_MSR_SYNIC_AVAILABLE;
ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
- ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
- ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
- ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
- ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
+ ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
+ ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
+ ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
+ ent->eax |= HV_MSR_RESET_AVAILABLE;
ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
- ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
- ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
+ ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
+ ent->eax |= HV_ACCESS_REENLIGHTENMENT;
- ent->ebx |= HV_X64_POST_MESSAGES;
- ent->ebx |= HV_X64_SIGNAL_EVENTS;
+ ent->ebx |= HV_POST_MESSAGES;
+ ent->ebx |= HV_SIGNAL_EVENTS;
ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index c47d2acec529..4aa1c2e00e2a 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -285,7 +285,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
switch (ue->u.irqchip.irqchip) {
case KVM_IRQCHIP_PIC_SLAVE:
e->irqchip.pin += PIC_NUM_PINS / 2;
- /* fall through */
+ fallthrough;
case KVM_IRQCHIP_PIC_MASTER:
if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
return -EINVAL;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5ccbee7165a2..35cca2e0c802 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1053,7 +1053,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
- /* fall through */
+ fallthrough;
case APIC_DM_FIXED:
if (unlikely(trig_mode && !level))
break;
@@ -1341,7 +1341,7 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
break;
case APIC_TASKPRI:
report_tpr_access(apic, false);
- /* fall thru */
+ fallthrough;
default:
val = kvm_lapic_get_reg(apic, offset);
break;
@@ -2027,7 +2027,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
- /* fall through */
+ fallthrough;
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4e03841f053d..71aa3da2a0b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1916,7 +1916,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
@@ -2468,7 +2469,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
}
if (sp->unsync_children)
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp);
@@ -4421,7 +4422,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4];
- /* fall through */
+ fallthrough;
case PT64_ROOT_4LEVEL:
rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index fb68467e6049..598a769f1961 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -586,7 +586,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
/* Give the current vmcb to the guest */
- svm_set_gif(svm, false);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
@@ -632,6 +631,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
/* Restore the original control entries */
copy_vmcb_control_area(&vmcb->control, &hsave->control);
+ /* On vmexit the GIF is set to false */
+ svm_set_gif(svm, false);
+
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
svm->vcpu.arch.l1_tsc_offset;
@@ -1060,10 +1062,14 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
struct vmcb *hsave = svm->nested.hsave;
struct vmcb __user *user_vmcb = (struct vmcb __user *)
&user_kvm_nested_state->data.svm[0];
- struct vmcb_control_area ctl;
- struct vmcb_save_area save;
+ struct vmcb_control_area *ctl;
+ struct vmcb_save_area *save;
+ int ret;
u32 cr0;
+ BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
+ KVM_STATE_NESTED_SVM_VMCB_SIZE);
+
if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
return -EINVAL;
@@ -1095,13 +1101,22 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
return -EINVAL;
- if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
- return -EFAULT;
- if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
- return -EFAULT;
- if (!nested_vmcb_check_controls(&ctl))
- return -EINVAL;
+ ret = -ENOMEM;
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ save = kzalloc(sizeof(*save), GFP_KERNEL);
+ if (!ctl || !save)
+ goto out_free;
+
+ ret = -EFAULT;
+ if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
+ goto out_free;
+ if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
+ goto out_free;
+
+ ret = -EINVAL;
+ if (!nested_vmcb_check_controls(ctl))
+ goto out_free;
/*
* Processor state contains L2 state. Check that it is
@@ -1109,15 +1124,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
*/
cr0 = kvm_read_cr0(vcpu);
if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
- return -EINVAL;
+ goto out_free;
/*
* Validate host state saved from before VMRUN (see
* nested_svm_check_permissions).
* TODO: validate reserved bits for all saved state.
*/
- if (!(save.cr0 & X86_CR0_PG))
- return -EINVAL;
+ if (!(save->cr0 & X86_CR0_PG))
+ goto out_free;
/*
* All checks done, we can enter guest mode. L1 control fields
@@ -1126,15 +1141,24 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
* contains saved L1 state.
*/
copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
- hsave->save = save;
+ hsave->save = *save;
svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
- load_nested_vmcb_control(svm, &ctl);
+ load_nested_vmcb_control(svm, ctl);
nested_prepare_vmcb_control(svm);
+ if (!nested_svm_vmrun_msrpm(svm))
+ return -EINVAL;
+
out_set_gif:
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
- return 0;
+
+ ret = 0;
+out_free:
+ kfree(save);
+ kfree(ctl);
+
+ return ret;
}
struct kvm_x86_nested_ops svm_nested_ops = {
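
The vmcb_control_area and vmcb_save_area copies are large enough that keeping them as locals risks blowing the kernel stack, so the hunk above moves them to the heap and funnels every failure through one out_free label. A userspace sketch of that shape (load_state, copy_in and the struct sizes here are hypothetical stand-ins, not the KVM code):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct big_ctl  { char bytes[1024]; };	/* stand-in for a vmcb_control_area-sized struct */
struct big_save { char bytes[1024]; };	/* stand-in for a vmcb_save_area-sized struct    */

/* Hypothetical: pretend this copies from a user buffer and can fail. */
static int copy_in(void *dst, const void *src, size_t len)
{
	if (!src)
		return -EFAULT;
	memcpy(dst, src, len);
	return 0;
}

static int load_state(const void *user_ctl, const void *user_save)
{
	struct big_ctl *ctl;
	struct big_save *save;
	int ret = -ENOMEM;

	/* Heap allocation keeps ~2KB of state off the (small) kernel stack. */
	ctl  = calloc(1, sizeof(*ctl));
	save = calloc(1, sizeof(*save));
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_in(ctl, user_ctl, sizeof(*ctl)))
		goto out_free;
	if (copy_in(save, user_save, sizeof(*save)))
		goto out_free;

	/* ... validate and consume *ctl / *save here ... */
	ret = 0;
out_free:
	/* Single unwind point: safe even if one allocation failed. */
	free(save);
	free(ctl);
	return ret;
}

int main(void)
{
	char ctl_buf[1024] = { 0 }, save_buf[1024] = { 0 };

	return load_state(ctl_buf, save_buf) ? 1 : 0;
}
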
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 402dc4234e39..3c9a45efdd4d 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
uint8_t *page_virtual;
unsigned long i;
- if (npages == 0 || pages == NULL)
+ if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
+ pages == NULL)
return;
for (i = 0; i < npages; i++) {
@@ -1106,6 +1107,7 @@ void sev_vm_destroy(struct kvm *kvm)
list_for_each_safe(pos, q, head) {
__unregister_enc_region_locked(kvm,
list_entry(pos, struct enc_region, list));
+ cond_resched();
}
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 03dd7bac8034..9709c98d0d6c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -19,7 +19,7 @@
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
-#include <linux/frame.h>
+#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
@@ -2183,6 +2183,12 @@ static int iret_interception(struct vcpu_svm *svm)
return 1;
}
+static int invd_interception(struct vcpu_svm *svm)
+{
+ /* Treat an INVD instruction as a NOP and just skip it. */
+ return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
static int invlpg_interception(struct vcpu_svm *svm)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -2668,7 +2674,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data);
- /* Fall through */
+ fallthrough;
default:
return kvm_set_msr_common(vcpu, msr);
}
@@ -2774,7 +2780,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_RDPMC] = rdpmc_interception,
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
- [SVM_EXIT_INVD] = emulate_on_interception,
+ [SVM_EXIT_INVD] = invd_interception,
[SVM_EXIT_PAUSE] = pause_interception,
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
@@ -2938,8 +2944,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
- svm_complete_interrupts(svm);
-
if (is_guest_mode(vcpu)) {
int vmexit;
@@ -3504,7 +3508,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
stgi();
/* Any pending NMI will happen here */
- exit_fastpath = svm_exit_handlers_fastpath(vcpu);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu);
@@ -3518,6 +3521,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+ vmcb_mark_all_clean(svm->vmcb);
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
@@ -3537,7 +3541,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
- vmcb_mark_all_clean(svm->vmcb);
+ svm_complete_interrupts(svm);
+ exit_fastpath = svm_exit_handlers_fastpath(vcpu);
return exit_fastpath;
}
@@ -3900,21 +3905,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *nested_vmcb;
struct kvm_host_map map;
- u64 guest;
- u64 vmcb;
int ret = 0;
- guest = GET_SMSTATE(u64, smstate, 0x7ed8);
- vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
+ if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+ u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+ u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
- if (guest) {
- if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
- return 1;
- nested_vmcb = map.hva;
- ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ if (guest) {
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ return 1;
+
+ if (!(saved_efer & EFER_SVME))
+ return 1;
+
+ if (kvm_vcpu_map(&svm->vcpu,
+ gpa_to_gfn(vmcb), &map) == -EINVAL)
+ return 1;
+
+ ret = enter_svm_guest_mode(svm, vmcb, map.hva);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ }
}
return ret;
@@ -4164,6 +4176,8 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
static int __init svm_init(void)
{
+ __unused_size_checks();
+
return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
__alignof__(struct vcpu_svm), THIS_MODULE);
}
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c28a1c9..19e2265956ba 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/frame.h>
+#include <linux/objtool.h>
#include <linux/percpu.h>
#include <asm/debugreg.h>
@@ -4404,6 +4404,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
+ /*
+ * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
+ * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
+ * up-to-date before switching to L1.
+ */
+ if (enable_ept && is_pae_paging(vcpu))
+ vmx_ept_load_pdptrs(vcpu);
+
leave_guest_mode(vcpu);
if (nested_cpu_has_preemption_timer(vmcs12))
@@ -4668,7 +4676,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
vmx->nested.msrs.entry_ctls_high &=
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
vmx->nested.msrs.exit_ctls_high &=
- ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
}
}
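
The PDPTR reload added above only matters when L1 is using PAE paging, i.e. paging on, CR4.PAE set and long mode off; in that mode the four page-directory-pointer entries are cached in VMCS fields and have to be current before control returns to L1. A rough sketch of that predicate (the real helper is is_pae_paging() in arch/x86/kvm/x86.h; bit positions per the SDM):

#include <stdbool.h>
#include <stdio.h>

static inline bool pae_paging_sketch(unsigned long cr0, unsigned long cr4,
				     unsigned long long efer)
{
	return (cr0 & (1ul << 31)) &&		/* CR0.PG  - paging enabled    */
	       (cr4 & (1ul << 5))  &&		/* CR4.PAE - PAE enabled       */
	       !(efer & (1ull << 10));		/* EFER.LMA clear - not 64-bit */
}

int main(void)
{
	/* PG=1, PAE=1, LMA=0 -> PDPTRs are in use and must be kept current. */
	printf("%d\n", pae_paging_sketch(1ul << 31, 1ul << 5, 0));
	return 0;
}
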
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 46ba2e03a892..f0a9954c49db 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -13,7 +13,6 @@
* Yaniv Kamay <yaniv@qumranet.com>
*/
-#include <linux/frame.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
@@ -22,6 +21,7 @@
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/mm.h>
+#include <linux/objtool.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/slab.h>
@@ -129,6 +129,9 @@ static bool __read_mostly enable_preemption_timer = 1;
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif
+extern bool __read_mostly allow_smaller_maxphyaddr;
+module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
+
#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON \
@@ -791,6 +794,18 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
*/
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
+ else {
+ /*
+ * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
+ * between guest and host. In that case we only care about present
+ * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
+ * prepare_vmcs02_rare.
+ */
+ bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
+ int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+ }
vmcs_write32(EXCEPTION_BITMAP, eb);
}
@@ -2971,7 +2986,7 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
vpid_sync_context(to_vmx(vcpu)->vpid);
}
-static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -3114,7 +3129,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
guest_cr3 = vcpu->arch.cr3;
else /* vmcs01.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
- ept_load_pdptrs(vcpu);
+ vmx_ept_load_pdptrs(vcpu);
} else {
guest_cr3 = pgd;
}
@@ -4352,16 +4367,6 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmx->pt_desc.guest.output_mask = 0x7F;
vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
}
-
- /*
- * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
- * between guest and host. In that case we only care about present
- * faults.
- */
- if (enable_ept) {
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, PFERR_PRESENT_MASK);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, PFERR_PRESENT_MASK);
- }
}
static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -4654,7 +4659,7 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return false;
- /* fall through */
+ fallthrough;
case DB_VECTOR:
return !(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
@@ -4803,6 +4808,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
* EPT will cause page fault only if we need to
* detect illegal GPAs.
*/
+ WARN_ON_ONCE(!allow_smaller_maxphyaddr);
kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
return 1;
} else
@@ -4827,7 +4833,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
}
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
- /* fall through */
+ fallthrough;
case BP_VECTOR:
/*
* Update instruction length as we may reinject #BP from
@@ -5257,7 +5263,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
error_code =
vmcs_read32(IDT_VECTORING_ERROR_CODE);
}
- /* fall through */
+ fallthrough;
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
@@ -5331,7 +5337,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
* would also use advanced VM-exit information for EPT violations to
* reconstruct the page fault error code.
*/
- if (unlikely(kvm_mmu_is_illegal_gpa(vcpu, gpa)))
+ if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
return kvm_emulate_instruction(vcpu, 0);
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -5610,7 +5616,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
* keeping track of global entries in shadow page tables.
*/
- /* fall-through */
+ fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
kvm_mmu_unload(vcpu);
return kvm_skip_emulated_instruction(vcpu);
@@ -6054,6 +6060,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
@@ -6578,7 +6585,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
break;
case INTR_TYPE_SOFT_EXCEPTION:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
u32 err = vmcs_read32(error_code_field);
@@ -6588,7 +6595,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
break;
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_EXT_INTR:
kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
break;
@@ -8304,11 +8311,12 @@ static int __init vmx_init(void)
vmx_check_vmcs12_offsets();
/*
- * Intel processors don't have problems with
- * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
- * it for VMX by default
+ * Shadow paging doesn't have a (further) performance penalty
+ * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
+ * by default
*/
- allow_smaller_maxphyaddr = true;
+ if (!enable_ept)
+ allow_smaller_maxphyaddr = true;
return 0;
}
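
The PAGE_FAULT_ERROR_CODE_MASK/MATCH pair written in update_exception_bitmap above gates which guest page faults exit: with the #PF bit set in the exception bitmap, a fault exits only when (error_code & MASK) == MATCH, and with the bit clear the sense inverts. Setting both fields to PFERR_PRESENT_MASK therefore limits exits to present faults, which is all that matters when EPT already handles not-present ones. A simplified sketch of that comparison (set-bit case only):

#include <stdbool.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK	(1u << 0)

/* Decision for a guest #PF when the #PF bit is set in the exception
 * bitmap (simplified reading of the SDM rule). */
static bool pf_causes_vmexit(unsigned int error_code,
			     unsigned int pfec_mask, unsigned int pfec_match)
{
	return (error_code & pfec_mask) == pfec_match;
}

int main(void)
{
	unsigned int mask = PFERR_PRESENT_MASK, match = PFERR_PRESENT_MASK;

	printf("not-present fault exits: %d\n", pf_causes_vmexit(0x2, mask, match)); /* 0 */
	printf("present fault exits:     %d\n", pf_causes_vmexit(0x3, mask, match)); /* 1 */
	return 0;
}
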
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 26175a4759fa..a0e47720f60c 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -356,6 +356,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
@@ -551,7 +552,10 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
- return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+ if (!enable_ept)
+ return true;
+
+ return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}
void dump_vmcs(void);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 599d73206299..ce856e0ece84 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -188,7 +188,7 @@ static struct kvm_shared_msrs __percpu *shared_msrs;
u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);
-bool __read_mostly allow_smaller_maxphyaddr;
+bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
static u64 __read_mostly host_xss;
@@ -975,7 +975,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+ X86_CR4_SMEP;
+ unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
if (kvm_valid_cr4(vcpu, cr4))
return 1;
@@ -1003,7 +1004,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (kvm_x86_ops.set_cr4(vcpu, cr4))
return 1;
- if (((cr4 ^ old_cr4) & pdptr_bits) ||
+ if (((cr4 ^ old_cr4) & mmu_role_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
@@ -1116,14 +1117,12 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu->arch.eff_db[dr] = val;
break;
case 4:
- /* fall through */
case 6:
if (!kvm_dr6_valid(val))
return -1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
break;
case 5:
- /* fall through */
default: /* 7 */
if (!kvm_dr7_valid(val))
return -1; /* #GP */
@@ -1154,12 +1153,10 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
*val = vcpu->arch.db[array_index_nospec(dr, size)];
break;
case 4:
- /* fall through */
case 6:
*val = vcpu->arch.dr6;
break;
case 5:
- /* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
@@ -2735,7 +2732,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
if (!lapic_in_kernel(vcpu))
- return 1;
+ return data ? 1 : 0;
vcpu->arch.apf.msr_en_val = data;
@@ -3051,7 +3048,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
- pr = true; /* fall through */
+ pr = true;
+ fallthrough;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
if (kvm_pmu_is_valid_msr(vcpu, msr))
@@ -3224,9 +3222,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_POWER_CTL:
msr_info->data = vcpu->arch.msr_ia32_power_ctl;
break;
- case MSR_IA32_TSC:
- msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
+ case MSR_IA32_TSC: {
+ /*
+ * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
+ * even when not intercepted. AMD manual doesn't explicitly
+ * state this but appears to behave the same.
+ *
+ * On userspace reads and writes, however, we unconditionally
+ * return L1's TSC value to ensure backwards-compatible
+ * behavior for migration.
+ */
+ u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
+ vcpu->arch.tsc_offset;
+
+ msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
break;
+ }
case MSR_MTRRcap:
case 0x200 ... 0x2ff:
return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -3581,6 +3592,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SMALLER_MAXPHYADDR:
r = (int) allow_smaller_maxphyaddr;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = sched_info_on();
+ break;
default:
break;
}
@@ -4359,7 +4373,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
case KVM_CAP_HYPERV_SYNIC2:
if (cap->args[0])
return -EINVAL;
- /* fall through */
+ fallthrough;
case KVM_CAP_HYPERV_SYNIC:
if (!irqchip_in_kernel(vcpu->kvm))
@@ -8672,7 +8686,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
- /* fall through */
+ fallthrough;
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
@@ -10751,9 +10765,11 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
struct x86_exception fault;
+ u32 access = error_code &
+ (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
- vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
+ vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
/*
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed
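
The guest-visible TSC is the (scaled) host TSC plus an offset, and the MSR_IA32_TSC hunk above only changes which offset is applied: the currently active one for reads the guest performs, but L1's offset for host-initiated accesses, so migration keeps seeing L1's clock even while L2 is running. A toy illustration with made-up numbers and no TSC scaling:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t host_tsc = 1000000;	/* pretend rdtsc() value, scaling ignored */
	int64_t l1_offset = 0;		/* offset L1 runs with                    */
	int64_t l2_offset = -400000;	/* offset in effect while L2 is running   */

	/* In-guest read while L2 runs uses the active (L2) offset ... */
	printf("guest view:     %llu\n", (unsigned long long)(host_tsc + l2_offset));
	/* ... but a host-initiated read for migration wants L1's value. */
	printf("userspace view: %llu\n", (unsigned long long)(host_tsc + l1_offset));
	return 0;
}
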
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index d46fff11f06f..bad4dee4f0e4 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
endif
-CFLAGS_cmdline.o := -fno-stack-protector
+CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
endif
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
@@ -44,6 +44,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o misc.o cmdline.o cpu.o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
+lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index d1d768912368..4304320e51f4 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -253,28 +253,17 @@ EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
- int len, int sum, int *src_err_ptr, int *dst_err_ptr)
+ int len)
*/
/*
* Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- * DST definitions? It's damn hard to trigger all cases. I hope I got
- * them all but there's no guarantee.
*/
-#define SRC(y...) \
+#define EXC(y...) \
9999: y; \
_ASM_EXTABLE_UA(9999b, 6001f)
-#define DST(y...) \
- 9999: y; \
- _ASM_EXTABLE_UA(9999b, 6002f)
-
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
#define ARGBASE 16
@@ -285,20 +274,20 @@ SYM_FUNC_START(csum_partial_copy_generic)
pushl %edi
pushl %esi
pushl %ebx
- movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src
movl ARGBASE+8(%esp),%edi # dst
+ movl $-1, %eax # sum
testl $2, %edi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %ecx # Alignment uses up two bytes.
jae 1f # Jump if we had at least two bytes.
addl $2, %ecx # ecx was < 2. Deal with it.
jmp 4f
-SRC(1: movw (%esi), %bx )
+EXC(1: movw (%esi), %bx )
addl $2, %esi
-DST( movw %bx, (%edi) )
+EXC( movw %bx, (%edi) )
addl $2, %edi
addw %bx, %ax
adcl $0, %eax
@@ -306,34 +295,34 @@ DST( movw %bx, (%edi) )
movl %ecx, FP(%esp)
shrl $5, %ecx
jz 2f
- testl %esi, %esi
-SRC(1: movl (%esi), %ebx )
-SRC( movl 4(%esi), %edx )
+ testl %esi, %esi # what's wrong with clc?
+EXC(1: movl (%esi), %ebx )
+EXC( movl 4(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
+EXC( movl %ebx, (%edi) )
adcl %edx, %eax
-DST( movl %edx, 4(%edi) )
+EXC( movl %edx, 4(%edi) )
-SRC( movl 8(%esi), %ebx )
-SRC( movl 12(%esi), %edx )
+EXC( movl 8(%esi), %ebx )
+EXC( movl 12(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, 8(%edi) )
+EXC( movl %ebx, 8(%edi) )
adcl %edx, %eax
-DST( movl %edx, 12(%edi) )
+EXC( movl %edx, 12(%edi) )
-SRC( movl 16(%esi), %ebx )
-SRC( movl 20(%esi), %edx )
+EXC( movl 16(%esi), %ebx )
+EXC( movl 20(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, 16(%edi) )
+EXC( movl %ebx, 16(%edi) )
adcl %edx, %eax
-DST( movl %edx, 20(%edi) )
+EXC( movl %edx, 20(%edi) )
-SRC( movl 24(%esi), %ebx )
-SRC( movl 28(%esi), %edx )
+EXC( movl 24(%esi), %ebx )
+EXC( movl 28(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, 24(%edi) )
+EXC( movl %ebx, 24(%edi) )
adcl %edx, %eax
-DST( movl %edx, 28(%edi) )
+EXC( movl %edx, 28(%edi) )
lea 32(%esi), %esi
lea 32(%edi), %edi
@@ -345,9 +334,9 @@ DST( movl %edx, 28(%edi) )
andl $0x1c, %edx
je 4f
shrl $2, %edx # This clears CF
-SRC(3: movl (%esi), %ebx )
+EXC(3: movl (%esi), %ebx )
adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
+EXC( movl %ebx, (%edi) )
lea 4(%esi), %esi
lea 4(%edi), %edi
dec %edx
@@ -357,39 +346,24 @@ DST( movl %ebx, (%edi) )
jz 7f
cmpl $2, %ecx
jb 5f
-SRC( movw (%esi), %cx )
+EXC( movw (%esi), %cx )
leal 2(%esi), %esi
-DST( movw %cx, (%edi) )
+EXC( movw %cx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%ecx
-SRC(5: movb (%esi), %cl )
-DST( movb %cl, (%edi) )
+EXC(5: movb (%esi), %cl )
+EXC( movb %cl, (%edi) )
6: addl %ecx, %eax
adcl $0, %eax
7:
-5000:
# Exception handler:
.section .fixup, "ax"
6001:
- movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
-
- # zero the complete destination - computing the rest
- # is too much work
- movl ARGBASE+8(%esp), %edi # dst
- movl ARGBASE+12(%esp), %ecx # len
- xorl %eax,%eax
- rep ; stosb
-
- jmp 5000b
-
-6002:
- movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT,(%ebx)
- jmp 5000b
+ xorl %eax, %eax
+ jmp 7b
.previous
@@ -405,14 +379,14 @@ SYM_FUNC_END(csum_partial_copy_generic)
/* Version for PentiumII/PPro */
#define ROUND1(x) \
- SRC(movl x(%esi), %ebx ) ; \
+ EXC(movl x(%esi), %ebx ) ; \
addl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
+ EXC(movl %ebx, x(%edi) ) ;
#define ROUND(x) \
- SRC(movl x(%esi), %ebx ) ; \
+ EXC(movl x(%esi), %ebx ) ; \
adcl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
+ EXC(movl %ebx, x(%edi) ) ;
#define ARGBASE 12
@@ -423,7 +397,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len
- movl ARGBASE+16(%esp),%eax #sum
+ movl $-1, %eax #sum
# movl %ecx, %edx
movl %ecx, %ebx
movl %esi, %edx
@@ -439,7 +413,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
JMP_NOSPEC ebx
1: addl $64,%esi
addl $64,%edi
- SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
+ EXC(movb -32(%edx),%bl) ; EXC(movb (%edx),%bl)
ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
@@ -453,29 +427,20 @@ SYM_FUNC_START(csum_partial_copy_generic)
jz 7f
cmpl $2, %edx
jb 5f
-SRC( movw (%esi), %dx )
+EXC( movw (%esi), %dx )
leal 2(%esi), %esi
-DST( movw %dx, (%edi) )
+EXC( movw %dx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%edx
5:
-SRC( movb (%esi), %dl )
-DST( movb %dl, (%edi) )
+EXC( movb (%esi), %dl )
+EXC( movb %dl, (%edi) )
6: addl %edx, %eax
adcl $0, %eax
7:
.section .fixup, "ax"
-6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
- # zero the complete destination (computing the rest is too much work)
- movl ARGBASE+8(%esp),%edi # dst
- movl ARGBASE+12(%esp),%ecx # len
- xorl %eax,%eax
- rep; stosb
- jmp 7b
-6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT, (%ebx)
+6001: xorl %eax, %eax
jmp 7b
.previous
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 4f1719e22d3c..b6da09339308 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -58,7 +58,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr) {
@@ -89,7 +89,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (!c)
@@ -151,7 +151,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if ((c == '=') && !*opptr) {
@@ -172,7 +172,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (myisspace(c))
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
new file mode 100644
index 000000000000..c13e8c9ee926
--- /dev/null
+++ b/arch/x86/lib/copy_mc.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/mce.h>
+
+#ifdef CONFIG_X86_MCE
+/*
+ * See COPY_MC_TEST for self-test of the copy_mc_fragile()
+ * implementation.
+ */
+static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);
+
+void enable_copy_mc_fragile(void)
+{
+ static_branch_inc(&copy_mc_fragile_key);
+}
+#define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))
+
+/*
+ * Similar to copy_user_handle_tail, probe for the write fault point, or
+ * source exception point.
+ */
+__visible notrace unsigned long
+copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
+{
+ for (; len; --len, to++, from++)
+ if (copy_mc_fragile(to, from, 1))
+ break;
+ return len;
+}
+#else
+/*
+ * No point in doing careful copying, or consulting a static key when
+ * there is no #MC handler in the CONFIG_X86_MCE=n case.
+ */
+void enable_copy_mc_fragile(void)
+{
+}
+#define copy_mc_fragile_enabled (0)
+#endif
+
+unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);
+
+/**
+ * copy_mc_to_kernel - memory copy that handles source exceptions
+ *
+ * @dst: destination address
+ * @src: source address
+ * @len: number of bytes to copy
+ *
+ * Call into the 'fragile' version on systems that benefit from avoiding
+ * corner case poison consumption scenarios. For example, accessing
+ * poison across 2 cachelines with a single instruction. Almost all
+ * other use cases can use copy_mc_enhanced_fast_string() for a fast
+ * recoverable copy, or fall back to plain memcpy.
+ *
+ * Return 0 for success, or number of bytes not copied if there was an
+ * exception.
+ */
+unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
+{
+ if (copy_mc_fragile_enabled)
+ return copy_mc_fragile(dst, src, len);
+ if (static_cpu_has(X86_FEATURE_ERMS))
+ return copy_mc_enhanced_fast_string(dst, src, len);
+ memcpy(dst, src, len);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
+
+unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
+{
+ unsigned long ret;
+
+ if (copy_mc_fragile_enabled) {
+ __uaccess_begin();
+ ret = copy_mc_fragile(dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+ if (static_cpu_has(X86_FEATURE_ERMS)) {
+ __uaccess_begin();
+ ret = copy_mc_enhanced_fast_string(dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+ return copy_user_generic(dst, src, len);
+}
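
Both new entry points follow the contract in the kernel-doc above: 0 on success, otherwise the number of trailing bytes that were not copied, so callers treat a non-zero return as a short copy rather than poking an error pointer. A userspace stand-in showing how a caller would consume that return value (copy_mc_sketch is illustrative, not the kernel function):

#include <stdio.h>
#include <string.h>

/* Stand-in with the same contract as copy_mc_to_kernel():
 * returns 0 on success or the number of bytes NOT copied on failure. */
static unsigned long copy_mc_sketch(void *dst, const void *src, unsigned len)
{
	memcpy(dst, src, len);		/* no poison to hit in userspace */
	return 0;
}

int main(void)
{
	char src[64] = "payload", dst[64];
	unsigned long rem = copy_mc_sketch(dst, src, sizeof(src));

	if (rem)	/* short copy: only len - rem leading bytes are valid */
		fprintf(stderr, "lost %lu trailing bytes to a poisoned page\n", rem);
	else
		printf("copied: %s\n", dst);
	return 0;
}
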
diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S
new file mode 100644
index 000000000000..892d8915f609
--- /dev/null
+++ b/arch/x86/lib/copy_mc_64.S
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
+
+#include <linux/linkage.h>
+#include <asm/copy_mc_test.h>
+#include <asm/export.h>
+#include <asm/asm.h>
+
+#ifndef CONFIG_UML
+
+#ifdef CONFIG_X86_MCE
+COPY_MC_TEST_CTL
+
+/*
+ * copy_mc_fragile - copy memory with indication if an exception / fault happened
+ *
+ * The 'fragile' version is opted into by platform quirks and takes
+ * pains to avoid unrecoverable corner cases like 'fast-string'
+ * instruction sequences, and consuming poison across a cacheline
+ * boundary. The non-fragile version is equivalent to memcpy()
+ * regardless of CPU machine-check-recovery capability.
+ */
+SYM_FUNC_START(copy_mc_fragile)
+ cmpl $8, %edx
+ /* Less than 8 bytes? Go to byte copy loop */
+ jb .L_no_whole_words
+
+ /* Check for bad alignment of source */
+ testl $7, %esi
+ /* Already aligned */
+ jz .L_8byte_aligned
+
+ /* Copy one byte at a time until source is 8-byte aligned */
+ movl %esi, %ecx
+ andl $7, %ecx
+ subl $8, %ecx
+ negl %ecx
+ subl %ecx, %edx
+.L_read_leading_bytes:
+ movb (%rsi), %al
+ COPY_MC_TEST_SRC %rsi 1 .E_leading_bytes
+ COPY_MC_TEST_DST %rdi 1 .E_leading_bytes
+.L_write_leading_bytes:
+ movb %al, (%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz .L_read_leading_bytes
+
+.L_8byte_aligned:
+ movl %edx, %ecx
+ andl $7, %edx
+ shrl $3, %ecx
+ jz .L_no_whole_words
+
+.L_read_words:
+ movq (%rsi), %r8
+ COPY_MC_TEST_SRC %rsi 8 .E_read_words
+ COPY_MC_TEST_DST %rdi 8 .E_write_words
+.L_write_words:
+ movq %r8, (%rdi)
+ addq $8, %rsi
+ addq $8, %rdi
+ decl %ecx
+ jnz .L_read_words
+
+ /* Any trailing bytes? */
+.L_no_whole_words:
+ andl %edx, %edx
+ jz .L_done_memcpy_trap
+
+ /* Copy trailing bytes */
+ movl %edx, %ecx
+.L_read_trailing_bytes:
+ movb (%rsi), %al
+ COPY_MC_TEST_SRC %rsi 1 .E_trailing_bytes
+ COPY_MC_TEST_DST %rdi 1 .E_trailing_bytes
+.L_write_trailing_bytes:
+ movb %al, (%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz .L_read_trailing_bytes
+
+ /* Copy successful. Return zero */
+.L_done_memcpy_trap:
+ xorl %eax, %eax
+.L_done:
+ ret
+SYM_FUNC_END(copy_mc_fragile)
+EXPORT_SYMBOL_GPL(copy_mc_fragile)
+
+ .section .fixup, "ax"
+ /*
+ * Return number of bytes not copied for any failure. Note that
+ * there is no "tail" handling since the source buffer is 8-byte
+ * aligned and poison is cacheline aligned.
+ */
+.E_read_words:
+ shll $3, %ecx
+.E_leading_bytes:
+ addl %edx, %ecx
+.E_trailing_bytes:
+ mov %ecx, %eax
+ jmp .L_done
+
+ /*
+ * For write fault handling, given the destination is unaligned,
+ * we handle faults on multi-byte writes with a byte-by-byte
+ * copy up to the write-protected page.
+ */
+.E_write_words:
+ shll $3, %ecx
+ addl %edx, %ecx
+ movl %ecx, %edx
+ jmp copy_mc_fragile_handle_tail
+
+ .previous
+
+ _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+ _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+ _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+ _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+ _ASM_EXTABLE(.L_write_words, .E_write_words)
+ _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+#endif /* CONFIG_X86_MCE */
+
+/*
+ * copy_mc_enhanced_fast_string - memory copy with exception handling
+ *
+ * Fast string copy + fault / exception handling. If the CPU does
+ * support machine check exception recovery, but does not support
+ * recovering from fast-string exceptions then this CPU needs to be
+ * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
+ * machine check recovery support this version should be no slower than
+ * standard memcpy.
+ */
+SYM_FUNC_START(copy_mc_enhanced_fast_string)
+ movq %rdi, %rax
+ movq %rdx, %rcx
+.L_copy:
+ rep movsb
+ /* Copy successful. Return zero */
+ xorl %eax, %eax
+ ret
+SYM_FUNC_END(copy_mc_enhanced_fast_string)
+
+ .section .fixup, "ax"
+.E_copy:
+ /*
+ * On fault %rcx is updated such that the copy instruction could
+ * optionally be restarted at the fault position, i.e. it
+ * contains 'bytes remaining'. A non-zero return indicates error
+ * to copy_mc_generic() users, or indicates short transfers to
+ * user-copy routines.
+ */
+ movq %rcx, %rax
+ ret
+
+ .previous
+
+ _ASM_EXTABLE_FAULT(.L_copy, .E_copy)
+#endif /* !CONFIG_UML */
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 816f128a6d52..77b9b2a3b5c8 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -15,6 +15,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
+#include <asm/trapnr.h>
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
@@ -36,8 +37,8 @@
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(100b, 103b)
- _ASM_EXTABLE_UA(101b, 103b)
+ _ASM_EXTABLE_CPY(100b, 103b)
+ _ASM_EXTABLE_CPY(101b, 103b)
.endm
/*
@@ -116,26 +117,26 @@ SYM_FUNC_START(copy_user_generic_unrolled)
60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
.previous
- _ASM_EXTABLE_UA(1b, 30b)
- _ASM_EXTABLE_UA(2b, 30b)
- _ASM_EXTABLE_UA(3b, 30b)
- _ASM_EXTABLE_UA(4b, 30b)
- _ASM_EXTABLE_UA(5b, 30b)
- _ASM_EXTABLE_UA(6b, 30b)
- _ASM_EXTABLE_UA(7b, 30b)
- _ASM_EXTABLE_UA(8b, 30b)
- _ASM_EXTABLE_UA(9b, 30b)
- _ASM_EXTABLE_UA(10b, 30b)
- _ASM_EXTABLE_UA(11b, 30b)
- _ASM_EXTABLE_UA(12b, 30b)
- _ASM_EXTABLE_UA(13b, 30b)
- _ASM_EXTABLE_UA(14b, 30b)
- _ASM_EXTABLE_UA(15b, 30b)
- _ASM_EXTABLE_UA(16b, 30b)
- _ASM_EXTABLE_UA(18b, 40b)
- _ASM_EXTABLE_UA(19b, 40b)
- _ASM_EXTABLE_UA(21b, 50b)
- _ASM_EXTABLE_UA(22b, 50b)
+ _ASM_EXTABLE_CPY(1b, 30b)
+ _ASM_EXTABLE_CPY(2b, 30b)
+ _ASM_EXTABLE_CPY(3b, 30b)
+ _ASM_EXTABLE_CPY(4b, 30b)
+ _ASM_EXTABLE_CPY(5b, 30b)
+ _ASM_EXTABLE_CPY(6b, 30b)
+ _ASM_EXTABLE_CPY(7b, 30b)
+ _ASM_EXTABLE_CPY(8b, 30b)
+ _ASM_EXTABLE_CPY(9b, 30b)
+ _ASM_EXTABLE_CPY(10b, 30b)
+ _ASM_EXTABLE_CPY(11b, 30b)
+ _ASM_EXTABLE_CPY(12b, 30b)
+ _ASM_EXTABLE_CPY(13b, 30b)
+ _ASM_EXTABLE_CPY(14b, 30b)
+ _ASM_EXTABLE_CPY(15b, 30b)
+ _ASM_EXTABLE_CPY(16b, 30b)
+ _ASM_EXTABLE_CPY(18b, 40b)
+ _ASM_EXTABLE_CPY(19b, 40b)
+ _ASM_EXTABLE_CPY(21b, 50b)
+ _ASM_EXTABLE_CPY(22b, 50b)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
@@ -180,8 +181,8 @@ SYM_FUNC_START(copy_user_generic_string)
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(1b, 11b)
- _ASM_EXTABLE_UA(3b, 12b)
+ _ASM_EXTABLE_CPY(1b, 11b)
+ _ASM_EXTABLE_CPY(3b, 12b)
SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
@@ -213,7 +214,7 @@ SYM_FUNC_START(copy_user_enhanced_fast_string)
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(1b, 12b)
+ _ASM_EXTABLE_CPY(1b, 12b)
SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
@@ -221,6 +222,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
* Try to copy last bytes and clear the rest if needed.
* Since protection fault in copy_from/to_user is not a normal situation,
* it is not necessary to optimize tail handling.
+ * Don't try to copy the tail if a machine check happened
*
* Input:
* rdi destination
@@ -232,12 +234,25 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
*/
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
movl %edx,%ecx
+ cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */
+ je 3f
1: rep movsb
2: mov %ecx,%eax
ASM_CLAC
ret
- _ASM_EXTABLE_UA(1b, 2b)
+ /*
+ * Return zero to pretend that this copy succeeded. This
+ * is counter-intuitive, but needed to prevent the code
+ * in lib/iov_iter.c from retrying and running back into
+ * the poison cache line again. The machine check handler
+ * will ensure that a SIGBUS is sent to the task.
+ */
+3: xorl %eax,%eax
+ ASM_CLAC
+ ret
+
+ _ASM_EXTABLE_CPY(1b, 2b)
SYM_CODE_END(.Lcopy_user_handle_tail)
/*
@@ -366,27 +381,27 @@ SYM_FUNC_START(__copy_user_nocache)
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(20b, .L_fixup_8b_copy)
- _ASM_EXTABLE_UA(21b, .L_fixup_8b_copy)
- _ASM_EXTABLE_UA(30b, .L_fixup_4b_copy)
- _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
- _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
- _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
+ _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy)
+ _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy)
+ _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy)
+ _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy)
+ _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy)
+ _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy)
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 3394a8ff7fd0..1fbd8ee9642d 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -18,9 +18,6 @@
* rdi source
* rsi destination
* edx len (32bit)
- * ecx sum (32bit)
- * r8 src_err_ptr (int)
- * r9 dst_err_ptr (int)
*
* Output
* eax 64bit sum. undefined in case of exception.
@@ -31,44 +28,32 @@
.macro source
10:
- _ASM_EXTABLE_UA(10b, .Lbad_source)
+ _ASM_EXTABLE_UA(10b, .Lfault)
.endm
.macro dest
20:
- _ASM_EXTABLE_UA(20b, .Lbad_dest)
+ _ASM_EXTABLE_UA(20b, .Lfault)
.endm
- /*
- * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
- * potentially unmapped kernel address.
- */
- .macro ignore L=.Lignore
-30:
- _ASM_EXTABLE(30b, \L)
- .endm
-
-
SYM_FUNC_START(csum_partial_copy_generic)
- cmpl $3*64, %edx
- jle .Lignore
-
-.Lignore:
- subq $7*8, %rsp
- movq %rbx, 2*8(%rsp)
- movq %r12, 3*8(%rsp)
- movq %r14, 4*8(%rsp)
- movq %r13, 5*8(%rsp)
- movq %r15, 6*8(%rsp)
+ subq $5*8, %rsp
+ movq %rbx, 0*8(%rsp)
+ movq %r12, 1*8(%rsp)
+ movq %r14, 2*8(%rsp)
+ movq %r13, 3*8(%rsp)
+ movq %r15, 4*8(%rsp)
- movq %r8, (%rsp)
- movq %r9, 1*8(%rsp)
-
- movl %ecx, %eax
+ movl $-1, %eax
+ xorl %r9d, %r9d
movl %edx, %ecx
+ cmpl $8, %ecx
+ jb .Lshort
- xorl %r9d, %r9d
- movq %rcx, %r12
+ testb $7, %sil
+ jne .Lunaligned
+.Laligned:
+ movl %ecx, %r12d
shrq $6, %r12
jz .Lhandle_tail /* < 64 */
@@ -99,7 +84,12 @@ SYM_FUNC_START(csum_partial_copy_generic)
source
movq 56(%rdi), %r13
- ignore 2f
+30:
+ /*
+ * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
+ * potentially unmapped kernel address.
+ */
+ _ASM_EXTABLE(30b, 2f)
prefetcht0 5*64(%rdi)
2:
adcq %rbx, %rax
@@ -131,8 +121,6 @@ SYM_FUNC_START(csum_partial_copy_generic)
dest
movq %r13, 56(%rsi)
-3:
-
leaq 64(%rdi), %rdi
leaq 64(%rsi), %rsi
@@ -142,8 +130,8 @@ SYM_FUNC_START(csum_partial_copy_generic)
/* do last up to 56 bytes */
.Lhandle_tail:
- /* ecx: count */
- movl %ecx, %r10d
+ /* ecx: count, rcx.63: the end result needs to be rol8 */
+ movq %rcx, %r10
andl $63, %ecx
shrl $3, %ecx
jz .Lfold
@@ -172,6 +160,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
.Lhandle_7:
movl %r10d, %ecx
andl $7, %ecx
+.L1: /* .Lshort rejoins the common path here */
shrl $1, %ecx
jz .Lhandle_1
movl $2, %edx
@@ -203,26 +192,65 @@ SYM_FUNC_START(csum_partial_copy_generic)
adcl %r9d, %eax /* carry */
.Lende:
- movq 2*8(%rsp), %rbx
- movq 3*8(%rsp), %r12
- movq 4*8(%rsp), %r14
- movq 5*8(%rsp), %r13
- movq 6*8(%rsp), %r15
- addq $7*8, %rsp
+ testq %r10, %r10
+ js .Lwas_odd
+.Lout:
+ movq 0*8(%rsp), %rbx
+ movq 1*8(%rsp), %r12
+ movq 2*8(%rsp), %r14
+ movq 3*8(%rsp), %r13
+ movq 4*8(%rsp), %r15
+ addq $5*8, %rsp
ret
+.Lshort:
+ movl %ecx, %r10d
+ jmp .L1
+.Lunaligned:
+ xorl %ebx, %ebx
+ testb $1, %sil
+ jne .Lodd
+1: testb $2, %sil
+ je 2f
+ source
+ movw (%rdi), %bx
+ dest
+ movw %bx, (%rsi)
+ leaq 2(%rdi), %rdi
+ subq $2, %rcx
+ leaq 2(%rsi), %rsi
+ addq %rbx, %rax
+2: testb $4, %sil
+ je .Laligned
+ source
+ movl (%rdi), %ebx
+ dest
+ movl %ebx, (%rsi)
+ leaq 4(%rdi), %rdi
+ subq $4, %rcx
+ leaq 4(%rsi), %rsi
+ addq %rbx, %rax
+ jmp .Laligned
+
+.Lodd:
+ source
+ movb (%rdi), %bl
+ dest
+ movb %bl, (%rsi)
+ leaq 1(%rdi), %rdi
+ leaq 1(%rsi), %rsi
+ /* decrement, set MSB */
+ leaq -1(%rcx, %rcx), %rcx
+ rorq $1, %rcx
+ shll $8, %ebx
+ addq %rbx, %rax
+ jmp 1b
+
+.Lwas_odd:
+ roll $8, %eax
+ jmp .Lout
- /* Exception handlers. Very simple, zeroing is done in the wrappers */
-.Lbad_source:
- movq (%rsp), %rax
- testq %rax, %rax
- jz .Lende
- movl $-EFAULT, (%rax)
- jmp .Lende
-
-.Lbad_dest:
- movq 8(%rsp), %rax
- testq %rax, %rax
- jz .Lende
- movl $-EFAULT, (%rax)
- jmp .Lende
+ /* Exception: just return 0 */
+.Lfault:
+ xorl %eax, %eax
+ jmp .Lout
SYM_FUNC_END(csum_partial_copy_generic)
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index ee63d7576fd2..189344924a2b 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -21,52 +21,16 @@
* src and dst are best aligned to 64bits.
*/
__wsum
-csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp)
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- might_sleep();
- *errp = 0;
+ __wsum sum;
+ might_sleep();
if (!user_access_begin(src, len))
- goto out_err;
-
- /*
- * Why 6, not 7? To handle odd addresses aligned we
- * would need to do considerable complications to fix the
- * checksum which is defined as an 16bit accumulator. The
- * fix alignment code is primarily for performance
- * compatibility with 32bit and that will handle odd
- * addresses slowly too.
- */
- if (unlikely((unsigned long)src & 6)) {
- while (((unsigned long)src & 6) && len >= 2) {
- __u16 val16;
-
- unsafe_get_user(val16, (const __u16 __user *)src, out);
-
- *(__u16 *)dst = val16;
- isum = (__force __wsum)add32_with_carry(
- (__force unsigned)isum, val16);
- src += 2;
- dst += 2;
- len -= 2;
- }
- }
- isum = csum_partial_copy_generic((__force const void *)src,
- dst, len, isum, errp, NULL);
- user_access_end();
- if (unlikely(*errp))
- goto out_err;
-
- return isum;
-
-out:
+ return 0;
+ sum = csum_partial_copy_generic((__force const void *)src, dst, len);
user_access_end();
-out_err:
- *errp = -EFAULT;
- memset(dst, 0, len);
-
- return isum;
+ return sum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -82,40 +46,16 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
* src and dst are best aligned to 64bits.
*/
__wsum
-csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp)
+csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
- __wsum ret;
+ __wsum sum;
might_sleep();
-
- if (!user_access_begin(dst, len)) {
- *errp = -EFAULT;
+ if (!user_access_begin(dst, len))
return 0;
- }
-
- if (unlikely((unsigned long)dst & 6)) {
- while (((unsigned long)dst & 6) && len >= 2) {
- __u16 val16 = *(__u16 *)src;
-
- isum = (__force __wsum)add32_with_carry(
- (__force unsigned)isum, val16);
- unsafe_put_user(val16, (__u16 __user *)dst, out);
- src += 2;
- dst += 2;
- len -= 2;
- }
- }
-
- *errp = 0;
- ret = csum_partial_copy_generic(src, (void __force *)dst,
- len, isum, NULL, errp);
- user_access_end();
- return ret;
-out:
+ sum = csum_partial_copy_generic(src, (void __force *)dst, len);
user_access_end();
- *errp = -EFAULT;
- return isum;
+ return sum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
@@ -129,9 +69,9 @@ EXPORT_SYMBOL(csum_and_copy_to_user);
* Returns an 32bit unfolded checksum of the buffer.
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
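
With the sum and error-pointer arguments gone, the asm above seeds the accumulator with ~0 and the fault fixups return 0, so a zero result unambiguously means the copy faulted: end-around-carry addition starting from a non-zero value can never produce zero. A small demonstration of that property using an add32_with_carry()-style fold:

#include <stdint.h>
#include <stdio.h>

/* End-around-carry 32-bit add, in the style of the kernel's add32_with_carry(). */
static uint32_t add32_carry(uint32_t a, uint32_t b)
{
	uint64_t s = (uint64_t)a + b;
	return (uint32_t)s + (uint32_t)(s >> 32);
}

int main(void)
{
	/* Seeding with ~0 means a successful (unfolded) sum can never be 0,
	 * so the fixup path can return 0 to mean "faulted". */
	uint32_t words[] = { 0x0, 0x1, 0xffffffff, 0x12345678 };
	uint32_t sum = ~0u;

	for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum = add32_carry(sum, words[i]);

	printf("sum = %#x (non-zero by construction)\n", sum);
	return 0;
}
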
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index c8a85b512796..2cd902e06062 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -35,6 +35,8 @@
#include <asm/smap.h>
#include <asm/export.h>
+#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC
+
.text
SYM_FUNC_START(__get_user_1)
mov PER_CPU_VAR(current_task), %_ASM_DX
@@ -114,6 +116,52 @@ SYM_FUNC_START(__get_user_8)
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
+/* .. and the same for __get_user, just without the range checks */
+SYM_FUNC_START(__get_user_nocheck_1)
+ ASM_STAC
+ ASM_BARRIER_NOSPEC
+6: movzbl (%_ASM_AX),%edx
+ xor %eax,%eax
+ ASM_CLAC
+ ret
+SYM_FUNC_END(__get_user_nocheck_1)
+EXPORT_SYMBOL(__get_user_nocheck_1)
+
+SYM_FUNC_START(__get_user_nocheck_2)
+ ASM_STAC
+ ASM_BARRIER_NOSPEC
+7: movzwl (%_ASM_AX),%edx
+ xor %eax,%eax
+ ASM_CLAC
+ ret
+SYM_FUNC_END(__get_user_nocheck_2)
+EXPORT_SYMBOL(__get_user_nocheck_2)
+
+SYM_FUNC_START(__get_user_nocheck_4)
+ ASM_STAC
+ ASM_BARRIER_NOSPEC
+8: movl (%_ASM_AX),%edx
+ xor %eax,%eax
+ ASM_CLAC
+ ret
+SYM_FUNC_END(__get_user_nocheck_4)
+EXPORT_SYMBOL(__get_user_nocheck_4)
+
+SYM_FUNC_START(__get_user_nocheck_8)
+ ASM_STAC
+ ASM_BARRIER_NOSPEC
+#ifdef CONFIG_X86_64
+9: movq (%_ASM_AX),%rdx
+#else
+9: movl (%_ASM_AX),%edx
+10: movl 4(%_ASM_AX),%ecx
+#endif
+ xor %eax,%eax
+ ASM_CLAC
+ ret
+SYM_FUNC_END(__get_user_nocheck_8)
+EXPORT_SYMBOL(__get_user_nocheck_8)
+
SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
ASM_CLAC
@@ -134,6 +182,7 @@ bad_get_user_8:
SYM_CODE_END(.Lbad_get_user_8_clac)
#endif
+/* get_user */
_ASM_EXTABLE_UA(1b, .Lbad_get_user_clac)
_ASM_EXTABLE_UA(2b, .Lbad_get_user_clac)
_ASM_EXTABLE_UA(3b, .Lbad_get_user_clac)
@@ -143,3 +192,14 @@ SYM_CODE_END(.Lbad_get_user_8_clac)
_ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac)
_ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac)
#endif
+
+/* __get_user */
+ _ASM_EXTABLE_UA(6b, .Lbad_get_user_clac)
+ _ASM_EXTABLE_UA(7b, .Lbad_get_user_clac)
+ _ASM_EXTABLE_UA(8b, .Lbad_get_user_clac)
+#ifdef CONFIG_X86_64
+ _ASM_EXTABLE_UA(9b, .Lbad_get_user_clac)
+#else
+ _ASM_EXTABLE_UA(9b, .Lbad_get_user_8_clac)
+ _ASM_EXTABLE_UA(10b, .Lbad_get_user_8_clac)
+#endif
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 31600d851fd8..58f7fb95c7f4 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -20,6 +20,7 @@
enum reg_type {
REG_TYPE_RM = 0,
+ REG_TYPE_REG,
REG_TYPE_INDEX,
REG_TYPE_BASE,
};
@@ -53,6 +54,30 @@ static bool is_string_insn(struct insn *insn)
}
/**
+ * insn_has_rep_prefix() - Determine if instruction has a REP prefix
+ * @insn: Instruction containing the prefix to inspect
+ *
+ * Returns:
+ *
+ * true if the instruction has a REP prefix, false if not.
+ */
+bool insn_has_rep_prefix(struct insn *insn)
+{
+ int i;
+
+ insn_get_prefixes(insn);
+
+ for (i = 0; i < insn->prefixes.nbytes; i++) {
+ insn_byte_t p = insn->prefixes.bytes[i];
+
+ if (p == 0xf2 || p == 0xf3)
+ return true;
+ }
+
+ return false;
+}
+
+/**
* get_seg_reg_override_idx() - obtain segment register override index
* @insn: Valid instruction with segment override prefixes
*
@@ -179,7 +204,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
if (insn->addr_bytes == 2)
return -EINVAL;
- /* fall through */
+ fallthrough;
case -EDOM:
case offsetof(struct pt_regs, bx):
@@ -362,7 +387,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
case INAT_SEG_REG_GS:
return vm86regs->gs;
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
@@ -386,7 +410,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
*/
return get_user_gs(regs);
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
@@ -441,6 +464,13 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
regno += 8;
break;
+ case REG_TYPE_REG:
+ regno = X86_MODRM_REG(insn->modrm.value);
+
+ if (X86_REX_R(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
case REG_TYPE_INDEX:
regno = X86_SIB_INDEX(insn->sib.value);
if (X86_REX_X(insn->rex_prefix.value))
@@ -786,7 +816,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
*/
return INSN_CODE_SEG_PARAMS(4, 8);
case 3: /* Invalid setting. CS.L=1, CS.D=1 */
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -810,6 +840,21 @@ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs)
}
/**
+ * insn_get_modrm_reg_off() - Obtain register in reg part of the ModRM byte
+ * @insn: Instruction containing the ModRM byte
+ * @regs: Register values as seen when entering kernel mode
+ *
+ * Returns:
+ *
+ * The register indicated by the reg part of the ModRM byte. The
+ * register is obtained as an offset from the base of pt_regs.
+ */
+int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs)
+{
+ return get_reg_offset(insn, regs, REG_TYPE_REG);
+}
+
+/**
* get_seg_base_limit() - obtain base address and limit of a segment
* @insn: Instruction. Must be valid.
* @regs: Register values as seen when entering kernel mode
@@ -1369,3 +1414,86 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
return (void __user *)-1L;
}
}
+
+/**
+ * insn_fetch_from_user() - Copy instruction bytes from user-space memory
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @buf: Array to store the fetched instruction
+ *
+ * Gets the linear address of the instruction and copies the instruction bytes
+ * into buf.

+ *
+ * Returns:
+ *
+ * Number of instruction bytes copied.
+ *
+ * 0 if nothing was copied.
+ */
+int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
+{
+ unsigned long seg_base = 0;
+ int not_copied;
+
+ /*
+ * If not in user-space long mode, a custom code segment could be in
+ * use. This is true in protected mode (if the process defined a local
+ * descriptor table), or virtual-8086 mode. In most of the cases
+ * seg_base will be zero as in USER_CS.
+ */
+ if (!user_64bit_mode(regs)) {
+ seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
+ if (seg_base == -1L)
+ return 0;
+ }
+
+
+ not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
+ MAX_INSN_SIZE);
+
+ return MAX_INSN_SIZE - not_copied;
+}
+
+/**
+ * insn_decode() - Decode an instruction
+ * @insn: Structure to store decoded instruction
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @buf: Buffer containing the instruction bytes
+ * @buf_size: Number of instruction bytes available in buf
+ *
+ * Decodes the instruction provided in buf and stores the decoding results in
+ * insn. Also determines the correct address and operand sizes.
+ *
+ * Returns:
+ *
+ * True if instruction was decoded, False otherwise.
+ */
+bool insn_decode(struct insn *insn, struct pt_regs *regs,
+ unsigned char buf[MAX_INSN_SIZE], int buf_size)
+{
+ int seg_defs;
+
+ insn_init(insn, buf, buf_size, user_64bit_mode(regs));
+
+ /*
+ * Override the default operand and address sizes with what is specified
+ * in the code segment descriptor. The instruction decoder only sets
+	 * the address size to either 4 or 8 address bytes and does nothing
+	 * for the operand bytes. This is OK for most cases, but we could
+ * have special cases where, for instance, a 16-bit code segment
+ * descriptor is used.
+ * If there is an address override prefix, the instruction decoder
+ * correctly updates these values, even for 16-bit defaults.
+ */
+ seg_defs = insn_get_code_seg_params(regs);
+ if (seg_defs == -EINVAL)
+ return false;
+
+ insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs);
+ insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs);
+
+ insn_get_length(insn);
+ if (buf_size < insn->length)
+ return false;
+
+ return true;
+}
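[Editorial aside, not part of the patch: insn_has_rep_prefix() above amounts to scanning the collected prefix bytes for 0xf2 (REPNE/REPNZ) or 0xf3 (REP/REPE/REPZ). A stand-alone sketch of the same check over a raw prefix-byte array; the helper name is illustrative and this is not the kernel's struct insn API:]

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool has_rep_prefix(const uint8_t *prefixes, size_t nbytes)
{
	size_t i;

	for (i = 0; i < nbytes; i++) {
		/* 0xf2 = REPNE/REPNZ, 0xf3 = REP/REPE/REPZ */
		if (prefixes[i] == 0xf2 || prefixes[i] == 0xf3)
			return true;
	}
	return false;
}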
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index bbcc05bcefad..037faac46b0c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,7 +4,6 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
-#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
@@ -187,117 +186,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
SYM_FUNC_END(memcpy_orig)
.popsection
-
-#ifndef CONFIG_UML
-
-MCSAFE_TEST_CTL
-
-/*
- * __memcpy_mcsafe - memory copy with machine check exception handling
- * Note that we only catch machine checks when reading the source addresses.
- * Writes to target are posted and don't generate machine checks.
- */
-SYM_FUNC_START(__memcpy_mcsafe)
- cmpl $8, %edx
- /* Less than 8 bytes? Go to byte copy loop */
- jb .L_no_whole_words
-
- /* Check for bad alignment of source */
- testl $7, %esi
- /* Already aligned */
- jz .L_8byte_aligned
-
- /* Copy one byte at a time until source is 8-byte aligned */
- movl %esi, %ecx
- andl $7, %ecx
- subl $8, %ecx
- negl %ecx
- subl %ecx, %edx
-.L_read_leading_bytes:
- movb (%rsi), %al
- MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
- MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
-.L_write_leading_bytes:
- movb %al, (%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .L_read_leading_bytes
-
-.L_8byte_aligned:
- movl %edx, %ecx
- andl $7, %edx
- shrl $3, %ecx
- jz .L_no_whole_words
-
-.L_read_words:
- movq (%rsi), %r8
- MCSAFE_TEST_SRC %rsi 8 .E_read_words
- MCSAFE_TEST_DST %rdi 8 .E_write_words
-.L_write_words:
- movq %r8, (%rdi)
- addq $8, %rsi
- addq $8, %rdi
- decl %ecx
- jnz .L_read_words
-
- /* Any trailing bytes? */
-.L_no_whole_words:
- andl %edx, %edx
- jz .L_done_memcpy_trap
-
- /* Copy trailing bytes */
- movl %edx, %ecx
-.L_read_trailing_bytes:
- movb (%rsi), %al
- MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
- MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
-.L_write_trailing_bytes:
- movb %al, (%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .L_read_trailing_bytes
-
- /* Copy successful. Return zero */
-.L_done_memcpy_trap:
- xorl %eax, %eax
-.L_done:
- ret
-SYM_FUNC_END(__memcpy_mcsafe)
-EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
-
- .section .fixup, "ax"
- /*
- * Return number of bytes not copied for any failure. Note that
- * there is no "tail" handling since the source buffer is 8-byte
- * aligned and poison is cacheline aligned.
- */
-.E_read_words:
- shll $3, %ecx
-.E_leading_bytes:
- addl %edx, %ecx
-.E_trailing_bytes:
- mov %ecx, %eax
- jmp .L_done
-
- /*
- * For write fault handling, given the destination is unaligned,
- * we handle faults on multi-byte writes with a byte-by-byte
- * copy up to the write-protected page.
- */
-.E_write_words:
- shll $3, %ecx
- addl %edx, %ecx
- movl %ecx, %edx
- jmp mcsafe_handle_tail
-
- .previous
-
- _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
- _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
- _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
- _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
- _ASM_EXTABLE(.L_write_words, .E_write_words)
- _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
-#endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 7c7c92db8497..b34a17763f28 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -25,7 +25,9 @@
* Inputs: %eax[:%edx] contains the data
* %ecx contains the address
*
- * Outputs: %eax is error code (0 or -EFAULT)
+ * Outputs: %ecx is error code (0 or -EFAULT)
+ *
+ * Clobbers: %ebx needed for task pointer
*
* These functions should not modify any other registers,
* as they get called from within inline assembly.
@@ -38,13 +40,15 @@ SYM_FUNC_START(__put_user_1)
ENTER
cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
jae .Lbad_put_user
+SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
ASM_STAC
1: movb %al,(%_ASM_CX)
- xor %eax,%eax
+ xor %ecx,%ecx
ASM_CLAC
ret
SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
+EXPORT_SYMBOL(__put_user_nocheck_1)
SYM_FUNC_START(__put_user_2)
ENTER
@@ -52,13 +56,15 @@ SYM_FUNC_START(__put_user_2)
sub $1,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
+SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
ASM_STAC
2: movw %ax,(%_ASM_CX)
- xor %eax,%eax
+ xor %ecx,%ecx
ASM_CLAC
ret
SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
+EXPORT_SYMBOL(__put_user_nocheck_2)
SYM_FUNC_START(__put_user_4)
ENTER
@@ -66,13 +72,15 @@ SYM_FUNC_START(__put_user_4)
sub $3,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
+SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
ASM_STAC
3: movl %eax,(%_ASM_CX)
- xor %eax,%eax
+ xor %ecx,%ecx
ASM_CLAC
ret
SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
+EXPORT_SYMBOL(__put_user_nocheck_4)
SYM_FUNC_START(__put_user_8)
ENTER
@@ -80,21 +88,23 @@ SYM_FUNC_START(__put_user_8)
sub $7,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
+SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL)
ASM_STAC
4: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
5: movl %edx,4(%_ASM_CX)
#endif
- xor %eax,%eax
+ xor %ecx,%ecx
ASM_CLAC
RET
SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
+EXPORT_SYMBOL(__put_user_nocheck_8)
SYM_CODE_START_LOCAL(.Lbad_put_user_clac)
ASM_CLAC
.Lbad_put_user:
- movl $-EFAULT,%eax
+ movl $-EFAULT,%ecx
RET
SYM_CODE_END(.Lbad_put_user_clac)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b0dfac3d3df7..508c81e97ab1 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -56,27 +56,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
}
EXPORT_SYMBOL(clear_user);
-/*
- * Similar to copy_user_handle_tail, probe for the write fault point,
- * but reuse __memcpy_mcsafe in case a new read error is encountered.
- * clac() is handled in _copy_to_iter_mcsafe().
- */
-__visible notrace unsigned long
-mcsafe_handle_tail(char *to, char *from, unsigned len)
-{
- for (; len; --len, to++, from++) {
- /*
- * Call the assembly routine back directly since
- * memcpy_mcsafe() may silently fallback to memcpy.
- */
- unsigned long rem = __memcpy_mcsafe(to, from, 1);
-
- if (rem)
- break;
- }
- return len;
-}
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
* clean_cache_range - write back a cache range with CLWB
@@ -120,7 +99,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
*/
if (size < 8) {
if (!IS_ALIGNED(dest, 4) || size != 4)
- clean_cache_range(dst, 1);
+ clean_cache_range(dst, size);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 73dc66d887f3..ec071cbb0804 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -186,7 +186,7 @@ void FPU_printall(void)
case TAG_Special:
/* Update tagi for the printk below */
tagi = FPU_Special(r);
- /* fall through */
+ fallthrough;
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index 127ea54122d7..4a9887851ad8 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
case TW_Denormal:
if (denormal_operand() < 0)
return;
- /* fall through */
+ fallthrough;
case TAG_Zero:
case TAG_Valid:
setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 770b613790b3..f5e1e60c9095 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -21,7 +21,8 @@ DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
#endif
-struct cpu_entry_area *get_cpu_entry_area(int cpu)
+/* Is called from entry code, so must be noinstr */
+noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 1d6cb07f4f86..b93d6cd08a7f 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -5,6 +5,7 @@
#include <xen/xen.h>
#include <asm/fpu/internal.h>
+#include <asm/sev-es.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
@@ -80,6 +81,18 @@ __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_uaccess);
+__visible bool ex_handler_copy(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
+{
+ WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
+ regs->ip = ex_fixup_addr(fixup);
+ regs->ax = trapnr;
+ return true;
+}
+EXPORT_SYMBOL(ex_handler_copy);
+
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
@@ -125,17 +138,21 @@ __visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_clear_fs);
-__visible bool ex_has_fault_handler(unsigned long ip)
+enum handler_type ex_get_fault_handler_type(unsigned long ip)
{
const struct exception_table_entry *e;
ex_handler_t handler;
e = search_exception_tables(ip);
if (!e)
- return false;
+ return EX_HANDLER_NONE;
handler = ex_fixup_handler(e);
-
- return handler == ex_handler_fault;
+ if (handler == ex_handler_fault)
+ return EX_HANDLER_FAULT;
+ else if (handler == ex_handler_uaccess || handler == ex_handler_copy)
+ return EX_HANDLER_UACCESS;
+ else
+ return EX_HANDLER_OTHER;
}
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 35f1498e9832..42606a04ae85 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -190,6 +190,53 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
return pmd_k;
}
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ *
+ * This is needed because there is a race condition between the time
+ * when the vmalloc mapping code updates the PMD to the point in time
+ * where it synchronizes this update with the other page-tables in the
+ * system.
+ *
+ * In this race window another thread/CPU can map an area on the same
+ * PMD, find it already present, and therefore not synchronize it with the
+ * rest of the system yet. As a result v[mz]alloc might return areas
+ * which are not mapped in every page-table in the system, causing an
+ * unhandled page-fault when they are accessed.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ unsigned long pgd_paddr;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ /* Make sure we are in vmalloc area: */
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "current" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3_pa();
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+
+ if (pmd_large(*pmd_k))
+ return 0;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ return -1;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(vmalloc_fault);
+
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -1081,7 +1128,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
return 0;
}
-static int fault_in_kernel_space(unsigned long address)
+bool fault_in_kernel_space(unsigned long address)
{
/*
* On 64-bit systems, the vsyscall page is at an address above
@@ -1110,6 +1157,37 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
*/
WARN_ON_ONCE(hw_error_code & X86_PF_PK);
+#ifdef CONFIG_X86_32
+ /*
+ * We can fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * Before doing this on-demand faulting, ensure that the
+ * fault is not any of the following:
+ * 1. A fault on a PTE with a reserved bit set.
+ * 2. A fault caused by a user-mode access. (Do not demand-
+ * fault kernel memory due to user-mode accesses).
+ * 3. A fault caused by a page-level protection violation.
+ * (A demand fault would be on a non-present page which
+ * would have X86_PF_PROT==0).
+ *
+ * This is only needed to close a race condition on x86-32 in
+ * the vmalloc mapping/unmapping code. See the comment above
+ * vmalloc_fault() for details. On x86-64 the race does not
+ * exist as the vmalloc mappings don't need to be synchronized
+ * there.
+ */
+ if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+ }
+#endif
+
/* Was the fault spurious, caused by lazy TLB invalidation? */
if (spurious_kernel_fault(hw_error_code, address))
return;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a4ac13cc3fdc..b5a3fa4033d3 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -217,11 +217,6 @@ static void sync_global_pgds(unsigned long start, unsigned long end)
sync_global_pgds_l4(start, end);
}
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
- sync_global_pgds(start, end);
-}
-
/*
* NOTE: This function is marked __ref because it calls __init function
* (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
@@ -1257,14 +1252,19 @@ static void __init preallocate_vmalloc_pages(void)
if (!p4d)
goto failed;
- /*
- * With 5-level paging the P4D level is not folded. So the PGDs
- * are now populated and there is no need to walk down to the
- * PUD level.
- */
if (pgtable_l5_enabled())
continue;
+ /*
+ * The goal here is to allocate all possibly required
+ * hardware page tables pointed to by the top hardware
+ * level.
+ *
+ * On 4-level systems, the P4D layer is folded away and
+ * the above code does no preallocation. Below, go down
+ * to the pud _software_ level to ensure the second
+ * hardware level is allocated on 4-level systems too.
+ */
lvl = "pud";
pud = pud_alloc(&init_mm, p4d, addr);
if (!pud)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 84d85dbd1dad..9e5ccc56f8e0 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -574,7 +574,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
/* For SEV, these areas are encrypted */
if (sev_active())
break;
- /* Fallthrough */
+ fallthrough;
case E820_TYPE_PRAM:
return true;
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9f1177edc2e7..ebb7edc8bc0a 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -38,6 +38,7 @@
* section is later cleared.
*/
u64 sme_me_mask __section(.data) = 0;
+u64 sev_status __section(.data) = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);
@@ -347,7 +348,13 @@ bool sme_active(void)
bool sev_active(void)
{
- return sme_me_mask && sev_enabled;
+ return sev_status & MSR_AMD64_SEV_ENABLED;
+}
+
+/* Needs to be called from non-instrumentable code */
+bool noinstr sev_es_active(void)
+{
+ return sev_status & MSR_AMD64_SEV_ES_ENABLED;
}
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
@@ -400,6 +407,31 @@ void __init mem_encrypt_free_decrypted_mem(void)
free_init_pages("unused decrypted", vaddr, vaddr_end);
}
+static void print_mem_encrypt_feature_info(void)
+{
+ pr_info("AMD Memory Encryption Features active:");
+
+ /* Secure Memory Encryption */
+ if (sme_active()) {
+ /*
+ * SME is mutually exclusive with any of the SEV
+ * features below.
+ */
+ pr_cont(" SME\n");
+ return;
+ }
+
+ /* Secure Encrypted Virtualization */
+ if (sev_active())
+ pr_cont(" SEV");
+
+ /* Encrypted Register State */
+ if (sev_es_active())
+ pr_cont(" SEV-ES");
+
+ pr_cont("\n");
+}
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
@@ -415,8 +447,6 @@ void __init mem_encrypt_init(void)
if (sev_active())
static_branch_enable(&sev_enable_key);
- pr_info("AMD %s active\n",
- sev_active() ? "Secure Encrypted Virtualization (SEV)"
- : "Secure Memory Encryption (SME)");
+ print_mem_encrypt_feature_info();
}
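[Editorial aside, not part of the patch: with this change sev_active()/sev_es_active() reduce to bit tests on the cached SEV_STATUS MSR value, and print_mem_encrypt_feature_info() reports the resulting combination. A user-space sketch of those tests; the bit positions follow the MSR_AMD64_SEV_ENABLED / MSR_AMD64_SEV_ES_ENABLED definitions (bits 0 and 1), and SME is modeled as a separate flag since it is not reported in that MSR:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEV_ENABLED	(1ULL << 0)	/* MSR_AMD64_SEV_ENABLED */
#define SEV_ES_ENABLED	(1ULL << 1)	/* MSR_AMD64_SEV_ES_ENABLED */

static void report_mem_encrypt(uint64_t sev_status, bool sme)
{
	printf("AMD Memory Encryption Features active:");

	if (sme) {
		/* SME is mutually exclusive with the SEV features below. */
		printf(" SME\n");
		return;
	}
	if (sev_status & SEV_ENABLED)
		printf(" SEV");
	if (sev_status & SEV_ES_ENABLED)
		printf(" SEV-ES");
	printf("\n");
}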
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index e2b0e2ac07bb..68d75379e06a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -540,6 +540,9 @@ void __init sme_enable(struct boot_params *bp)
if (!(msr & MSR_AMD64_SEV_ENABLED))
return;
+ /* Save SEV_STATUS to avoid reading MSR again */
+ sev_status = msr;
+
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
sev_enabled = true;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index aa76ec2d359b..44148691d78b 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -37,14 +37,12 @@ static __init int numa_setup(char *opt)
return -EINVAL;
if (!strncmp(opt, "off", 3))
numa_off = 1;
-#ifdef CONFIG_NUMA_EMU
if (!strncmp(opt, "fake=", 5))
- numa_emu_cmdline(opt + 5);
-#endif
-#ifdef CONFIG_ACPI_NUMA
+ return numa_emu_cmdline(opt + 5);
if (!strncmp(opt, "noacpi", 6))
- acpi_numa = -1;
-#endif
+ disable_srat();
+ if (!strncmp(opt, "nohmat", 6))
+ disable_hmat();
return 0;
}
early_param("numa", numa_setup);
@@ -516,7 +514,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
* memory ranges, because quirks such as trim_snb_memory()
* reserve specific pages for Sandy Bridge graphics. ]
*/
- for_each_memblock(reserved, mb_region) {
+ for_each_reserved_mem_region(mb_region) {
int nid = memblock_get_region_node(mb_region);
if (nid != MAX_NUMNODES)
@@ -748,6 +746,27 @@ static void __init init_memory_less_node(int nid)
}
/*
+ * A node may exist which has one or more Generic Initiators but no CPUs and no
+ * memory.
+ *
+ * This function must be called after init_cpu_to_node(), to ensure that any
+ * memoryless CPU nodes have already been brought online, and before the
+ * node_data[nid] is needed for zone list setup in build_all_zonelists().
+ *
+ * When this function is called, any nodes containing memory and/or CPUs
+ * will already be online and there is no need to do anything extra, even if
+ * they also contain one or more Generic Initiators.
+ */
+void __init init_gi_nodes(void)
+{
+ int nid;
+
+ for_each_node_state(nid, N_GENERIC_INITIATOR)
+ if (!node_online(nid))
+ init_memory_less_node(nid);
+}
+
+/*
* Setup early cpu_to_node.
*
* Populate cpu_to_node[] only if x86_cpu_to_apicid[],
@@ -919,7 +938,6 @@ int phys_to_target_node(phys_addr_t start)
return meminfo_to_nid(&numa_reserved_meminfo, start);
}
-EXPORT_SYMBOL_GPL(phys_to_target_node);
int memory_add_physaddr_to_nid(u64 start)
{
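[Editorial aside, not part of the patch: the numa_setup() rework above folds the CONFIG_NUMA_EMU/CONFIG_ACPI_NUMA ifdefs into plain helpers and lets "fake=" propagate the sub-parser's return value. A rough sketch of the same option handling with strncmp(), using illustrative flag pointers rather than the kernel's globals:]

#include <string.h>

static int sketch_numa_setup(const char *opt, int *numa_off,
			     int *srat_off, int *hmat_off)
{
	if (!opt)
		return -1;
	if (!strncmp(opt, "off", 3))
		*numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return 0;	/* would call numa_emu_cmdline(opt + 5) and return its result */
	if (!strncmp(opt, "noacpi", 6))
		*srat_off = 1;	/* disable_srat() analogue */
	if (!strncmp(opt, "nohmat", 6))
		*hmat_off = 1;	/* disable_hmat() analogue */
	return 0;
}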
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index c5174b4e318b..87d77cc52f86 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -13,9 +13,10 @@
static int emu_nid_to_phys[MAX_NUMNODES];
static char *emu_cmdline __initdata;
-void __init numa_emu_cmdline(char *str)
+int __init numa_emu_cmdline(char *str)
{
emu_cmdline = str;
+ return 0;
}
static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
@@ -321,7 +322,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
u64 addr, u64 max_addr, u64 size)
{
return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
- 0, NULL, NUMA_NO_NODE);
+ 0, NULL, 0);
}
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index d1b2a889f035..40baa90e74f4 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1999,7 +1999,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
/*
* Before changing the encryption attribute, we need to flush caches.
*/
- cpa_flush(&cpa, 1);
+ cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
ret = __change_page_attr_set_clr(&cpa, 1);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1a3569b43aa5..11666ba19b62 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,7 +14,6 @@
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
-#include <asm/uv/uv.h>
#include "mm_internal.h"
@@ -555,21 +554,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
load_new_mm_cr3(next->pgd, new_asid, true);
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
load_new_mm_cr3(next->pgd, new_asid, false);
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
/* Make sure we write CR3 before loaded_mm. */
@@ -809,29 +799,6 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
trace_tlb_flush(TLB_REMOTE_SEND_IPI,
(info->end - info->start) >> PAGE_SHIFT);
- if (is_uv_system()) {
- /*
- * This whole special case is confused. UV has a "Broadcast
- * Assist Unit", which seems to be a fancy way to send IPIs.
- * Back when x86 used an explicit TLB flush IPI, UV was
- * optimized to use its own mechanism. These days, x86 uses
- * smp_call_function_many(), but UV still uses a manual IPI,
- * and that IPI's action is out of date -- it does a manual
- * flush instead of calling flush_tlb_func_remote(). This
- * means that the percpu tlb_gen variables won't be updated
- * and we'll do pointless flushes on future context switches.
- *
- * Rather than hooking native_flush_tlb_others() here, I think
- * that UV should be updated so that smp_call_function_many(),
- * etc, are optimal on UV.
- */
- cpumask = uv_flush_tlb_others(cpumask, info);
- if (cpumask)
- smp_call_function_many(cpumask, flush_tlb_func_remote,
- (void *)info, 1);
- return;
- }
-
/*
* If no page tables were freed, we can skip sending IPIs to
* CPUs in lazy TLB mode. They will flush the CPU themselves
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index df1d95913d4e..3507f456fcd0 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -19,6 +19,7 @@
#include <asm/smp.h>
#include <asm/pci_x86.h>
#include <asm/setup.h>
+#include <asm/irqdomain.h>
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
@@ -633,8 +634,9 @@ static void set_dev_domain_options(struct pci_dev *pdev)
int pcibios_add_device(struct pci_dev *dev)
{
- struct setup_data *data;
struct pci_setup_rom *rom;
+ struct irq_domain *msidom;
+ struct setup_data *data;
u64 pa_data;
pa_data = boot_params.hdr.setup_data;
@@ -661,6 +663,20 @@ int pcibios_add_device(struct pci_dev *dev)
memunmap(data);
}
set_dev_domain_options(dev);
+
+ /*
+	 * Set up the initial MSI domain of the device. If the underlying
+	 * bus has a PCI/MSI irqdomain associated, use the bus domain;
+	 * otherwise set the default domain. This ensures that special irq
+	 * domains, e.g. VMD, are preserved. The default ensures initial
+	 * operation if irq remapping is not active. If irq remapping is
+	 * active, it will overwrite the domain pointer when the device is
+	 * associated with a remapping domain.
+ */
+ msidom = dev_get_msi_domain(&dev->bus->dev);
+ if (!msidom)
+ msidom = x86_pci_msi_default_domain;
+ dev_set_msi_domain(&dev->dev, msidom);
return 0;
}
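[Editorial aside, not part of the patch: the per-device MSI domain selection added above is a plain "prefer the bus domain, otherwise fall back to the platform default" assignment. A sketch with placeholder structures; the names are illustrative, not the kernel API:]

struct irq_domain;			/* opaque in this sketch */

struct sketch_bus {
	struct irq_domain *msi_domain;	/* e.g. a VMD-provided domain, or NULL */
};

struct sketch_dev {
	struct sketch_bus *bus;
	struct irq_domain *msi_domain;
};

static struct irq_domain *default_msi_domain;	/* x86_pci_msi_default_domain analogue */

static void assign_msi_domain(struct sketch_dev *dev)
{
	struct irq_domain *d = dev->bus->msi_domain;

	if (!d)
		d = default_msi_domain;
	dev->msi_domain = d;
}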
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index 5fc617edf108..00bfa1ebad6c 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -3,16 +3,17 @@
#include <linux/init.h>
#include <asm/pci_x86.h>
#include <asm/x86_init.h>
+#include <asm/irqdomain.h>
/* arch_initcall has too random ordering, so call the initializers
in the right sequence from here. */
static __init int pci_arch_init(void)
{
-#ifdef CONFIG_PCI_DIRECT
- int type = 0;
+ int type;
+
+ x86_create_pci_msi_domain();
type = pci_direct_probe();
-#endif
if (!(pci_probe & PCI_PROBE_NOEARLY))
pci_mmcfg_early_init();
@@ -20,18 +21,16 @@ static __init int pci_arch_init(void)
if (x86_init.pci.arch_init && !x86_init.pci.arch_init())
return 0;
-#ifdef CONFIG_PCI_BIOS
pci_pcbios_init();
-#endif
+
/*
* don't check for raw_pci_ops here because we want pcbios as last
* fallback, yet it's needed to run first to set pcibios_last_bus
* in case legacy PCI probing is used. otherwise detecting peer busses
* fails.
*/
-#ifdef CONFIG_PCI_DIRECT
pci_direct_init(type);
-#endif
+
if (!raw_pci_ops && !raw_pci_ext_ops)
printk(KERN_ERR
"PCI: Fatal: No config space access function found\n");
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index c313d784efab..11c0e80b9ed4 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -15,7 +15,6 @@
#include <asm/iommu.h>
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
-extern int swiotlb_late_init_with_default_size(size_t default_size);
/*
* We build a list of bus numbers that are under the ConneXt. The
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 9f9aad42ccff..c552cd2d0632 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -26,6 +26,7 @@
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
+#include <asm/acpi.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
@@ -156,6 +157,13 @@ static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
+struct xen_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+};
+
+static struct xen_msi_ops xen_msi_ops __ro_after_init;
+
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int irq, ret, i;
@@ -371,28 +379,122 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
}
}
-#endif
+#else /* CONFIG_XEN_DOM0 */
+#define xen_initdom_setup_msi_irqs NULL
+#define xen_initdom_restore_msi_irqs NULL
+#endif /* !CONFIG_XEN_DOM0 */
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *msidesc;
+ int i;
+
+ for_each_pci_msi_entry(msidesc, dev) {
+ if (msidesc->irq) {
+ for (i = 0; i < msidesc->nvec_used; i++)
+ xen_destroy_irq(msidesc->irq + i);
+ }
+ }
+}
+
+static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *msidesc = first_pci_msi_entry(dev);
- msidesc = first_pci_msi_entry(dev);
if (msidesc->msi_attrib.is_msix)
xen_pci_frontend_disable_msix(dev);
else
xen_pci_frontend_disable_msi(dev);
- /* Free the IRQ's and the msidesc using the generic code. */
- default_teardown_msi_irqs(dev);
+ xen_teardown_msi_irqs(dev);
}
-static void xen_teardown_msi_irq(unsigned int irq)
+static int xen_msi_domain_alloc_irqs(struct irq_domain *domain,
+ struct device *dev, int nvec)
{
- xen_destroy_irq(irq);
+ int type;
+
+ if (WARN_ON_ONCE(!dev_is_pci(dev)))
+ return -EINVAL;
+
+ if (first_msi_entry(dev)->msi_attrib.is_msix)
+ type = PCI_CAP_ID_MSIX;
+ else
+ type = PCI_CAP_ID_MSI;
+
+ return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type);
}
-#endif
+static void xen_msi_domain_free_irqs(struct irq_domain *domain,
+ struct device *dev)
+{
+ if (WARN_ON_ONCE(!dev_is_pci(dev)))
+ return;
+
+ xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev));
+}
+
+static struct msi_domain_ops xen_pci_msi_domain_ops = {
+ .domain_alloc_irqs = xen_msi_domain_alloc_irqs,
+ .domain_free_irqs = xen_msi_domain_free_irqs,
+};
+
+static struct msi_domain_info xen_pci_msi_domain_info = {
+ .ops = &xen_pci_msi_domain_ops,
+};
+
+/*
+ * This irq domain is a blatant violation of the irq domain design, but
+ * disentangling XEN into real irq domains is not a job for mere mortals with
+ * limited XENology. But it's the least dangerous way for a mere mortal to
+ * get rid of the arch_*_msi_irqs() hackery in order to store the irq
+ * domain pointer in struct device. This irq domain wrappery makes it
+ * possible to do that without breaking XEN terminally.
+ */
+static __init struct irq_domain *xen_create_pci_msi_domain(void)
+{
+ struct irq_domain *d = NULL;
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_fwnode("XEN-MSI");
+ if (fn)
+ d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL);
+
+ /* FIXME: No idea how to survive if this fails */
+ BUG_ON(!d);
+
+ return d;
+}
+
+static __init void xen_setup_pci_msi(void)
+{
+ if (xen_pv_domain()) {
+ if (xen_initial_domain()) {
+ xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs;
+ x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
+ } else {
+ xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
+ }
+ xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
+ pci_msi_ignore_mask = 1;
+ } else if (xen_hvm_domain()) {
+ xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+ xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
+ } else {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ /*
+	 * Override the PCI/MSI irq domain init function. There is no point
+	 * in allocating the native domain if it is never used.
+ */
+ x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
+}
+
+#else /* CONFIG_PCI_MSI */
+static inline void xen_setup_pci_msi(void) { }
+#endif /* CONFIG_PCI_MSI */
int __init pci_xen_init(void)
{
@@ -409,17 +511,12 @@ int __init pci_xen_init(void)
/* Keep ACPI out of the picture */
acpi_noirq_set();
-#ifdef CONFIG_PCI_MSI
- x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
- x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
- pci_msi_ignore_mask = 1;
-#endif
+ xen_setup_pci_msi();
return 0;
}
#ifdef CONFIG_PCI_MSI
-void __init xen_msi_init(void)
+static void __init xen_hvm_msi_init(void)
{
if (!disable_apic) {
/*
@@ -434,9 +531,7 @@ void __init xen_msi_init(void)
((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC)))
return;
}
-
- x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+ xen_setup_pci_msi();
}
#endif
@@ -459,7 +554,7 @@ int __init pci_xen_hvm_init(void)
* We need to wait until after x2apic is initialized
* before we can set MSI IRQ ops.
*/
- x86_platform.apic_post_init = xen_msi_init;
+ x86_platform.apic_post_init = xen_hvm_msi_init;
#endif
return 0;
}
@@ -469,12 +564,7 @@ int __init pci_xen_initial_domain(void)
{
int irq;
-#ifdef CONFIG_PCI_MSI
- x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
- x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
- pci_msi_ignore_mask = 1;
-#endif
+ xen_setup_pci_msi();
__acpi_register_gsi = acpi_register_gsi_xen;
__acpi_unregister_gsi = NULL;
/*
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index f6ea8f1a9d57..8a26e705cb06 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -49,7 +49,6 @@
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
-#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>
@@ -91,6 +90,9 @@ static const unsigned long * const efi_tables[] = {
&efi.tpm_log,
&efi.tpm_final_log,
&efi_rng_seed,
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ &efi.mokvar_table,
+#endif
};
u64 efi_setup; /* efi setup_data physical address */
@@ -496,74 +498,6 @@ void __init efi_init(void)
efi_print_memmap();
}
-#if defined(CONFIG_X86_32)
-
-void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
-{
- u64 addr, npages;
-
- addr = md->virt_addr;
- npages = md->num_pages;
-
- memrange_efi_to_native(&addr, &npages);
-
- if (executable)
- set_memory_x(addr, npages);
- else
- set_memory_nx(addr, npages);
-}
-
-void __init runtime_code_page_mkexec(void)
-{
- efi_memory_desc_t *md;
-
- /* Make EFI runtime service code area executable */
- for_each_efi_memory_desc(md) {
- if (md->type != EFI_RUNTIME_SERVICES_CODE)
- continue;
-
- efi_set_executable(md, true);
- }
-}
-
-void __init efi_memory_uc(u64 addr, unsigned long size)
-{
- unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
- u64 npages;
-
- npages = round_up(size, page_shift) / page_shift;
- memrange_efi_to_native(&addr, &npages);
- set_memory_uc(addr, npages);
-}
-
-void __init old_map_region(efi_memory_desc_t *md)
-{
- u64 start_pfn, end_pfn, end;
- unsigned long size;
- void *va;
-
- start_pfn = PFN_DOWN(md->phys_addr);
- size = md->num_pages << PAGE_SHIFT;
- end = md->phys_addr + size;
- end_pfn = PFN_UP(end);
-
- if (pfn_range_is_mapped(start_pfn, end_pfn)) {
- va = __va(md->phys_addr);
-
- if (!(md->attribute & EFI_MEMORY_WB))
- efi_memory_uc((u64)(unsigned long)va, size);
- } else
- va = efi_ioremap(md->phys_addr, size,
- md->type, md->attribute);
-
- md->virt_addr = (u64) (unsigned long) va;
- if (!va)
- pr_err("ioremap of 0x%llX failed!\n",
- (unsigned long long)md->phys_addr);
-}
-
-#endif
-
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 826ead67753d..e06a199423c0 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -29,9 +29,35 @@
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
+#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ u64 start_pfn, end_pfn, end;
+ unsigned long size;
+ void *va;
+
+ start_pfn = PFN_DOWN(md->phys_addr);
+ size = md->num_pages << PAGE_SHIFT;
+ end = md->phys_addr + size;
+ end_pfn = PFN_UP(end);
+
+ if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+ va = __va(md->phys_addr);
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ set_memory_uc((unsigned long)va, md->num_pages);
+ } else {
+ va = ioremap_cache(md->phys_addr, size);
+ }
+
+ md->virt_addr = (unsigned long)va;
+ if (!va)
+ pr_err("ioremap of 0x%llX failed!\n", md->phys_addr);
+}
+
/*
* To make EFI call EFI runtime service in physical addressing mode we need
* prolog/epilog before/after the invocation to claim the EFI runtime service
@@ -58,11 +84,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
return 0;
}
-void __init efi_map_region(efi_memory_desc_t *md)
-{
- old_map_region(md);
-}
-
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
@@ -107,6 +128,15 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
void __init efi_runtime_update_mappings(void)
{
- if (__supported_pte_mask & _PAGE_NX)
- runtime_code_page_mkexec();
+ if (__supported_pte_mask & _PAGE_NX) {
+ efi_memory_desc_t *md;
+
+ /* Make EFI runtime service code area executable */
+ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_RUNTIME_SERVICES_CODE)
+ continue;
+
+ set_memory_x(md->virt_addr, md->num_pages);
+ }
+ }
}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 413583f904a6..8f5759df7776 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -47,6 +47,7 @@
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
+#include <asm/sev-es.h>
/*
* We allocate runtime services regions top-down, starting from -4G, i.e.
@@ -230,6 +231,15 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
}
/*
+ * When SEV-ES is active, the GHCB as set by the kernel will be used
+ * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
+ */
+ if (sev_es_efi_map_ghcbs(pgd)) {
+ pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
+ return 1;
+ }
+
+ /*
* When making calls to the firmware everything needs to be 1:1
* mapped and addressable with 32-bit pointers. Map the kernel
* text and allocate a new stack because we can't rely on the
@@ -259,6 +269,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
rodata = __pa(__start_rodata);
pfn = rodata >> PAGE_SHIFT;
+
+ pf = _PAGE_NX | _PAGE_ENC;
if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
pr_err("Failed to map kernel rodata 1:1\n");
return 1;
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
index a3693c829e2e..224ff0504890 100644
--- a/arch/x86/platform/uv/Makefile
+++ b/arch/x86/platform/uv/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o
+obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index a2f447dffea6..54511eaccf4d 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -2,8 +2,9 @@
/*
* BIOS run time interface routines.
*
- * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Russ Anderson <rja@sgi.com>
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
*/
#include <linux/efi.h>
@@ -170,16 +171,27 @@ int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
(u64)decode, (u64)domain, (u64)bus, 0, 0);
}
-int uv_bios_init(void)
+unsigned long get_uv_systab_phys(bool msg)
{
- uv_systab = NULL;
if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
!uv_systab_phys || efi_runtime_disabled()) {
- pr_crit("UV: UVsystab: missing\n");
- return -EEXIST;
+ if (msg)
+ pr_crit("UV: UVsystab: missing\n");
+ return 0;
}
+ return uv_systab_phys;
+}
+
+int uv_bios_init(void)
+{
+ unsigned long uv_systab_phys_addr;
+
+ uv_systab = NULL;
+ uv_systab_phys_addr = get_uv_systab_phys(1);
+ if (!uv_systab_phys_addr)
+ return -EEXIST;
- uv_systab = ioremap(uv_systab_phys, sizeof(struct uv_systab));
+ uv_systab = ioremap(uv_systab_phys_addr, sizeof(struct uv_systab));
if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
pr_err("UV: UVsystab: bad signature!\n");
iounmap(uv_systab);
@@ -191,7 +203,7 @@ int uv_bios_init(void)
int size = uv_systab->size;
iounmap(uv_systab);
- uv_systab = ioremap(uv_systab_phys, size);
+ uv_systab = ioremap(uv_systab_phys_addr, size);
if (!uv_systab) {
pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
return -EFAULT;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
deleted file mode 100644
index 62ea907668f8..000000000000
--- a/arch/x86/platform/uv/tlb_uv.c
+++ /dev/null
@@ -1,2097 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SGI UltraViolet TLB flush routines.
- *
- * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
- */
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
-#include <asm/mmu_context.h>
-#include <asm/uv/uv.h>
-#include <asm/uv/uv_mmrs.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_bau.h>
-#include <asm/apic.h>
-#include <asm/tsc.h>
-#include <asm/irq_vectors.h>
-#include <asm/timer.h>
-
-static struct bau_operations ops __ro_after_init;
-
-static int timeout_us;
-static bool nobau = true;
-static int nobau_perm;
-
-/* tunables: */
-static int max_concurr = MAX_BAU_CONCURRENT;
-static int max_concurr_const = MAX_BAU_CONCURRENT;
-static int plugged_delay = PLUGGED_DELAY;
-static int plugsb4reset = PLUGSB4RESET;
-static int giveup_limit = GIVEUP_LIMIT;
-static int timeoutsb4reset = TIMEOUTSB4RESET;
-static int ipi_reset_limit = IPI_RESET_LIMIT;
-static int complete_threshold = COMPLETE_THRESHOLD;
-static int congested_respns_us = CONGESTED_RESPONSE_US;
-static int congested_reps = CONGESTED_REPS;
-static int disabled_period = DISABLED_PERIOD;
-
-static struct tunables tunables[] = {
- {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
- {&plugged_delay, PLUGGED_DELAY},
- {&plugsb4reset, PLUGSB4RESET},
- {&timeoutsb4reset, TIMEOUTSB4RESET},
- {&ipi_reset_limit, IPI_RESET_LIMIT},
- {&complete_threshold, COMPLETE_THRESHOLD},
- {&congested_respns_us, CONGESTED_RESPONSE_US},
- {&congested_reps, CONGESTED_REPS},
- {&disabled_period, DISABLED_PERIOD},
- {&giveup_limit, GIVEUP_LIMIT}
-};
-
-static struct dentry *tunables_dir;
-
-/* these correspond to the statistics printed by ptc_seq_show() */
-static char *stat_description[] = {
- "sent: number of shootdown messages sent",
- "stime: time spent sending messages",
- "numuvhubs: number of hubs targeted with shootdown",
- "numuvhubs16: number times 16 or more hubs targeted",
- "numuvhubs8: number times 8 or more hubs targeted",
- "numuvhubs4: number times 4 or more hubs targeted",
- "numuvhubs2: number times 2 or more hubs targeted",
- "numuvhubs1: number times 1 hub targeted",
- "numcpus: number of cpus targeted with shootdown",
- "dto: number of destination timeouts",
- "retries: destination timeout retries sent",
- "rok: : destination timeouts successfully retried",
- "resetp: ipi-style resource resets for plugs",
- "resett: ipi-style resource resets for timeouts",
- "giveup: fall-backs to ipi-style shootdowns",
- "sto: number of source timeouts",
- "bz: number of stay-busy's",
- "throt: number times spun in throttle",
- "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
- "recv: shootdown messages received",
- "rtime: time spent processing messages",
- "all: shootdown all-tlb messages",
- "one: shootdown one-tlb messages",
- "mult: interrupts that found multiple messages",
- "none: interrupts that found no messages",
- "retry: number of retry messages processed",
- "canc: number messages canceled by retries",
- "nocan: number retries that found nothing to cancel",
- "reset: number of ipi-style reset requests processed",
- "rcan: number messages canceled by reset requests",
- "disable: number times use of the BAU was disabled",
- "enable: number times use of the BAU was re-enabled"
-};
-
-static int __init setup_bau(char *arg)
-{
- int result;
-
- if (!arg)
- return -EINVAL;
-
- result = strtobool(arg, &nobau);
- if (result)
- return result;
-
- /* we need to flip the logic here, so that bau=y sets nobau to false */
- nobau = !nobau;
-
- if (!nobau)
- pr_info("UV BAU Enabled\n");
- else
- pr_info("UV BAU Disabled\n");
-
- return 0;
-}
-early_param("bau", setup_bau);
-
-/* base pnode in this partition */
-static int uv_base_pnode __read_mostly;
-
-static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
-static DEFINE_PER_CPU(struct bau_control, bau_control);
-static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
-
-static void
-set_bau_on(void)
-{
- int cpu;
- struct bau_control *bcp;
-
- if (nobau_perm) {
- pr_info("BAU not initialized; cannot be turned on\n");
- return;
- }
- nobau = false;
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->nobau = false;
- }
- pr_info("BAU turned on\n");
- return;
-}
-
-static void
-set_bau_off(void)
-{
- int cpu;
- struct bau_control *bcp;
-
- nobau = true;
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->nobau = true;
- }
- pr_info("BAU turned off\n");
- return;
-}
-
-/*
- * Determine the first node on a uvhub. 'Nodes' are used for kernel
- * memory allocation.
- */
-static int __init uvhub_to_first_node(int uvhub)
-{
- int node, b;
-
- for_each_online_node(node) {
- b = uv_node_to_blade_id(node);
- if (uvhub == b)
- return node;
- }
- return -1;
-}
-
-/*
- * Determine the apicid of the first cpu on a uvhub.
- */
-static int __init uvhub_to_first_apicid(int uvhub)
-{
- int cpu;
-
- for_each_present_cpu(cpu)
- if (uvhub == uv_cpu_to_blade_id(cpu))
- return per_cpu(x86_cpu_to_apicid, cpu);
- return -1;
-}
-
-/*
- * Free a software acknowledge hardware resource by clearing its Pending
- * bit. This will return a reply to the sender.
- * If the message has timed out, a reply has already been sent by the
- * hardware but the resource has not been released. In that case our
- * clear of the Timeout bit (as well) will free the resource. No reply will
- * be sent (the hardware will only do one reply per message).
- */
-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
- int do_acknowledge)
-{
- unsigned long dw;
- struct bau_pq_entry *msg;
-
- msg = mdp->msg;
- if (!msg->canceled && do_acknowledge) {
- dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
- ops.write_l_sw_ack(dw);
- }
- msg->replied_to = 1;
- msg->swack_vec = 0;
-}
-
-/*
- * Process the receipt of a RETRY message
- */
-static void bau_process_retry_msg(struct msg_desc *mdp,
- struct bau_control *bcp)
-{
- int i;
- int cancel_count = 0;
- unsigned long msg_res;
- unsigned long mmr = 0;
- struct bau_pq_entry *msg = mdp->msg;
- struct bau_pq_entry *msg2;
- struct ptc_stats *stat = bcp->statp;
-
- stat->d_retries++;
- /*
- * cancel any message from msg+1 to the retry itself
- */
- for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
- if (msg2 > mdp->queue_last)
- msg2 = mdp->queue_first;
- if (msg2 == msg)
- break;
-
- /* same conditions for cancellation as do_reset */
- if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
- (msg2->swack_vec) && ((msg2->swack_vec &
- msg->swack_vec) == 0) &&
- (msg2->sending_cpu == msg->sending_cpu) &&
- (msg2->msg_type != MSG_NOOP)) {
- mmr = ops.read_l_sw_ack();
- msg_res = msg2->swack_vec;
- /*
- * This is a message retry; clear the resources held
- * by the previous message only if they timed out.
- * If it has not timed out we have an unexpected
- * situation to report.
- */
- if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
- unsigned long mr;
- /*
- * Is the resource timed out?
- * Make everyone ignore the cancelled message.
- */
- msg2->canceled = 1;
- stat->d_canceled++;
- cancel_count++;
- mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
- ops.write_l_sw_ack(mr);
- }
- }
- }
- if (!cancel_count)
- stat->d_nocanceled++;
-}
-
-/*
- * Do all the things a cpu should do for a TLB shootdown message.
- * Other cpu's may come here at the same time for this message.
- */
-static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
- int do_acknowledge)
-{
- short socket_ack_count = 0;
- short *sp;
- struct atomic_short *asp;
- struct ptc_stats *stat = bcp->statp;
- struct bau_pq_entry *msg = mdp->msg;
- struct bau_control *smaster = bcp->socket_master;
-
- /*
- * This must be a normal message, or retry of a normal message
- */
- if (msg->address == TLB_FLUSH_ALL) {
- flush_tlb_local();
- stat->d_alltlb++;
- } else {
- flush_tlb_one_user(msg->address);
- stat->d_onetlb++;
- }
- stat->d_requestee++;
-
- /*
- * One cpu on each uvhub has the additional job on a RETRY
- * of releasing the resource held by the message that is
- * being retried. That message is identified by sending
- * cpu number.
- */
- if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
- bau_process_retry_msg(mdp, bcp);
-
- /*
- * This is a swack message, so we have to reply to it.
- * Count each responding cpu on the socket. This avoids
- * pinging the count's cache line back and forth between
- * the sockets.
- */
- sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
- asp = (struct atomic_short *)sp;
- socket_ack_count = atom_asr(1, asp);
- if (socket_ack_count == bcp->cpus_in_socket) {
- int msg_ack_count;
- /*
- * Both sockets dump their completed count total into
- * the message's count.
- */
- *sp = 0;
- asp = (struct atomic_short *)&msg->acknowledge_count;
- msg_ack_count = atom_asr(socket_ack_count, asp);
-
- if (msg_ack_count == bcp->cpus_in_uvhub) {
- /*
- * All cpus in uvhub saw it; reply
- * (unless we are in the UV2 workaround)
- */
- reply_to_message(mdp, bcp, do_acknowledge);
- }
- }
-
- return;
-}
-
-/*
- * Determine the first cpu on a pnode.
- */
-static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
-{
- int cpu;
- struct hub_and_pnode *hpp;
-
- for_each_present_cpu(cpu) {
- hpp = &smaster->thp[cpu];
- if (pnode == hpp->pnode)
- return cpu;
- }
- return -1;
-}
-
-/*
- * Last resort when we get a large number of destination timeouts is
- * to clear resources held by a given cpu.
- * Do this with IPI so that all messages in the BAU message queue
- * can be identified by their nonzero swack_vec field.
- *
- * This is entered for a single cpu on the uvhub.
- * The sender want's this uvhub to free a specific message's
- * swack resources.
- */
-static void do_reset(void *ptr)
-{
- int i;
- struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
- struct reset_args *rap = (struct reset_args *)ptr;
- struct bau_pq_entry *msg;
- struct ptc_stats *stat = bcp->statp;
-
- stat->d_resets++;
- /*
- * We're looking for the given sender, and
- * will free its swack resource.
- * If all cpu's finally responded after the timeout, its
- * message 'replied_to' was set.
- */
- for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
- unsigned long msg_res;
- /* do_reset: same conditions for cancellation as
- bau_process_retry_msg() */
- if ((msg->replied_to == 0) &&
- (msg->canceled == 0) &&
- (msg->sending_cpu == rap->sender) &&
- (msg->swack_vec) &&
- (msg->msg_type != MSG_NOOP)) {
- unsigned long mmr;
- unsigned long mr;
- /*
- * make everyone else ignore this message
- */
- msg->canceled = 1;
- /*
- * only reset the resource if it is still pending
- */
- mmr = ops.read_l_sw_ack();
- msg_res = msg->swack_vec;
- mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
- if (mmr & msg_res) {
- stat->d_rcanceled++;
- ops.write_l_sw_ack(mr);
- }
- }
- }
- return;
-}
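
For reference, a stand-alone user-space sketch (not kernel code) of the software-ack reset mask built above: the same swack bit is written both into the pending half and, shifted left by UV_SW_ACK_NPENDING, into the timeout half of the register. The value 8 used for the shift here is an assumption for illustration only.

#include <stdio.h>

#define UV_SW_ACK_NPENDING 8	/* assumed width of the pending field */

int main(void)
{
	unsigned long msg_res = 1UL << 2;	/* swack_vec of one message */
	unsigned long mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;

	printf("swack_vec=0x%lx reset mask=0x%lx\n", msg_res, mr);
	return 0;
}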
-
-/*
- * Use IPI to get all target uvhubs to release resources held by
- * a given sending cpu number.
- */
-static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
-{
- int pnode;
- int apnode;
- int maskbits;
- int sender = bcp->cpu;
- cpumask_t *mask = bcp->uvhub_master->cpumask;
- struct bau_control *smaster = bcp->socket_master;
- struct reset_args reset_args;
-
- reset_args.sender = sender;
- cpumask_clear(mask);
- /* find a single cpu for each uvhub in this distribution mask */
- maskbits = sizeof(struct pnmask) * BITSPERBYTE;
- /* each bit is a pnode relative to the partition base pnode */
- for (pnode = 0; pnode < maskbits; pnode++) {
- int cpu;
- if (!bau_uvhub_isset(pnode, distribution))
- continue;
- apnode = pnode + bcp->partition_base_pnode;
- cpu = pnode_to_first_cpu(apnode, smaster);
- cpumask_set_cpu(cpu, mask);
- }
-
- /* IPI all cpus; preemption is already disabled */
- smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
- return;
-}
-
-/*
- * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
- * number, not an absolute. It converts a duration in cycles to a duration in
- * ns.
- */
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- struct cyc2ns_data data;
- unsigned long long ns;
-
- cyc2ns_read_begin(&data);
- ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
- cyc2ns_read_end();
-
- return ns;
-}
-
-/*
- * The reverse of the above; converts a duration in ns to a duration in cycles.
- */
-static inline unsigned long long ns_2_cycles(unsigned long long ns)
-{
- struct cyc2ns_data data;
- unsigned long long cyc;
-
- cyc2ns_read_begin(&data);
- cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
- cyc2ns_read_end();
-
- return cyc;
-}
-
-static inline unsigned long cycles_2_us(unsigned long long cyc)
-{
- return cycles_2_ns(cyc) / NSEC_PER_USEC;
-}
-
-static inline cycles_t sec_2_cycles(unsigned long sec)
-{
- return ns_2_cycles(sec * NSEC_PER_SEC);
-}
-
-static inline unsigned long long usec_2_cycles(unsigned long usec)
-{
- return ns_2_cycles(usec * NSEC_PER_USEC);
-}
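
The cycle/nanosecond helpers above are plain fixed-point scaling: ns = (cyc * mul) >> shift. Below is a minimal user-space sketch with made-up mul/shift values for a roughly 3 GHz clock; the kernel's mul_u64_u32_shr() additionally keeps a 128-bit intermediate so large cycle counts cannot overflow.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for mul_u64_u32_shr(); fine for small inputs. */
static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mul, unsigned int shift)
{
	return (cyc * mul) >> shift;
}

int main(void)
{
	/* Made-up scaling for ~3 GHz: ns per cycle * 2^22 ~= 1398101 */
	uint32_t mul = 1398101;
	unsigned int shift = 22;
	uint64_t cyc = 3000000;		/* roughly one millisecond of cycles */

	printf("%llu cycles ~ %llu ns\n",
	       (unsigned long long)cyc,
	       (unsigned long long)cyc_to_ns(cyc, mul, shift));
	return 0;
}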
-
-/*
- * Wait for all cpus on this hub to finish their sends and go quiet.
- * This leaves uvhub_quiesce set so that no new broadcasts are started by
- * bau_flush_send_and_wait().
- */
-static inline void quiesce_local_uvhub(struct bau_control *hmaster)
-{
- atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
-}
-
-/*
- * mark this quiet-requestor as done
- */
-static inline void end_uvhub_quiesce(struct bau_control *hmaster)
-{
- atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
-}
-
-/*
- * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register,
- * but it is not currently used.
- */
-static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
-{
- return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
-}
-
-/*
- * Entered when a bau descriptor has gone into a permanent busy wait because
- * of a hardware bug.
- * Workaround the bug.
- */
-static int handle_uv2_busy(struct bau_control *bcp)
-{
- struct ptc_stats *stat = bcp->statp;
-
- stat->s_uv2_wars++;
- bcp->busy = 1;
- return FLUSH_GIVEUP;
-}
-
-static int uv2_3_wait_completion(struct bau_desc *bau_desc,
- struct bau_control *bcp, long try)
-{
- unsigned long descriptor_stat;
- cycles_t ttm;
- u64 mmr_offset = bcp->status_mmr;
- int right_shift = bcp->status_index;
- int desc = bcp->uvhub_cpu;
- long busy_reps = 0;
- struct ptc_stats *stat = bcp->statp;
-
- descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
-
- /* spin on the status MMR, waiting for it to go idle */
- while (descriptor_stat != UV2H_DESC_IDLE) {
- if (descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) {
- /*
- * A h/w bug on the destination side may
- * have prevented the message being marked
- * pending, thus it doesn't get replied to
- * and gets continually nacked until it times
- * out with a SOURCE_TIMEOUT.
- */
- stat->s_stimeout++;
- return FLUSH_GIVEUP;
- } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
- ttm = get_cycles();
-
- /*
- * Our retries may be blocked by all destination
- * swack resources being consumed, and a timeout
- * pending. In that case hardware returns the
- * ERROR that looks like a destination timeout.
- * Without using the extended status we have to
- * deduce from the short time that this was a
- * strong nack.
- */
- if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
- bcp->conseccompletes = 0;
- stat->s_plugged++;
- /* FLUSH_RETRY_PLUGGED causes hang on boot */
- return FLUSH_GIVEUP;
- }
- stat->s_dtimeout++;
- bcp->conseccompletes = 0;
- /* FLUSH_RETRY_TIMEOUT causes hang on boot */
- return FLUSH_GIVEUP;
- } else {
- busy_reps++;
- if (busy_reps > 1000000) {
- /* not to hammer on the clock */
- busy_reps = 0;
- ttm = get_cycles();
- if ((ttm - bcp->send_message) > bcp->timeout_interval)
- return handle_uv2_busy(bcp);
- }
- /*
- * descriptor_stat is still BUSY
- */
- cpu_relax();
- }
- descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
- }
- bcp->conseccompletes++;
- return FLUSH_COMPLETE;
-}
-
-/*
- * Returns the status of current BAU message for cpu desc as a bit field
- * [Error][Busy][Aux]
- */
-static u64 read_status(u64 status_mmr, int index, int desc)
-{
- u64 stat;
-
- stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
- stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;
-
- return stat;
-}
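
A stand-alone sketch of how the [Error][Busy][Aux] field above is assembled from two registers: two status bits per descriptor taken from STATUS_0/1 plus one auxiliary bit per descriptor from STATUS_2. The register contents, bit positions and the 0x3 mask are example values, not the hardware layout.

#include <stdio.h>

#define ACT_STATUS_MASK 0x3UL	/* two status bits per descriptor (assumed) */

static unsigned long example_read_status(unsigned long status_01,
					 unsigned long status_2,
					 int index, int desc)
{
	unsigned long stat;

	stat = ((status_01 >> index) & ACT_STATUS_MASK) << 1;
	stat |= (status_2 >> desc) & 0x1;
	return stat;	/* [Error][Busy][Aux] */
}

int main(void)
{
	/* descriptor 5: bits 10..11 of STATUS_0, bit 5 of STATUS_2 */
	unsigned long s01 = 0x2UL << 10, s2 = 1UL << 5;

	printf("status = 0x%lx\n", example_read_status(s01, s2, 10, 5));
	return 0;
}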
-
-static int uv4_wait_completion(struct bau_desc *bau_desc,
- struct bau_control *bcp, long try)
-{
- struct ptc_stats *stat = bcp->statp;
- u64 descriptor_stat;
- u64 mmr = bcp->status_mmr;
- int index = bcp->status_index;
- int desc = bcp->uvhub_cpu;
-
- descriptor_stat = read_status(mmr, index, desc);
-
- /* spin on the status MMR, waiting for it to go idle */
- while (descriptor_stat != UV2H_DESC_IDLE) {
- switch (descriptor_stat) {
- case UV2H_DESC_SOURCE_TIMEOUT:
- stat->s_stimeout++;
- return FLUSH_GIVEUP;
-
- case UV2H_DESC_DEST_TIMEOUT:
- stat->s_dtimeout++;
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_TIMEOUT;
-
- case UV2H_DESC_DEST_STRONG_NACK:
- stat->s_plugged++;
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_PLUGGED;
-
- case UV2H_DESC_DEST_PUT_ERR:
- bcp->conseccompletes = 0;
- return FLUSH_GIVEUP;
-
- default:
- /* descriptor_stat is still BUSY */
- cpu_relax();
- }
- descriptor_stat = read_status(mmr, index, desc);
- }
- bcp->conseccompletes++;
- return FLUSH_COMPLETE;
-}
-
-/*
- * Our retries are blocked by all destination sw ack resources being
- * in use, and a timeout is pending. In that case hardware immediately
- * returns the ERROR that looks like a destination timeout.
- */
-static void destination_plugged(struct bau_desc *bau_desc,
- struct bau_control *bcp,
- struct bau_control *hmaster, struct ptc_stats *stat)
-{
- udelay(bcp->plugged_delay);
- bcp->plugged_tries++;
-
- if (bcp->plugged_tries >= bcp->plugsb4reset) {
- bcp->plugged_tries = 0;
-
- quiesce_local_uvhub(hmaster);
-
- spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
- spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
- bcp->ipi_attempts++;
- stat->s_resets_plug++;
- }
-}
-
-static void destination_timeout(struct bau_desc *bau_desc,
- struct bau_control *bcp, struct bau_control *hmaster,
- struct ptc_stats *stat)
-{
- hmaster->max_concurr = 1;
- bcp->timeout_tries++;
- if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
- bcp->timeout_tries = 0;
-
- quiesce_local_uvhub(hmaster);
-
- spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
- spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
- bcp->ipi_attempts++;
- stat->s_resets_timeout++;
- }
-}
-
-/*
- * Stop all cpus on a uvhub from using the BAU for a period of time.
- * This is reversed by check_enable.
- */
-static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
-{
- int tcpu;
- struct bau_control *tbcp;
- struct bau_control *hmaster;
- cycles_t tm1;
-
- hmaster = bcp->uvhub_master;
- spin_lock(&hmaster->disable_lock);
- if (!bcp->baudisabled) {
- stat->s_bau_disabled++;
- tm1 = get_cycles();
- for_each_present_cpu(tcpu) {
- tbcp = &per_cpu(bau_control, tcpu);
- if (tbcp->uvhub_master == hmaster) {
- tbcp->baudisabled = 1;
- tbcp->set_bau_on_time =
- tm1 + bcp->disabled_period;
- }
- }
- }
- spin_unlock(&hmaster->disable_lock);
-}
-
-static void count_max_concurr(int stat, struct bau_control *bcp,
- struct bau_control *hmaster)
-{
- bcp->plugged_tries = 0;
- bcp->timeout_tries = 0;
- if (stat != FLUSH_COMPLETE)
- return;
- if (bcp->conseccompletes <= bcp->complete_threshold)
- return;
- if (hmaster->max_concurr >= hmaster->max_concurr_const)
- return;
- hmaster->max_concurr++;
-}
-
-static void record_send_stats(cycles_t time1, cycles_t time2,
- struct bau_control *bcp, struct ptc_stats *stat,
- int completion_status, int try)
-{
- cycles_t elapsed;
-
- if (time2 > time1) {
- elapsed = time2 - time1;
- stat->s_time += elapsed;
-
- if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
- bcp->period_requests++;
- bcp->period_time += elapsed;
- if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
- (bcp->period_requests > bcp->cong_reps) &&
- ((bcp->period_time / bcp->period_requests) >
- usec_2_cycles(bcp->cong_response_us))) {
- stat->s_congested++;
- disable_for_period(bcp, stat);
- }
- }
- } else
- stat->s_requestor--;
-
- if (completion_status == FLUSH_COMPLETE && try > 1)
- stat->s_retriesok++;
- else if (completion_status == FLUSH_GIVEUP) {
- stat->s_giveup++;
- if (get_cycles() > bcp->period_end)
- bcp->period_giveups = 0;
- bcp->period_giveups++;
- if (bcp->period_giveups == 1)
- bcp->period_end = get_cycles() + bcp->disabled_period;
- if (bcp->period_giveups > bcp->giveup_limit) {
- disable_for_period(bcp, stat);
- stat->s_giveuplimit++;
- }
- }
-}
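
The congestion check above fires only when the latest response, the number of samples, and the running average all exceed their thresholds. A user-space sketch of that decision with invented threshold values (the real ones come from the bau_tunables settings):

#include <stdio.h>
#include <stdbool.h>

#define CONG_RESPONSE_CYCLES 10000	/* assumed congested-response threshold */
#define CONG_REPS 64			/* assumed minimum sample count */

static bool is_congested(unsigned long elapsed, unsigned long period_requests,
			 unsigned long period_time)
{
	return elapsed > CONG_RESPONSE_CYCLES &&
	       period_requests > CONG_REPS &&
	       (period_time / period_requests) > CONG_RESPONSE_CYCLES;
}

int main(void)
{
	/* 100 requests averaging 12000 cycles each, latest one 15000 cycles */
	printf("congested: %d\n", is_congested(15000, 100, 100UL * 12000));
	return 0;
}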
-
-/*
- * Handle the completion status of a message send.
- */
-static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
- struct bau_control *bcp, struct bau_control *hmaster,
- struct ptc_stats *stat)
-{
- if (completion_status == FLUSH_RETRY_PLUGGED)
- destination_plugged(bau_desc, bcp, hmaster, stat);
- else if (completion_status == FLUSH_RETRY_TIMEOUT)
- destination_timeout(bau_desc, bcp, hmaster, stat);
-}
-
-/*
- * Send a broadcast and wait for it to complete.
- *
- * The flush_mask contains the cpus the broadcast is to be sent to, including
- * cpus that are on the local uvhub.
- *
- * Returns 0 if all flushing represented in the mask was done.
- * Returns 1 if it gives up entirely and the original cpu mask is to be
- * returned to the kernel.
- */
-static int uv_flush_send_and_wait(struct cpumask *flush_mask,
- struct bau_control *bcp,
- struct bau_desc *bau_desc)
-{
- int seq_number = 0;
- int completion_stat = 0;
- long try = 0;
- unsigned long index;
- cycles_t time1;
- cycles_t time2;
- struct ptc_stats *stat = bcp->statp;
- struct bau_control *hmaster = bcp->uvhub_master;
- struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
-
- while (hmaster->uvhub_quiesce)
- cpu_relax();
-
- time1 = get_cycles();
- uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
-
- do {
- if (try == 0) {
- uv2_3_hdr->msg_type = MSG_REGULAR;
- seq_number = bcp->message_number++;
- } else {
- uv2_3_hdr->msg_type = MSG_RETRY;
- stat->s_retry_messages++;
- }
-
- uv2_3_hdr->sequence = seq_number;
- index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
- bcp->send_message = get_cycles();
-
- write_mmr_activation(index);
-
- try++;
- completion_stat = ops.wait_completion(bau_desc, bcp, try);
-
- handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
-
- if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
- bcp->ipi_attempts = 0;
- stat->s_overipilimit++;
- completion_stat = FLUSH_GIVEUP;
- break;
- }
- cpu_relax();
- } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
- (completion_stat == FLUSH_RETRY_TIMEOUT));
-
- time2 = get_cycles();
-
- count_max_concurr(completion_stat, bcp, hmaster);
-
- while (hmaster->uvhub_quiesce)
- cpu_relax();
-
- atomic_dec(&hmaster->active_descriptor_count);
-
- record_send_stats(time1, time2, bcp, stat, completion_stat, try);
-
- if (completion_stat == FLUSH_GIVEUP)
- /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
- return 1;
- return 0;
-}
-
-/*
- * The BAU is disabled for this uvhub. When the disabled time period has
- * expired re-enable it.
- * Return 0 if it is re-enabled for all cpus on this uvhub.
- */
-static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
-{
- int tcpu;
- struct bau_control *tbcp;
- struct bau_control *hmaster;
-
- hmaster = bcp->uvhub_master;
- spin_lock(&hmaster->disable_lock);
- if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
- stat->s_bau_reenabled++;
- for_each_present_cpu(tcpu) {
- tbcp = &per_cpu(bau_control, tcpu);
- if (tbcp->uvhub_master == hmaster) {
- tbcp->baudisabled = 0;
- tbcp->period_requests = 0;
- tbcp->period_time = 0;
- tbcp->period_giveups = 0;
- }
- }
- spin_unlock(&hmaster->disable_lock);
- return 0;
- }
- spin_unlock(&hmaster->disable_lock);
- return -1;
-}
-
-static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
- int remotes, struct bau_desc *bau_desc)
-{
- stat->s_requestor++;
- stat->s_ntargcpu += remotes + locals;
- stat->s_ntargremotes += remotes;
- stat->s_ntarglocals += locals;
-
- /* uvhub statistics */
- hubs = bau_uvhub_weight(&bau_desc->distribution);
- if (locals) {
- stat->s_ntarglocaluvhub++;
- stat->s_ntargremoteuvhub += (hubs - 1);
- } else
- stat->s_ntargremoteuvhub += hubs;
-
- stat->s_ntarguvhub += hubs;
-
- if (hubs >= 16)
- stat->s_ntarguvhub16++;
- else if (hubs >= 8)
- stat->s_ntarguvhub8++;
- else if (hubs >= 4)
- stat->s_ntarguvhub4++;
- else if (hubs >= 2)
- stat->s_ntarguvhub2++;
- else
- stat->s_ntarguvhub1++;
-}
-
-/*
- * Translate a cpu mask to the uvhub distribution mask in the BAU
- * activation descriptor.
- */
-static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
- struct bau_desc *bau_desc, int *localsp, int *remotesp)
-{
- int cpu;
- int pnode;
- int cnt = 0;
- struct hub_and_pnode *hpp;
-
- for_each_cpu(cpu, flush_mask) {
- /*
- * The distribution vector is a bit map of pnodes, relative
- * to the partition base pnode (and the partition base nasid
- * in the header).
- * Translate cpu to pnode and hub using a local memory array.
- */
- hpp = &bcp->socket_master->thp[cpu];
- pnode = hpp->pnode - bcp->partition_base_pnode;
- bau_uvhub_set(pnode, &bau_desc->distribution);
- cnt++;
- if (hpp->uvhub == bcp->uvhub)
- (*localsp)++;
- else
- (*remotesp)++;
- }
- if (!cnt)
- return 1;
- return 0;
-}
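
A stand-alone sketch of the distribution-vector idea described in the comment above: each target pnode is recorded as a single bit at (pnode - partition_base_pnode) in a bitmap. The bitmap width and helper names below are invented for the example.

#include <stdio.h>
#include <string.h>

#define DISTRIBUTION_BITS 256	/* assumed size of the distribution vector */

struct pnmask_example {
	unsigned long bits[DISTRIBUTION_BITS / (8 * sizeof(unsigned long))];
};

static void example_uvhub_set(int bit, struct pnmask_example *m)
{
	m->bits[bit / (8 * sizeof(unsigned long))] |=
		1UL << (bit % (8 * sizeof(unsigned long)));
}

int main(void)
{
	struct pnmask_example dist;
	int base_pnode = 16, target_pnodes[] = { 16, 19, 47 };

	memset(&dist, 0, sizeof(dist));
	for (unsigned int i = 0; i < sizeof(target_pnodes) / sizeof(target_pnodes[0]); i++)
		example_uvhub_set(target_pnodes[i] - base_pnode, &dist);

	printf("word 0 of distribution = 0x%lx\n", dist.bits[0]);
	return 0;
}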
-
-/*
- * globally purge translation cache of a virtual address or all TLBs
- * @cpumask: mask of all cpus in which the address is to be removed
- * @mm: mm_struct containing virtual address range
- * @start: start virtual address to be removed from TLB
- * @end: end virtual address to be removed from TLB
- * @cpu: the current cpu
- *
- * This is the entry point for initiating any UV global TLB shootdown.
- *
- * Purges the translation caches of all specified processors of the given
- * virtual address, or purges all TLBs on specified processors.
- *
- * The caller has derived the cpumask from the mm_struct. This function
- * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
- *
- * The cpumask is converted into a uvhubmask of the uvhubs containing
- * those cpus.
- *
- * Note that this function should be called with preemption disabled.
- *
- * Returns NULL if all remote flushing was done.
- * Returns pointer to cpumask if some remote flushing remains to be
- * done. The returned pointer is valid till preemption is re-enabled.
- */
-const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
-{
- unsigned int cpu = smp_processor_id();
- int locals = 0, remotes = 0, hubs = 0;
- struct bau_desc *bau_desc;
- struct cpumask *flush_mask;
- struct ptc_stats *stat;
- struct bau_control *bcp;
- unsigned long descriptor_status, status, address;
-
- bcp = &per_cpu(bau_control, cpu);
-
- if (bcp->nobau)
- return cpumask;
-
- stat = bcp->statp;
- stat->s_enters++;
-
- if (bcp->busy) {
- descriptor_status =
- read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
- status = ((descriptor_status >> (bcp->uvhub_cpu *
- UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
- if (status == UV2H_DESC_BUSY)
- return cpumask;
- bcp->busy = 0;
- }
-
- /* bau was disabled due to slow response */
- if (bcp->baudisabled) {
- if (check_enable(bcp, stat)) {
- stat->s_ipifordisabled++;
- return cpumask;
- }
- }
-
- /*
- * Each sending cpu has a per-cpu mask which it fills from the caller's
- * cpu mask. All cpus are converted to uvhubs and copied to the
- * activation descriptor.
- */
- flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
- /* don't actually do a shootdown of the local cpu */
- cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
-
- if (cpumask_test_cpu(cpu, cpumask))
- stat->s_ntargself++;
-
- bau_desc = bcp->descriptor_base;
- bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
- bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
- if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
- return NULL;
-
- record_send_statistics(stat, locals, hubs, remotes, bau_desc);
-
- if (!info->end || (info->end - info->start) <= PAGE_SIZE)
- address = info->start;
- else
- address = TLB_FLUSH_ALL;
-
- switch (bcp->uvhub_version) {
- case UV_BAU_V2:
- case UV_BAU_V3:
- bau_desc->payload.uv2_3.address = address;
- bau_desc->payload.uv2_3.sending_cpu = cpu;
- break;
- case UV_BAU_V4:
- bau_desc->payload.uv4.address = address;
- bau_desc->payload.uv4.sending_cpu = cpu;
- bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
- break;
- }
-
- /*
- * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
- * or 1 if it gave up and the original cpumask should be returned.
- */
- if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
- return NULL;
- else
- return cpumask;
-}
-
-/*
- * Search the message queue for any 'other' unprocessed message with the
- * same software acknowledge resource bit vector as the 'msg' message.
- */
-static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
- struct bau_control *bcp)
-{
- struct bau_pq_entry *msg_next = msg + 1;
- unsigned char swack_vec = msg->swack_vec;
-
- if (msg_next > bcp->queue_last)
- msg_next = bcp->queue_first;
- while (msg_next != msg) {
- if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
- (msg_next->swack_vec == swack_vec))
- return msg_next;
- msg_next++;
- if (msg_next > bcp->queue_last)
- msg_next = bcp->queue_first;
- }
- return NULL;
-}
-
-/*
- * UV2 needs to work around a bug in which an arriving message has not
- * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
- * Such a message must be ignored.
- */
-static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
-{
- unsigned long mmr_image;
- unsigned char swack_vec;
- struct bau_pq_entry *msg = mdp->msg;
- struct bau_pq_entry *other_msg;
-
- mmr_image = ops.read_l_sw_ack();
- swack_vec = msg->swack_vec;
-
- if ((swack_vec & mmr_image) == 0) {
- /*
- * This message was assigned a swack resource, but no
-		 * reserved acknowledgment is pending.
- * The bug has prevented this message from setting the MMR.
- */
- /*
- * Some message has set the MMR 'pending' bit; it might have
- * been another message. Look for that message.
- */
- other_msg = find_another_by_swack(msg, bcp);
- if (other_msg) {
- /*
- * There is another. Process this one but do not
- * ack it.
- */
- bau_process_message(mdp, bcp, 0);
- /*
- * Let the natural processing of that other message
- * acknowledge it. Don't get the processing of sw_ack's
- * out of order.
- */
- return;
- }
- }
-
- /*
- * Either the MMR shows this one pending a reply or there is no
- * other message using this sw_ack, so it is safe to acknowledge it.
- */
- bau_process_message(mdp, bcp, 1);
-
- return;
-}
-
-/*
- * The BAU message interrupt comes here. (registered by set_intr_gate)
- * See entry_64.S
- *
- * We received a broadcast assist message.
- *
- * Interrupts are disabled; this interrupt could represent
- * the receipt of several messages.
- *
- * All cores/threads on this hub get this interrupt.
- * The last one to see it does the software ack.
- * (the resource will not be freed until noninterruptible cpus see this
- * interrupt; hardware may timeout the s/w ack and reply ERROR)
- */
-DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
-{
- int count = 0;
- cycles_t time_start;
- struct bau_pq_entry *msg;
- struct bau_control *bcp;
- struct ptc_stats *stat;
- struct msg_desc msgdesc;
-
- ack_APIC_irq();
- kvm_set_cpu_l1tf_flush_l1d();
- time_start = get_cycles();
-
- bcp = &per_cpu(bau_control, smp_processor_id());
- stat = bcp->statp;
-
- msgdesc.queue_first = bcp->queue_first;
- msgdesc.queue_last = bcp->queue_last;
-
- msg = bcp->bau_msg_head;
- while (msg->swack_vec) {
- count++;
-
- msgdesc.msg_slot = msg - msgdesc.queue_first;
- msgdesc.msg = msg;
- if (bcp->uvhub_version == UV_BAU_V2)
- process_uv2_message(&msgdesc, bcp);
- else
- /* no error workaround for uv3 */
- bau_process_message(&msgdesc, bcp, 1);
-
- msg++;
- if (msg > msgdesc.queue_last)
- msg = msgdesc.queue_first;
- bcp->bau_msg_head = msg;
- }
- stat->d_time += (get_cycles() - time_start);
- if (!count)
- stat->d_nomsg++;
- else if (count > 1)
- stat->d_multmsg++;
-}
-
-/*
- * Each target uvhub (i.e. a uvhub that has cpus) needs to have
- * shootdown message timeouts enabled. The timeout does not cause
- * an interrupt, but causes an error message to be returned to
- * the sender.
- */
-static void __init enable_timeouts(void)
-{
- int uvhub;
- int nuvhubs;
- int pnode;
- unsigned long mmr_image;
-
- nuvhubs = uv_num_possible_blades();
-
- for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- if (!uv_blade_nr_possible_cpus(uvhub))
- continue;
-
- pnode = uv_blade_to_pnode(uvhub);
- mmr_image = read_mmr_misc_control(pnode);
- /*
-		 * Set the timeout period and then lock it in; this is done in
-		 * three steps that capture and lock in the period.
- *
- * To program the period, the SOFT_ACK_MODE must be off.
- */
- mmr_image &= ~(1L << SOFTACK_MSHIFT);
- write_mmr_misc_control(pnode, mmr_image);
- /*
- * Set the 4-bit period.
- */
- mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
- mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
- write_mmr_misc_control(pnode, mmr_image);
-
- mmr_image |= (1L << SOFTACK_MSHIFT);
- if (is_uv2_hub()) {
- /* do not touch the legacy mode bit */
- /* hw bug workaround; do not use extended status */
- mmr_image &= ~(1L << UV2_EXT_SHFT);
- } else if (is_uv3_hub()) {
- mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
- mmr_image |= (1L << SB_STATUS_SHFT);
- }
- write_mmr_misc_control(pnode, mmr_image);
- }
-}
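
A user-space illustration of the three-step programming sequence described above: clear the soft-ack mode bit so the period field can be changed, program the 4-bit period, then set the mode bit again to lock the period in. The bit positions, the period value and the fake register variable are assumptions, not the real MMR layout.

#include <stdio.h>

#define SOFTACK_MSHIFT_EX 60		/* assumed mode-bit position */
#define SOFTACK_PSHIFT_EX 56		/* assumed period-field position */
#define SOFTACK_PERIOD_EX 0xaUL		/* assumed 4-bit period value */

static unsigned long fake_mmr;		/* stands in for the misc-control MMR */

int main(void)
{
	unsigned long image = fake_mmr;

	image &= ~(1UL << SOFTACK_MSHIFT_EX);		/* step 1: mode off */
	fake_mmr = image;
	image &= ~(0xfUL << SOFTACK_PSHIFT_EX);		/* step 2: program period */
	image |= SOFTACK_PERIOD_EX << SOFTACK_PSHIFT_EX;
	fake_mmr = image;
	image |= 1UL << SOFTACK_MSHIFT_EX;		/* step 3: mode back on */
	fake_mmr = image;

	printf("final image = 0x%lx\n", fake_mmr);
	return 0;
}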
-
-static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
-{
- if (*offset < num_possible_cpus())
- return offset;
- return NULL;
-}
-
-static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
-{
- (*offset)++;
- if (*offset < num_possible_cpus())
- return offset;
- return NULL;
-}
-
-static void ptc_seq_stop(struct seq_file *file, void *data)
-{
-}
-
-/*
- * Display the statistics thru /proc/sgi_uv/ptc_statistics
- * 'data' points to the cpu number
- * Note: see the descriptions in stat_description[].
- */
-static int ptc_seq_show(struct seq_file *file, void *data)
-{
- struct ptc_stats *stat;
- struct bau_control *bcp;
- int cpu;
-
- cpu = *(loff_t *)data;
- if (!cpu) {
- seq_puts(file,
- "# cpu bauoff sent stime self locals remotes ncpus localhub ");
- seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
- seq_puts(file,
- "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
- seq_puts(file,
- "rok resetp resett giveup sto bz throt disable ");
- seq_puts(file,
- "enable wars warshw warwaits enters ipidis plugged ");
- seq_puts(file,
- "ipiover glim cong swack recv rtime all one mult ");
- seq_puts(file, "none retry canc nocan reset rcan\n");
- }
- if (cpu < num_possible_cpus() && cpu_online(cpu)) {
- bcp = &per_cpu(bau_control, cpu);
- if (bcp->nobau) {
- seq_printf(file, "cpu %d bau disabled\n", cpu);
- return 0;
- }
- stat = bcp->statp;
- /* source side statistics */
- seq_printf(file,
- "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
- cpu, bcp->nobau, stat->s_requestor,
- cycles_2_us(stat->s_time),
- stat->s_ntargself, stat->s_ntarglocals,
- stat->s_ntargremotes, stat->s_ntargcpu,
- stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
- stat->s_ntarguvhub, stat->s_ntarguvhub16);
- seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
- stat->s_ntarguvhub8, stat->s_ntarguvhub4,
- stat->s_ntarguvhub2, stat->s_ntarguvhub1,
- stat->s_dtimeout, stat->s_strongnacks);
- seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
- stat->s_retry_messages, stat->s_retriesok,
- stat->s_resets_plug, stat->s_resets_timeout,
- stat->s_giveup, stat->s_stimeout,
- stat->s_busy, stat->s_throttles);
- seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
- stat->s_bau_disabled, stat->s_bau_reenabled,
- stat->s_uv2_wars, stat->s_uv2_wars_hw,
- stat->s_uv2_war_waits, stat->s_enters,
- stat->s_ipifordisabled, stat->s_plugged,
- stat->s_overipilimit, stat->s_giveuplimit,
- stat->s_congested);
-
- /* destination side statistics */
- seq_printf(file,
- "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
- ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
- stat->d_requestee, cycles_2_us(stat->d_time),
- stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
- stat->d_nomsg, stat->d_retries, stat->d_canceled,
- stat->d_nocanceled, stat->d_resets,
- stat->d_rcanceled);
- }
- return 0;
-}
-
-/*
- * Display the tunables thru debugfs
- */
-static ssize_t tunables_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char *buf;
- int ret;
-
- buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
- "max_concur plugged_delay plugsb4reset timeoutsb4reset",
- "ipi_reset_limit complete_threshold congested_response_us",
- "congested_reps disabled_period giveup_limit",
- max_concurr, plugged_delay, plugsb4reset,
- timeoutsb4reset, ipi_reset_limit, complete_threshold,
- congested_respns_us, congested_reps, disabled_period,
- giveup_limit);
-
- if (!buf)
- return -ENOMEM;
-
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
- kfree(buf);
- return ret;
-}
-
-/*
- * handle a write to /proc/sgi_uv/ptc_statistics
- * -1: reset the statistics
- * 0: display meaning of the statistics
- */
-static ssize_t ptc_proc_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
-{
- int cpu;
- int i;
- int elements;
- long input_arg;
- char optstr[64];
- struct ptc_stats *stat;
-
- if (count == 0 || count > sizeof(optstr))
- return -EINVAL;
- if (copy_from_user(optstr, user, count))
- return -EFAULT;
- optstr[count - 1] = '\0';
-
- if (!strcmp(optstr, "on")) {
- set_bau_on();
- return count;
- } else if (!strcmp(optstr, "off")) {
- set_bau_off();
- return count;
- }
-
- if (kstrtol(optstr, 10, &input_arg) < 0) {
- pr_debug("%s is invalid\n", optstr);
- return -EINVAL;
- }
-
- if (input_arg == 0) {
- elements = ARRAY_SIZE(stat_description);
- pr_debug("# cpu: cpu number\n");
- pr_debug("Sender statistics:\n");
- for (i = 0; i < elements; i++)
- pr_debug("%s\n", stat_description[i]);
- } else if (input_arg == -1) {
- for_each_present_cpu(cpu) {
- stat = &per_cpu(ptcstats, cpu);
- memset(stat, 0, sizeof(struct ptc_stats));
- }
- }
-
- return count;
-}
-
-static int local_atoi(const char *name)
-{
- int val = 0;
-
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
- }
- }
-}
-
-/*
- * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
- * Zero values reset them to defaults.
- */
-static int parse_tunables_write(struct bau_control *bcp, char *instr,
- int count)
-{
- char *p;
- char *q;
- int cnt = 0;
- int val;
- int e = ARRAY_SIZE(tunables);
-
- p = instr + strspn(instr, WHITESPACE);
- q = p;
- for (; *p; p = q + strspn(q, WHITESPACE)) {
- q = p + strcspn(p, WHITESPACE);
- cnt++;
- if (q == p)
- break;
- }
- if (cnt != e) {
- pr_info("bau tunable error: should be %d values\n", e);
- return -EINVAL;
- }
-
- p = instr + strspn(instr, WHITESPACE);
- q = p;
- for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
- q = p + strcspn(p, WHITESPACE);
- val = local_atoi(p);
- switch (cnt) {
- case 0:
- if (val == 0) {
- max_concurr = MAX_BAU_CONCURRENT;
- max_concurr_const = MAX_BAU_CONCURRENT;
- continue;
- }
- if (val < 1 || val > bcp->cpus_in_uvhub) {
- pr_debug(
- "Error: BAU max concurrent %d is invalid\n",
- val);
- return -EINVAL;
- }
- max_concurr = val;
- max_concurr_const = val;
- continue;
- default:
- if (val == 0)
- *tunables[cnt].tunp = tunables[cnt].deflt;
- else
- *tunables[cnt].tunp = val;
- continue;
- }
- }
- return 0;
-}
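
The tokenizing loop above leans on strspn()/strcspn() to walk whitespace-separated values: strspn() skips separators and strcspn() measures the token. A minimal user-space sketch of the same walk over an example tunables string:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define WHITESPACE " \t\n"

int main(void)
{
	char instr[] = "2 64 3 1 5 20 1000 80 30 1";	/* example input only */
	char *p, *q;

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		if (q == p)
			break;
		printf("token: %.*s -> %d\n", (int)(q - p), p, atoi(p));
	}
	return 0;
}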
-
-/*
- * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
- */
-static ssize_t tunables_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
-{
- int cpu;
- int ret;
- char instr[100];
- struct bau_control *bcp;
-
- if (count == 0 || count > sizeof(instr)-1)
- return -EINVAL;
- if (copy_from_user(instr, user, count))
- return -EFAULT;
-
- instr[count] = '\0';
-
- cpu = get_cpu();
- bcp = &per_cpu(bau_control, cpu);
- ret = parse_tunables_write(bcp, instr, count);
- put_cpu();
- if (ret)
- return ret;
-
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->max_concurr = max_concurr;
- bcp->max_concurr_const = max_concurr;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->cong_response_us = congested_respns_us;
- bcp->cong_reps = congested_reps;
- bcp->disabled_period = sec_2_cycles(disabled_period);
- bcp->giveup_limit = giveup_limit;
- }
- return count;
-}
-
-static const struct seq_operations uv_ptc_seq_ops = {
- .start = ptc_seq_start,
- .next = ptc_seq_next,
- .stop = ptc_seq_stop,
- .show = ptc_seq_show
-};
-
-static int ptc_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &uv_ptc_seq_ops);
-}
-
-static int tunables_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static const struct proc_ops uv_ptc_proc_ops = {
- .proc_open = ptc_proc_open,
- .proc_read = seq_read,
- .proc_write = ptc_proc_write,
- .proc_lseek = seq_lseek,
- .proc_release = seq_release,
-};
-
-static const struct file_operations tunables_fops = {
- .open = tunables_open,
- .read = tunables_read,
- .write = tunables_write,
- .llseek = default_llseek,
-};
-
-static int __init uv_ptc_init(void)
-{
- struct proc_dir_entry *proc_uv_ptc;
-
- if (!is_uv_system())
- return 0;
-
- proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
- &uv_ptc_proc_ops);
- if (!proc_uv_ptc) {
- pr_err("unable to create %s proc entry\n",
- UV_PTC_BASENAME);
- return -EINVAL;
- }
-
- tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
- debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600, tunables_dir, NULL,
- &tunables_fops);
- return 0;
-}
-
-/*
- * Initialize the sending side's sending buffers.
- */
-static void activation_descriptor_init(int node, int pnode, int base_pnode)
-{
- int i;
- int cpu;
- unsigned long gpa;
- unsigned long m;
- unsigned long n;
- size_t dsize;
- struct bau_desc *bau_desc;
- struct bau_desc *bd2;
- struct uv2_3_bau_msg_header *uv2_3_hdr;
- struct bau_control *bcp;
-
- /*
- * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
- * per cpu; and one per cpu on the uvhub (ADP_SZ)
- */
- dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
- bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
- BUG_ON(!bau_desc);
-
- gpa = uv_gpa(bau_desc);
- n = uv_gpa_to_gnode(gpa);
- m = ops.bau_gpa_to_offset(gpa);
-
- /* the 14-bit pnode */
- write_mmr_descriptor_base(pnode,
- (n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m));
- /*
- * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
- * cpu even though we only use the first one; one descriptor can
- * describe a broadcast to 256 uv hubs.
- */
- for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
- memset(bd2, 0, sizeof(struct bau_desc));
- /*
- * BIOS uses legacy mode, but uv2 and uv3 hardware always
- * uses native mode for selective broadcasts.
- */
- uv2_3_hdr = &bd2->header.uv2_3_hdr;
- uv2_3_hdr->swack_flag = 1;
- uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
- uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
- }
- for_each_present_cpu(cpu) {
- if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
- continue;
- bcp = &per_cpu(bau_control, cpu);
- bcp->descriptor_base = bau_desc;
- }
-}
-
-/*
- * initialize the destination side's receiving buffers
- * entered for each uvhub in the partition
- * - node is first node (kernel memory notion) on the uvhub
- * - pnode is the uvhub's physical identifier
- */
-static void pq_init(int node, int pnode)
-{
- int cpu;
- size_t plsize;
- char *cp;
- void *vp;
- unsigned long gnode, first, last, tail;
- struct bau_pq_entry *pqp;
- struct bau_control *bcp;
-
- plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
- vp = kmalloc_node(plsize, GFP_KERNEL, node);
- BUG_ON(!vp);
-
- pqp = (struct bau_pq_entry *)vp;
- cp = (char *)pqp + 31;
- pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
-
- for_each_present_cpu(cpu) {
- if (pnode != uv_cpu_to_pnode(cpu))
- continue;
- /* for every cpu on this pnode: */
- bcp = &per_cpu(bau_control, cpu);
- bcp->queue_first = pqp;
- bcp->bau_msg_head = pqp;
- bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
- }
-
- first = ops.bau_gpa_to_offset(uv_gpa(pqp));
- last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));
-
- /*
- * Pre UV4, the gnode is required to locate the payload queue
- * and the payload queue tail must be maintained by the kernel.
- */
- bcp = &per_cpu(bau_control, smp_processor_id());
- if (bcp->uvhub_version <= UV_BAU_V3) {
- tail = first;
- gnode = uv_gpa_to_gnode(uv_gpa(pqp));
- first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
- write_mmr_payload_tail(pnode, tail);
- }
-
- ops.write_payload_first(pnode, first);
- ops.write_payload_last(pnode, last);
-
- /* in effect, all msg_type's are set to MSG_NOOP */
- memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
-}
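
A stand-alone sketch of the round-up-to-32-bytes trick applied to the payload queue pointer above: add 31, then clear the low five bits. The buffer here is an ordinary malloc'ed array used only for demonstration.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *vp = malloc(1024 + 32);	/* extra slack so rounding up stays in bounds */
	char *cp = (char *)vp + 31;
	void *aligned = (void *)(((unsigned long)cp >> 5) << 5);

	printf("raw=%p aligned=%p (multiple of 32: %s)\n",
	       vp, aligned, ((unsigned long)aligned % 32) ? "no" : "yes");
	free(vp);
	return 0;
}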
-
-/*
- * Initialization of each UV hub's structures
- */
-static void __init init_uvhub(int uvhub, int vector, int base_pnode)
-{
- int node;
- int pnode;
- unsigned long apicid;
-
- node = uvhub_to_first_node(uvhub);
- pnode = uv_blade_to_pnode(uvhub);
-
- activation_descriptor_init(node, pnode, base_pnode);
-
- pq_init(node, pnode);
- /*
- * The below initialization can't be in firmware because the
- * messaging IRQ will be determined by the OS.
- */
- apicid = uvhub_to_first_apicid(uvhub);
- write_mmr_data_config(pnode, ((apicid << 32) | vector));
-}
-
-/*
- * We will set BAU_MISC_CONTROL with a timeout period.
- * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
- * So the destination timeout period has to be calculated from them.
- */
-static int calculate_destination_timeout(void)
-{
- unsigned long mmr_image;
- int mult1;
- int base;
- int ret;
-
- /* same destination timeout for uv2 and uv3 */
- /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
- mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
- mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
- if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
- base = 80;
- else
- base = 10;
- mult1 = mmr_image & UV2_ACK_MASK;
- ret = mult1 * base;
-
- return ret;
-}
-
-static void __init init_per_cpu_tunables(void)
-{
- int cpu;
- struct bau_control *bcp;
-
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->baudisabled = 0;
- if (nobau)
- bcp->nobau = true;
- bcp->statp = &per_cpu(ptcstats, cpu);
- /* time interval to catch a hardware stay-busy bug */
- bcp->timeout_interval = usec_2_cycles(2*timeout_us);
- bcp->max_concurr = max_concurr;
- bcp->max_concurr_const = max_concurr;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->cong_response_us = congested_respns_us;
- bcp->cong_reps = congested_reps;
- bcp->disabled_period = sec_2_cycles(disabled_period);
- bcp->giveup_limit = giveup_limit;
- spin_lock_init(&bcp->queue_lock);
- spin_lock_init(&bcp->uvhub_lock);
- spin_lock_init(&bcp->disable_lock);
- }
-}
-
-/*
- * Scan all cpus to collect blade and socket summaries.
- */
-static int __init get_cpu_topology(int base_pnode,
- struct uvhub_desc *uvhub_descs,
- unsigned char *uvhub_mask)
-{
- int cpu;
- int pnode;
- int uvhub;
- int socket;
- struct bau_control *bcp;
- struct uvhub_desc *bdp;
- struct socket_desc *sdp;
-
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
-
- memset(bcp, 0, sizeof(struct bau_control));
-
- pnode = uv_cpu_hub_info(cpu)->pnode;
- if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
- pr_emerg(
- "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
- cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
- return 1;
- }
-
- bcp->osnode = cpu_to_node(cpu);
- bcp->partition_base_pnode = base_pnode;
-
- uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
- *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
- bdp = &uvhub_descs[uvhub];
-
- bdp->num_cpus++;
- bdp->uvhub = uvhub;
- bdp->pnode = pnode;
-
- /* kludge: 'assuming' one node per socket, and assuming that
- disabling a socket just leaves a gap in node numbers */
- socket = bcp->osnode & 1;
- bdp->socket_mask |= (1 << socket);
- sdp = &bdp->socket[socket];
- sdp->cpu_number[sdp->num_cpus] = cpu;
- sdp->num_cpus++;
- if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
- pr_emerg("%d cpus per socket invalid\n",
- sdp->num_cpus);
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * Each socket is to get a local array of pnodes/hubs.
- */
-static void make_per_cpu_thp(struct bau_control *smaster)
-{
- int cpu;
- size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
-
- smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
- for_each_present_cpu(cpu) {
- smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
- smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
- }
-}
-
-/*
- * Each uvhub is to get a local cpumask.
- */
-static void make_per_hub_cpumask(struct bau_control *hmaster)
-{
- int sz = sizeof(cpumask_t);
-
- hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
-}
-
-/*
- * Initialize all the per_cpu information for the cpus on a given socket,
- * given what has been gathered into the socket_desc struct,
- * and report the chosen hub and socket masters back to the caller.
- */
-static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
- struct bau_control **smasterp,
- struct bau_control **hmasterp)
-{
- int i, cpu, uvhub_cpu;
- struct bau_control *bcp;
-
- for (i = 0; i < sdp->num_cpus; i++) {
- cpu = sdp->cpu_number[i];
- bcp = &per_cpu(bau_control, cpu);
- bcp->cpu = cpu;
- if (i == 0) {
- *smasterp = bcp;
- if (!(*hmasterp))
- *hmasterp = bcp;
- }
- bcp->cpus_in_uvhub = bdp->num_cpus;
- bcp->cpus_in_socket = sdp->num_cpus;
- bcp->socket_master = *smasterp;
- bcp->uvhub = bdp->uvhub;
- if (is_uv2_hub())
- bcp->uvhub_version = UV_BAU_V2;
- else if (is_uv3_hub())
- bcp->uvhub_version = UV_BAU_V3;
- else if (is_uv4_hub())
- bcp->uvhub_version = UV_BAU_V4;
- else {
- pr_emerg("uvhub version not 1, 2, 3, or 4\n");
- return 1;
- }
- bcp->uvhub_master = *hmasterp;
- uvhub_cpu = uv_cpu_blade_processor_id(cpu);
- bcp->uvhub_cpu = uvhub_cpu;
-
- /*
- * The ERROR and BUSY status registers are located pairwise over
- * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
- */
- if (uvhub_cpu < UV_CPUS_PER_AS) {
- bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
- bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
- } else {
- bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
- bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
- * UV_ACT_STATUS_SIZE;
- }
-
- if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
- pr_emerg("%d cpus per uvhub invalid\n",
- bcp->uvhub_cpu);
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * Summarize the blade and socket topology into the per_cpu structures.
- */
-static int __init summarize_uvhub_sockets(int nuvhubs,
- struct uvhub_desc *uvhub_descs,
- unsigned char *uvhub_mask)
-{
- int socket;
- int uvhub;
- unsigned short socket_mask;
-
- for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- struct uvhub_desc *bdp;
- struct bau_control *smaster = NULL;
- struct bau_control *hmaster = NULL;
-
- if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
- continue;
-
- bdp = &uvhub_descs[uvhub];
- socket_mask = bdp->socket_mask;
- socket = 0;
- while (socket_mask) {
- struct socket_desc *sdp;
- if ((socket_mask & 1)) {
- sdp = &bdp->socket[socket];
- if (scan_sock(sdp, bdp, &smaster, &hmaster))
- return 1;
- make_per_cpu_thp(smaster);
- }
- socket++;
- socket_mask = (socket_mask >> 1);
- }
- make_per_hub_cpumask(hmaster);
- }
- return 0;
-}
-
-/*
- * initialize the bau_control structure for each cpu
- */
-static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
-{
- struct uvhub_desc *uvhub_descs;
- unsigned char *uvhub_mask = NULL;
-
- if (is_uv3_hub() || is_uv2_hub())
- timeout_us = calculate_destination_timeout();
-
- uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
- if (!uvhub_descs)
- goto fail;
-
- uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
- if (!uvhub_mask)
- goto fail;
-
- if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
- goto fail;
-
- if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
- goto fail;
-
- kfree(uvhub_descs);
- kfree(uvhub_mask);
- init_per_cpu_tunables();
- return 0;
-
-fail:
- kfree(uvhub_descs);
- kfree(uvhub_mask);
- return 1;
-}
-
-static const struct bau_operations uv2_3_bau_ops __initconst = {
- .bau_gpa_to_offset = uv_gpa_to_offset,
- .read_l_sw_ack = read_mmr_sw_ack,
- .read_g_sw_ack = read_gmmr_sw_ack,
- .write_l_sw_ack = write_mmr_sw_ack,
- .write_g_sw_ack = write_gmmr_sw_ack,
- .write_payload_first = write_mmr_payload_first,
- .write_payload_last = write_mmr_payload_last,
- .wait_completion = uv2_3_wait_completion,
-};
-
-static const struct bau_operations uv4_bau_ops __initconst = {
- .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
- .read_l_sw_ack = read_mmr_proc_sw_ack,
- .read_g_sw_ack = read_gmmr_proc_sw_ack,
- .write_l_sw_ack = write_mmr_proc_sw_ack,
- .write_g_sw_ack = write_gmmr_proc_sw_ack,
- .write_payload_first = write_mmr_proc_payload_first,
- .write_payload_last = write_mmr_proc_payload_last,
- .wait_completion = uv4_wait_completion,
-};
-
-/*
- * Initialization of BAU-related structures
- */
-static int __init uv_bau_init(void)
-{
- int uvhub;
- int pnode;
- int nuvhubs;
- int cur_cpu;
- int cpus;
- int vector;
- cpumask_var_t *mask;
-
- if (!is_uv_system())
- return 0;
-
- if (is_uv4_hub())
- ops = uv4_bau_ops;
- else if (is_uv3_hub())
- ops = uv2_3_bau_ops;
- else if (is_uv2_hub())
- ops = uv2_3_bau_ops;
-
- nuvhubs = uv_num_possible_blades();
- if (nuvhubs < 2) {
- pr_crit("UV: BAU disabled - insufficient hub count\n");
- goto err_bau_disable;
- }
-
- for_each_possible_cpu(cur_cpu) {
- mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
- zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
- }
-
- uv_base_pnode = 0x7fffffff;
- for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- cpus = uv_blade_nr_possible_cpus(uvhub);
- if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
- uv_base_pnode = uv_blade_to_pnode(uvhub);
- }
-
- /* software timeouts are not supported on UV4 */
- if (is_uv3_hub() || is_uv2_hub())
- enable_timeouts();
-
- if (init_per_cpu(nuvhubs, uv_base_pnode)) {
- pr_crit("UV: BAU disabled - per CPU init failed\n");
- goto err_bau_disable;
- }
-
- vector = UV_BAU_MESSAGE;
- for_each_possible_blade(uvhub) {
- if (uv_blade_nr_possible_cpus(uvhub))
- init_uvhub(uvhub, vector, uv_base_pnode);
- }
-
- for_each_possible_blade(uvhub) {
- if (uv_blade_nr_possible_cpus(uvhub)) {
- unsigned long val;
- unsigned long mmr;
- pnode = uv_blade_to_pnode(uvhub);
- /* INIT the bau */
- val = 1L << 63;
- write_gmmr_activation(pnode, val);
- mmr = 1; /* should be 1 to broadcast to both sockets */
- write_mmr_data_broadcast(pnode, mmr);
- }
- }
-
- return 0;
-
-err_bau_disable:
-
- for_each_possible_cpu(cur_cpu)
- free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
-
- set_bau_off();
- nobau_perm = 1;
-
- return -EINVAL;
-}
-core_initcall(uv_bau_init);
-fs_initcall(uv_ptc_init);
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index abb6075397f0..18ca2261cc9a 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -90,15 +90,15 @@ static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
if (ret >= 0) {
- if (info->uv_limit == UV_AFFINITY_CPU)
+ if (info->uv.limit == UV_AFFINITY_CPU)
irq_set_status_flags(virq, IRQ_NO_BALANCING);
else
irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
- chip_data->pnode = uv_blade_to_pnode(info->uv_blade);
- chip_data->offset = info->uv_offset;
+ chip_data->pnode = uv_blade_to_pnode(info->uv.blade);
+ chip_data->offset = info->uv.offset;
irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
- handle_percpu_irq, NULL, info->uv_name);
+ handle_percpu_irq, NULL, info->uv.name);
} else {
kfree(chip_data);
}
@@ -193,10 +193,10 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
init_irq_alloc_info(&info, cpumask_of(cpu));
info.type = X86_IRQ_ALLOC_TYPE_UV;
- info.uv_limit = limit;
- info.uv_blade = mmr_blade;
- info.uv_offset = mmr_offset;
- info.uv_name = irq_name;
+ info.uv.limit = limit;
+ info.uv.blade = mmr_blade;
+ info.uv.offset = mmr_offset;
+ info.uv.name = irq_name;
return irq_domain_alloc_irqs(domain, 1,
uv_blade_to_memory_nid(mmr_blade), &info);
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 9d08ff5a755e..eafc530c8767 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -2,8 +2,9 @@
/*
* SGI NMI support routines
*
- * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Mike Travis
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Mike Travis
*/
#include <linux/cpu.h>
@@ -54,6 +55,19 @@ static struct uv_hub_nmi_s **uv_hub_nmi_list;
DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+/* Newer SMM NMI handler, not present in all systems */
+static unsigned long uvh_nmi_mmrx; /* UVH_EVENT_OCCURRED0/1 */
+static unsigned long uvh_nmi_mmrx_clear; /* UVH_EVENT_OCCURRED0/1_ALIAS */
+static int uvh_nmi_mmrx_shift; /* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
+static char *uvh_nmi_mmrx_type; /* "EXTIO_INT0" */
+
+/* Non-zero indicates newer SMM NMI handler present */
+static unsigned long uvh_nmi_mmrx_supported; /* UVH_EXTIO_INT0_BROADCAST */
+
+/* Indicates to BIOS that we want to use the newer SMM NMI handler */
+static unsigned long uvh_nmi_mmrx_req; /* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
+static int uvh_nmi_mmrx_req_shift; /* 62 */
+
/* UV hubless values */
#define NMI_CONTROL_PORT 0x70
#define NMI_DUMMY_PORT 0x71
@@ -227,13 +241,41 @@ static inline bool uv_nmi_action_is(const char *action)
/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
- if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
- uv_write_local_mmr(UVH_NMI_MMRX_REQ,
- 1UL << UVH_NMI_MMRX_REQ_SHIFT);
- nmi_mmr = UVH_NMI_MMRX;
- nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
- nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
- pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
+ /* First determine arch specific MMRs to handshake with BIOS */
+ if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) {
+ uvh_nmi_mmrx = UVH_EVENT_OCCURRED0;
+ uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS;
+ uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT;
+ uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";
+
+ uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
+ uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
+ uvh_nmi_mmrx_req_shift = 62;
+
+ } else if (UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK) {
+ uvh_nmi_mmrx = UVH_EVENT_OCCURRED1;
+ uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED1_ALIAS;
+ uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT;
+ uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0";
+
+ uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
+ uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
+ uvh_nmi_mmrx_req_shift = 62;
+
+ } else {
+ pr_err("UV:%s:cannot find EVENT_OCCURRED*_EXTIO_INT0\n",
+ __func__);
+ return;
+ }
+
+ /* Then find out if new NMI is supported */
+ if (likely(uv_read_local_mmr(uvh_nmi_mmrx_supported))) {
+ uv_write_local_mmr(uvh_nmi_mmrx_req,
+ 1UL << uvh_nmi_mmrx_req_shift);
+ nmi_mmr = uvh_nmi_mmrx;
+ nmi_mmr_clear = uvh_nmi_mmrx_clear;
+ nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
+ pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
} else {
nmi_mmr = UVH_NMI_MMR;
nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
@@ -1049,5 +1091,5 @@ void __init uv_nmi_setup_hubless(void)
/* Ensure NMI enabled in Processor Interface Reg: */
uv_reassert_nmi();
uv_register_nmi_notifier();
- pr_info("UV: Hubless NMI enabled\n");
+ pr_info("UV: PCH NMI enabled\n");
}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index f82a1337a608..54663f3e00cb 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -2,6 +2,7 @@
/*
* SGI RTC clock/timer routines.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) Dimitri Sivanich
*/
@@ -52,7 +53,7 @@ struct uv_rtc_timer_head {
struct {
int lcpu; /* systemwide logical cpu number */
u64 expires; /* next timer expiration for this cpu */
- } cpu[1];
+ } cpu[];
};
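
A user-space analogue of the flexible-array change above, sized the way struct_size() computes it (sizeof the header plus count * sizeof one element). The structure and field names here are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct timer_head_example {
	int next_cpu;
	struct {
		int lcpu;
		uint64_t expires;
	} cpu[];			/* flexible array member */
};

int main(void)
{
	int ncpus = 8;
	size_t sz = sizeof(struct timer_head_example) +
		    ncpus * sizeof(((struct timer_head_example *)0)->cpu[0]);
	struct timer_head_example *head = malloc(sz);

	if (!head)
		return 1;
	head->cpu[ncpus - 1].expires = ~0ULL;	/* last element is addressable */
	printf("allocated %zu bytes for %d cpus\n", sz, ncpus);
	free(head);
	return 0;
}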
/*
@@ -84,10 +85,8 @@ static void uv_rtc_send_IPI(int cpu)
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
- if (is_uvx_hub())
- return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
- UVXH_EVENT_OCCURRED2_RTC_1_MASK;
- return 0;
+ return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED2) &
+ UVH_EVENT_OCCURRED2_RTC_1_MASK;
}
/* Setup interrupt and return non-zero if early expiration occurred. */
@@ -101,8 +100,8 @@ static int uv_setup_intr(int cpu, u64 expires)
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
- uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
- UVXH_EVENT_OCCURRED2_RTC_1_MASK);
+ uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED2_ALIAS,
+ UVH_EVENT_OCCURRED2_RTC_1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
@@ -148,9 +147,8 @@ static __init int uv_rtc_allocate_timers(void)
struct uv_rtc_timer_head *head = blade_info[bid];
if (!head) {
- head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
- (uv_blade_nr_possible_cpus(bid) *
- 2 * sizeof(u64)),
+ head = kmalloc_node(struct_size(head, cpu,
+ uv_blade_nr_possible_cpus(bid)),
GFP_KERNEL, nid);
if (!head) {
uv_rtc_deallocate_timers();
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 1ed1208931e0..22fda7d99159 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -9,6 +9,7 @@
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
+#include <asm/sev-es.h>
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
@@ -38,6 +39,25 @@ void __init reserve_real_mode(void)
crash_reserve_low_1M();
}
+static void sme_sev_setup_real_mode(struct trampoline_header *th)
+{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ if (sme_active())
+ th->flags |= TH_FLAGS_SME_ACTIVE;
+
+ if (sev_es_active()) {
+ /*
+ * Skip the call to verify_cpu() in secondary_startup_64 as it
+ * will cause #VC exceptions when the AP can't handle them yet.
+ */
+ th->start = (u64) secondary_startup_64_no_verify;
+
+ if (sev_es_setup_ap_jump_table(real_mode_header))
+ panic("Failed to get/update SEV-ES AP Jump Table");
+ }
+#endif
+}
+
static void __init setup_real_mode(void)
{
u16 real_mode_seg;
@@ -104,13 +124,13 @@ static void __init setup_real_mode(void)
*trampoline_cr4_features = mmu_cr4_features;
trampoline_header->flags = 0;
- if (sme_active())
- trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
trampoline_pgd[511] = init_top_pgt[511].pgd;
#endif
+
+ sme_sev_setup_real_mode(trampoline_header);
}
/*
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
index af04512c02d9..8c1db5bf5d78 100644
--- a/arch/x86/realmode/rm/header.S
+++ b/arch/x86/realmode/rm/header.S
@@ -20,6 +20,9 @@ SYM_DATA_START(real_mode_header)
/* SMP trampoline */
.long pa_trampoline_start
.long pa_trampoline_header
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ .long pa_sev_es_trampoline_start
+#endif
#ifdef CONFIG_X86_64
.long pa_trampoline_pgd;
#endif
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 251758ed7443..84c5d1b33d10 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -56,6 +56,7 @@ SYM_CODE_START(trampoline_start)
testl %eax, %eax # Check for return code
jnz no_longmode
+.Lswitch_to_protected:
/*
* GDT tables in non default location kernel can be beyond 16MB and
* lgdt will not be able to load the address as in real mode default
@@ -80,6 +81,25 @@ no_longmode:
jmp no_longmode
SYM_CODE_END(trampoline_start)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/* SEV-ES supports non-zero IP for entry points - no alignment needed */
+SYM_CODE_START(sev_es_trampoline_start)
+ cli # We should be safe anyway
+
+ LJMPW_RM(1f)
+1:
+ mov %cs, %ax # Code and data in the same place
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %ss
+
+ # Setup stack
+ movl $rm_stack_end, %esp
+
+ jmp .Lswitch_to_protected
+SYM_CODE_END(sev_es_trampoline_start)
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
#include "../kernel/verify_cpu.S"
.section ".text32","ax"
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index a42015b305f4..af38469afd14 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -362,6 +362,9 @@ function convert_operands(count,opnd, i,j,imm,mod)
END {
if (awkchecked != "")
exit 1
+
+ print "#ifndef __BOOT_COMPRESSED\n"
+
# print escape opcode map's array
print "/* Escape opcode map array */"
print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
@@ -388,6 +391,51 @@ END {
for (j = 0; j < max_lprefix; j++)
if (atable[i,j])
print " ["i"]["j"] = "atable[i,j]","
- print "};"
+ print "};\n"
+
+ print "#else /* !__BOOT_COMPRESSED */\n"
+
+ print "/* Escape opcode map array */"
+ print "static const insn_attr_t *inat_escape_tables[INAT_ESC_MAX + 1]" \
+ "[INAT_LSTPFX_MAX + 1];"
+ print ""
+
+ print "/* Group opcode map array */"
+ print "static const insn_attr_t *inat_group_tables[INAT_GRP_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1];"
+ print ""
+
+ print "/* AVX opcode map array */"
+ print "static const insn_attr_t *inat_avx_tables[X86_VEX_M_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1];"
+ print ""
+
+ print "static void inat_init_tables(void)"
+ print "{"
+
+ # print escape opcode map's array
+ print "\t/* Print Escape opcode map array */"
+ for (i = 0; i < geid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (etable[i,j])
+ print "\tinat_escape_tables["i"]["j"] = "etable[i,j]";"
+ print ""
+
+ # print group opcode map's array
+ print "\t/* Print Group opcode map array */"
+ for (i = 0; i < ggid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (gtable[i,j])
+ print "\tinat_group_tables["i"]["j"] = "gtable[i,j]";"
+ print ""
+ # print AVX opcode map's array
+ print "\t/* Print AVX opcode map array */"
+ for (i = 0; i < gaid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (atable[i,j])
+ print "\tinat_avx_tables["i"]["j"] = "atable[i,j]";"
+
+ print "}"
+ print "#endif"
}
diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
index ff6bba2c8ab6..b07824500363 100644
--- a/arch/x86/um/asm/checksum.h
+++ b/arch/x86/um/asm/checksum.h
@@ -20,22 +20,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-/*
- * Note: when you get a NULL pointer exception here this means someone
- * passed in an incorrect kernel address to one of these functions.
- *
- * If you use these functions directly please don't forget the
- * access_ok().
- */
-
-static __inline__
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum
diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h
index b9ac7c9eb72c..0b13c2947ad1 100644
--- a/arch/x86/um/asm/checksum_32.h
+++ b/arch/x86/um/asm/checksum_32.h
@@ -35,27 +35,4 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold(sum);
}
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst,
- int len, __wsum sum, int *err_ptr)
-{
- if (access_ok(dst, len)) {
- if (copy_to_user(dst, src, len)) {
- *err_ptr = -EFAULT;
- return (__force __wsum)-1;
- }
-
- return csum_partial(src, len, sum);
- }
-
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
-}
-
#endif
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 22e741e0b10c..4409306364dc 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -32,7 +32,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
-#include <linux/frame.h>
+#include <linux/objtool.h>
#include <xen/xen.h>
#include <xen/events.h>
@@ -1014,8 +1014,6 @@ void __init xen_setup_vcpu_info_placement(void)
}
static const struct pv_info xen_info __initconst = {
- .shared_kernel_pmd = 0,
-
.extra_user_64bit_cs = FLAT_USER_CS64,
.name = "Xen",
};
@@ -1302,7 +1300,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
* any NUMA information the kernel tries to get from ACPI will
* be meaningless. Prevent it from trying.
*/
- acpi_numa = -1;
+ disable_srat();
#endif
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
@@ -1314,10 +1312,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_start_info->nr_pages);
xen_reserve_special_pages();
- /* keep using Xen gdt for now; no urgent need to change it */
-
- pv_info.kernel_rpl = 0;
-
/*
* We used to do this in xen_arch_setup, but that is too late
* on AMD where early_cpu_init (run before ->arch_setup()) calls
@@ -1376,6 +1370,15 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
xen_boot_params_init_edd();
+
+#ifdef CONFIG_ACPI
+ /*
+ * Disable selecting "Firmware First mode" for correctable
+ * memory errors, as this is the duty of the hypervisor to
+ * decide.
+ */
+ acpi_disable_cmcff = 1;
+#endif
}
if (!boot_params.screen_info.orig_video_isVGA)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 3273c985d3dd..cf2ade864c30 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -285,13 +285,6 @@ static void xen_set_pte(pte_t *ptep, pte_t pteval)
__xen_set_pte(ptep, pteval);
}
-static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
- __xen_set_pte(ptep, pteval);
-}
-
pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
@@ -1149,7 +1142,7 @@ static void __init xen_pagetable_p2m_free(void)
* We could be in __ka space.
* We roundup to the PMD, which means that if anybody at this stage is
* using the __ka address of xen_start_info or
- * xen_start_info->shared_info they are in going to crash. Fortunatly
+ * xen_start_info->shared_info they are in going to crash. Fortunately
* we have already revectored in xen_setup_kernel_pagetable.
*/
size = roundup(size, PMD_SIZE);
@@ -2105,7 +2098,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.release_pmd = xen_release_pmd_init,
.set_pte = xen_set_pte_init,
- .set_pte_at = xen_set_pte_at,
.set_pmd = xen_set_pmd_hyper,
.ptep_modify_prot_start = __ptep_modify_prot_start,
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e997e0119c02..d8a29dc5a284 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -217,20 +217,6 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
-config SECCOMP
- bool
- prompt "Enable seccomp to safely compute untrusted bytecode"
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
config FAST_SYSCALL_XTENSA
bool "Enable fast atomic syscalls"
default n
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index 243a5fe79d3c..44ec1d0b2a35 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -37,32 +37,27 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+#define _HAVE_ARCH_CSUM_AND_COPY
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*/
static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_generic((__force const void *)src, dst,
- len, sum, err_ptr, NULL);
- if (len)
- *err_ptr = -EFAULT;
- return sum;
+ if (!access_ok(src, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
/*
@@ -243,15 +238,10 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum, int *err_ptr)
+ void __user *dst, int len)
{
- if (access_ok(dst, len))
- return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
-
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_generic(src, (__force void *)dst, len);
}
#endif
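Both helpers now follow the reduced calling convention used throughout this series: there is no initial sum and no err_ptr argument, and a return value of 0 signals a fault (the assembly change below seeds the running sum with -1, so a successful checksum is never 0). A short, hypothetical caller under that convention:

	/* Hypothetical caller: a zero return now means -EFAULT instead of the
	 * old *err_ptr side channel. */
	static int copy_and_checksum_from_user(const void __user *src, void *dst,
					       int len, __wsum *csum)
	{
		*csum = csum_and_copy_from_user(src, dst, len);
		if (!*csum)
			return -EFAULT;
		return 0;
	}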
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 76cee341507b..b3b17d6c50f0 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -448,7 +448,7 @@ static void do_signal(struct pt_regs *regs)
regs->areg[2] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index 4cb9ca58d9ad..cf1bed1a5bd6 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -175,19 +175,14 @@ ENDPROC(csum_partial)
*/
/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
- int sum, int *src_err_ptr, int *dst_err_ptr)
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
a2 = src
a3 = dst
a4 = len
a5 = sum
- a6 = src_err_ptr
- a7 = dst_err_ptr
a8 = temp
a9 = temp
a10 = temp
- a11 = original len for exception handling
- a12 = original dst for exception handling
This function is optimized for 4-byte aligned addresses. Other
alignments work, but not nearly as efficiently.
@@ -196,8 +191,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
ENTRY(csum_partial_copy_generic)
abi_entry_default
- mov a12, a3
- mov a11, a4
+ movi a5, -1
or a10, a2, a3
/* We optimize the following alignment tests for the 4-byte
@@ -228,26 +222,26 @@ ENTRY(csum_partial_copy_generic)
#endif
EX(10f) l32i a9, a2, 0
EX(10f) l32i a8, a2, 4
-EX(11f) s32i a9, a3, 0
-EX(11f) s32i a8, a3, 4
+EX(10f) s32i a9, a3, 0
+EX(10f) s32i a8, a3, 4
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 8
EX(10f) l32i a8, a2, 12
-EX(11f) s32i a9, a3, 8
-EX(11f) s32i a8, a3, 12
+EX(10f) s32i a9, a3, 8
+EX(10f) s32i a8, a3, 12
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 16
EX(10f) l32i a8, a2, 20
-EX(11f) s32i a9, a3, 16
-EX(11f) s32i a8, a3, 20
+EX(10f) s32i a9, a3, 16
+EX(10f) s32i a8, a3, 20
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 24
EX(10f) l32i a8, a2, 28
-EX(11f) s32i a9, a3, 24
-EX(11f) s32i a8, a3, 28
+EX(10f) s32i a9, a3, 24
+EX(10f) s32i a8, a3, 28
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
addi a2, a2, 32
@@ -267,7 +261,7 @@ EX(11f) s32i a8, a3, 28
.Loop6:
#endif
EX(10f) l32i a9, a2, 0
-EX(11f) s32i a9, a3, 0
+EX(10f) s32i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 4
addi a3, a3, 4
@@ -298,7 +292,7 @@ EX(11f) s32i a9, a3, 0
.Loop7:
#endif
EX(10f) l16ui a9, a2, 0
-EX(11f) s16i a9, a3, 0
+EX(10f) s16i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 2
addi a3, a3, 2
@@ -309,7 +303,7 @@ EX(11f) s16i a9, a3, 0
/* This section processes a possible trailing odd byte. */
_bbci.l a4, 0, 8f /* 1-byte chunk */
EX(10f) l8ui a9, a2, 0
-EX(11f) s8i a9, a3, 0
+EX(10f) s8i a9, a3, 0
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* shift byte to bits 8..15 */
#endif
@@ -334,8 +328,8 @@ EX(11f) s8i a9, a3, 0
#endif
EX(10f) l8ui a9, a2, 0
EX(10f) l8ui a8, a2, 1
-EX(11f) s8i a9, a3, 0
-EX(11f) s8i a8, a3, 1
+EX(10f) s8i a9, a3, 0
+EX(10f) s8i a8, a3, 1
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* combine into a single 16-bit value */
#else /* for checksum computation */
@@ -356,38 +350,7 @@ ENDPROC(csum_partial_copy_generic)
# Exception handler:
.section .fixup, "ax"
-/*
- a6 = src_err_ptr
- a7 = dst_err_ptr
- a11 = original len for exception handling
- a12 = original dst for exception handling
-*/
-
10:
- _movi a2, -EFAULT
- s32i a2, a6, 0 /* src_err_ptr */
-
- # clear the complete destination - computing the rest
- # is too much work
- movi a2, 0
-#if XCHAL_HAVE_LOOPS
- loopgtz a11, 2f
-#else
- beqz a11, 2f
- add a11, a11, a12 /* a11 = ending address */
-.Leloop:
-#endif
- s8i a2, a12, 0
- addi a12, a12, 1
-#if !XCHAL_HAVE_LOOPS
- blt a12, a11, .Leloop
-#endif
-2:
- abi_ret_default
-
-11:
- movi a2, -EFAULT
- s32i a2, a7, 0 /* dst_err_ptr */
movi a2, 0
abi_ret_default
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index a05b306cf371..ad9d59d93f39 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -79,67 +79,32 @@ void __init zones_init(void)
free_area_init(max_zone_pfn);
}
-#ifdef CONFIG_HIGHMEM
-static void __init free_area_high(unsigned long pfn, unsigned long end)
-{
- for (; pfn < end; pfn++)
- free_highmem_page(pfn_to_page(pfn));
-}
-
static void __init free_highpages(void)
{
+#ifdef CONFIG_HIGHMEM
unsigned long max_low = max_low_pfn;
- struct memblock_region *mem, *res;
+ phys_addr_t range_start, range_end;
+ u64 i;
- reset_all_zones_managed_pages();
/* set highmem page free */
- for_each_memblock(memory, mem) {
- unsigned long start = memblock_region_memory_base_pfn(mem);
- unsigned long end = memblock_region_memory_end_pfn(mem);
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &range_start, &range_end, NULL) {
+ unsigned long start = PHYS_PFN(range_start);
+ unsigned long end = PHYS_PFN(range_end);
/* Ignore complete lowmem entries */
if (end <= max_low)
continue;
- if (memblock_is_nomap(mem))
- continue;
-
/* Truncate partial highmem entries */
if (start < max_low)
start = max_low;
- /* Find and exclude any reserved regions */
- for_each_memblock(reserved, res) {
- unsigned long res_start, res_end;
-
- res_start = memblock_region_reserved_base_pfn(res);
- res_end = memblock_region_reserved_end_pfn(res);
-
- if (res_end < start)
- continue;
- if (res_start < start)
- res_start = start;
- if (res_start > end)
- res_start = end;
- if (res_end > end)
- res_end = end;
- if (res_start != start)
- free_area_high(start, res_start);
- start = res_end;
- if (start == end)
- break;
- }
-
- /* And now free anything which remains */
- if (start < end)
- free_area_high(start, end);
+ for (; start < end; start++)
+ free_highmem_page(pfn_to_page(start));
}
-}
-#else
-static void __init free_highpages(void)
-{
-}
#endif
+}
/*
* Initialize memory pages.