Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/Kconfig27
-rw-r--r--arch/alpha/configs/defconfig13
-rw-r--r--arch/alpha/include/asm/atomic.h88
-rw-r--r--arch/alpha/include/asm/cmpxchg.h12
-rw-r--r--arch/alpha/include/asm/machvec.h6
-rw-r--r--arch/alpha/include/asm/mmzone.h100
-rw-r--r--arch/alpha/include/asm/page.h6
-rw-r--r--arch/alpha/include/asm/pgalloc.h1
-rw-r--r--arch/alpha/include/asm/pgtable.h5
-rw-r--r--arch/alpha/include/asm/topology.h39
-rw-r--r--arch/alpha/include/asm/unaligned.h12
-rw-r--r--arch/alpha/include/uapi/asm/mman.h3
-rw-r--r--arch/alpha/include/uapi/asm/socket.h2
-rw-r--r--arch/alpha/kernel/core_marvel.c53
-rw-r--r--arch/alpha/kernel/core_wildfire.c29
-rw-r--r--arch/alpha/kernel/pci_iommu.c29
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/proto.h8
-rw-r--r--arch/alpha/kernel/setup.c18
-rw-r--r--arch/alpha/kernel/smp.c1
-rw-r--r--arch/alpha/kernel/srmcons.c9
-rw-r--r--arch/alpha/kernel/sys_marvel.c5
-rw-r--r--arch/alpha/kernel/sys_wildfire.c5
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/alpha/mm/Makefile2
-rw-r--r--arch/alpha/mm/init.c3
-rw-r--r--arch/alpha/mm/numa.c223
-rw-r--r--arch/arc/Kconfig13
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/include/asm/atomic.h60
-rw-r--r--arch/arc/include/asm/cmpxchg.h14
-rw-r--r--arch/arc/include/asm/mmzone.h40
-rw-r--r--arch/arc/include/asm/page.h12
-rw-r--r--arch/arc/include/asm/pgalloc.h2
-rw-r--r--arch/arc/include/asm/pgtable.h20
-rw-r--r--arch/arc/include/uapi/asm/page.h1
-rw-r--r--arch/arc/include/uapi/asm/sigcontext.h1
-rw-r--r--arch/arc/kernel/entry.S4
-rw-r--r--arch/arc/kernel/kgdb.c1
-rw-r--r--arch/arc/kernel/kprobes.c16
-rw-r--r--arch/arc/kernel/process.c8
-rw-r--r--arch/arc/kernel/signal.c47
-rw-r--r--arch/arc/kernel/smp.c1
-rw-r--r--arch/arc/kernel/stacktrace.c2
-rw-r--r--arch/arc/kernel/troubleshoot.c8
-rw-r--r--arch/arc/kernel/vmlinux.lds.S2
-rw-r--r--arch/arc/mm/init.c32
-rw-r--r--arch/arc/mm/ioremap.c5
-rw-r--r--arch/arc/mm/tlb.c2
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/boot/dts/am33xx-clocks.dtsi10
-rw-r--r--arch/arm/boot/dts/am43xx-clocks.dtsi12
-rw-r--r--arch/arm/boot/dts/arm-realview-eb.dtsi2
-rw-r--r--arch/arm/boot/dts/arm-realview-pb1176.dts2
-rw-r--r--arch/arm/boot/dts/arm-realview-pb11mp.dts2
-rw-r--r--arch/arm/boot/dts/arm-realview-pbx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx53-ppd.dts2
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-common.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6q-dhcom-som.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi2
-rw-r--r--arch/arm/boot/dts/imx7d-meerkat96.dts2
-rw-r--r--arch/arm/boot/dts/imx7d-pico.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi7
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi2
-rw-r--r--arch/arm/boot/dts/vexpress-v2m.dtsi2
-rw-r--r--arch/arm/configs/footbridge_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig2
-rw-r--r--arch/arm/crypto/Makefile10
-rw-r--r--arch/arm/crypto/poly1305-core.S_shipped1158
-rw-r--r--arch/arm/crypto/sha256-core.S_shipped2816
-rw-r--r--arch/arm/crypto/sha512-core.S_shipped1869
-rw-r--r--arch/arm/include/asm/atomic.h96
-rw-r--r--arch/arm/include/asm/cmpxchg.h20
-rw-r--r--arch/arm/include/asm/cpuidle.h10
-rw-r--r--arch/arm/include/asm/pgalloc.h1
-rw-r--r--arch/arm/include/asm/sync_bitops.h2
-rw-r--r--arch/arm/include/asm/tlbflush.h13
-rw-r--r--arch/arm/include/asm/unaligned.h27
-rw-r--r--arch/arm/kernel/irq.c22
-rw-r--r--arch/arm/kernel/perf_event_v7.c4
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/setup.c16
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/mach-imx/pm-imx27.c1
-rw-r--r--arch/arm/mach-ixp4xx/common.c26
-rw-r--r--arch/arm/mach-npcm/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c14
-rw-r--r--arch/arm/mach-omap1/board-h2.c4
-rw-r--r--arch/arm/mach-omap1/pm.c10
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c2
-rw-r--r--arch/arm/mach-pxa/pxa_cplds_irqs.c7
-rw-r--r--arch/arm/mach-zynq/Kconfig2
-rw-r--r--arch/arm/mm/init.c13
-rw-r--r--arch/arm/mm/tlb-v6.S2
-rw-r--r--arch/arm/mm/tlb-v7.S2
-rw-r--r--arch/arm/probes/kprobes/core.c23
-rw-r--r--arch/arm/tools/syscall.tbl2
-rw-r--r--arch/arm/xen/mm.c20
-rw-r--r--arch/arm64/Kbuild3
-rw-r--r--arch/arm64/Kconfig49
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi23
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5.dtsi94
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi481
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi621
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77950.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi12
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi3
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi13
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board.dts31
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-main.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi7
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi11
-rw-r--r--arch/arm64/crypto/Makefile10
-rw-r--r--arch/arm64/crypto/poly1305-core.S_shipped835
-rw-r--r--arch/arm64/crypto/sha256-core.S_shipped2069
-rw-r--r--arch/arm64/crypto/sha512-core.S_shipped1093
-rw-r--r--arch/arm64/include/asm/Kbuild2
-rw-r--r--arch/arm64/include/asm/alternative-macros.h9
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h3
-rw-r--r--arch/arm64/include/asm/asm-prototypes.h6
-rw-r--r--arch/arm64/include/asm/asm_pointer_auth.h49
-rw-r--r--arch/arm64/include/asm/assembler.h98
-rw-r--r--arch/arm64/include/asm/atomic.h2
-rw-r--r--arch/arm64/include/asm/barrier.h2
-rw-r--r--arch/arm64/include/asm/cache.h2
-rw-r--r--arch/arm64/include/asm/cacheflush.h71
-rw-r--r--arch/arm64/include/asm/compiler.h16
-rw-r--r--arch/arm64/include/asm/cpu.h45
-rw-r--r--arch/arm64/include/asm/cpucaps.h74
-rw-r--r--arch/arm64/include/asm/cpufeature.h15
-rw-r--r--arch/arm64/include/asm/cpuidle.h35
-rw-r--r--arch/arm64/include/asm/efi.h2
-rw-r--r--arch/arm64/include/asm/exception.h34
-rw-r--r--arch/arm64/include/asm/fpsimd.h2
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h4
-rw-r--r--arch/arm64/include/asm/hugetlb.h3
-rw-r--r--arch/arm64/include/asm/insn-def.h9
-rw-r--r--arch/arm64/include/asm/insn.h67
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h19
-rw-r--r--arch/arm64/include/asm/kvm_arm.h3
-rw-r--r--arch/arm64/include/asm/kvm_asm.h4
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h8
-rw-r--r--arch/arm64/include/asm/kvm_host.h23
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h17
-rw-r--r--arch/arm64/include/asm/kvm_mte.h66
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h42
-rw-r--r--arch/arm64/include/asm/linkage.h8
-rw-r--r--arch/arm64/include/asm/memory.h24
-rw-r--r--arch/arm64/include/asm/mmu_context.h4
-rw-r--r--arch/arm64/include/asm/module.lds.h17
-rw-r--r--arch/arm64/include/asm/mte-def.h1
-rw-r--r--arch/arm64/include/asm/mte-kasan.h93
-rw-r--r--arch/arm64/include/asm/mte.h8
-rw-r--r--arch/arm64/include/asm/page.h12
-rw-r--r--arch/arm64/include/asm/patching.h13
-rw-r--r--arch/arm64/include/asm/perf_event.h5
-rw-r--r--arch/arm64/include/asm/pgalloc.h1
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h7
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h29
-rw-r--r--arch/arm64/include/asm/pointer_auth.h59
-rw-r--r--arch/arm64/include/asm/preempt.h2
-rw-r--r--arch/arm64/include/asm/processor.h16
-rw-r--r--arch/arm64/include/asm/scs.h8
-rw-r--r--arch/arm64/include/asm/sdei.h10
-rw-r--r--arch/arm64/include/asm/smp.h2
-rw-r--r--arch/arm64/include/asm/stacktrace.h32
-rw-r--r--arch/arm64/include/asm/sysreg.h5
-rw-r--r--arch/arm64/include/asm/tlb.h4
-rw-r--r--arch/arm64/include/asm/unistd32.h4
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h11
-rw-r--r--arch/arm64/kernel/Makefile11
-rw-r--r--arch/arm64/kernel/acpi.c22
-rw-r--r--arch/arm64/kernel/alternative.c2
-rw-r--r--arch/arm64/kernel/asm-offsets.c17
-rw-r--r--arch/arm64/kernel/cpufeature.c210
-rw-r--r--arch/arm64/kernel/cpuinfo.c58
-rw-r--r--arch/arm64/kernel/efi-entry.S9
-rw-r--r--arch/arm64/kernel/entry-common.c256
-rw-r--r--arch/arm64/kernel/entry-fpsimd.S22
-rw-r--r--arch/arm64/kernel/entry.S369
-rw-r--r--arch/arm64/kernel/fpsimd.c6
-rw-r--r--arch/arm64/kernel/ftrace.c1
-rw-r--r--arch/arm64/kernel/head.S76
-rw-r--r--arch/arm64/kernel/hibernate-asm.S7
-rw-r--r--arch/arm64/kernel/hibernate.c20
-rw-r--r--arch/arm64/kernel/idle.c46
-rw-r--r--arch/arm64/kernel/idreg-override.c3
-rw-r--r--arch/arm64/kernel/image-vars.h2
-rw-r--r--arch/arm64/kernel/jump_label.c1
-rw-r--r--arch/arm64/kernel/kaslr.c12
-rw-r--r--arch/arm64/kernel/kgdb.c1
-rw-r--r--arch/arm64/kernel/machine_kexec.c30
-rw-r--r--arch/arm64/kernel/mte.c18
-rw-r--r--arch/arm64/kernel/patching.c150
-rw-r--r--arch/arm64/kernel/perf_callchain.c2
-rw-r--r--arch/arm64/kernel/perf_event.c40
-rw-r--r--arch/arm64/kernel/probes/kprobes.c35
-rw-r--r--arch/arm64/kernel/probes/simulate-insn.c1
-rw-r--r--arch/arm64/kernel/probes/uprobes.c2
-rw-r--r--arch/arm64/kernel/process.c101
-rw-r--r--arch/arm64/kernel/ptrace.c2
-rw-r--r--arch/arm64/kernel/sdei.c64
-rw-r--r--arch/arm64/kernel/setup.c9
-rw-r--r--arch/arm64/kernel/signal.c26
-rw-r--r--arch/arm64/kernel/smccc-call.S83
-rw-r--r--arch/arm64/kernel/smp.c17
-rw-r--r--arch/arm64/kernel/smp_spin_table.c7
-rw-r--r--arch/arm64/kernel/stacktrace.c18
-rw-r--r--arch/arm64/kernel/suspend.c12
-rw-r--r--arch/arm64/kernel/sys_compat.c2
-rw-r--r--arch/arm64/kernel/traps.c135
-rw-r--r--arch/arm64/kvm/Kconfig5
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/arch_timer.c162
-rw-r--r--arch/arm64/kvm/arm.c53
-rw-r--r--arch/arm64/kvm/guest.c134
-rw-r--r--arch/arm64/kvm/hyp/entry.S7
-rw-r--r--arch/arm64/kvm/hyp/exception.c21
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S6
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/adjust_pc.h18
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h21
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/gfp.h45
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mem_protect.h2
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/memory.h7
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mm.h13
-rw-r--r--arch/arm64/kvm/hyp/nvhe/cache.S4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c8
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c60
-rw-r--r--arch/arm64/kvm/hyp/nvhe/page_alloc.c112
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c35
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c3
-rw-r--r--arch/arm64/kvm/hyp/nvhe/tlb.c2
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c61
-rw-r--r--arch/arm64/kvm/hyp/reserved_mem.c3
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c3
-rw-r--r--arch/arm64/kvm/mmu.c212
-rw-r--r--arch/arm64/kvm/pmu-emul.c4
-rw-r--r--arch/arm64/kvm/reset.c32
-rw-r--r--arch/arm64/kvm/sys_regs.c74
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c36
-rw-r--r--arch/arm64/kvm/vgic/vgic-v2.c19
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c19
-rw-r--r--arch/arm64/kvm/vgic/vgic.c14
-rw-r--r--arch/arm64/lib/Makefile4
-rw-r--r--arch/arm64/lib/clear_user.S47
-rw-r--r--arch/arm64/lib/insn.c (renamed from arch/arm64/kernel/insn.c)249
-rw-r--r--arch/arm64/lib/kasan_sw_tags.S76
-rw-r--r--arch/arm64/lib/memchr.S65
-rw-r--r--arch/arm64/lib/memcmp.S346
-rw-r--r--arch/arm64/lib/memcpy.S272
-rw-r--r--arch/arm64/lib/memmove.S189
-rw-r--r--arch/arm64/lib/mte.S20
-rw-r--r--arch/arm64/lib/strcmp.S289
-rw-r--r--arch/arm64/lib/strlen.S258
-rw-r--r--arch/arm64/lib/strncmp.S406
-rw-r--r--arch/arm64/lib/uaccess_flushcache.c4
-rw-r--r--arch/arm64/mm/cache.S158
-rw-r--r--arch/arm64/mm/context.c6
-rw-r--r--arch/arm64/mm/dma-mapping.c2
-rw-r--r--arch/arm64/mm/fault.c50
-rw-r--r--arch/arm64/mm/flush.c33
-rw-r--r--arch/arm64/mm/hugetlbpage.c5
-rw-r--r--arch/arm64/mm/init.c41
-rw-r--r--arch/arm64/mm/ioremap.c4
-rw-r--r--arch/arm64/mm/mmu.c38
-rw-r--r--arch/arm64/mm/proc.S36
-rw-r--r--arch/arm64/mm/ptdump.c4
-rw-r--r--arch/arm64/net/bpf_jit_comp.c20
-rw-r--r--arch/arm64/tools/Makefile22
-rw-r--r--arch/arm64/tools/cpucaps66
-rwxr-xr-xarch/arm64/tools/gen-cpucaps.awk40
-rw-r--r--arch/csky/Kconfig3
-rw-r--r--arch/csky/include/asm/cmpxchg.h8
-rw-r--r--arch/csky/include/asm/pgalloc.h2
-rw-r--r--arch/csky/include/asm/pgtable.h1
-rw-r--r--arch/csky/kernel/asm-offsets.c1
-rw-r--r--arch/csky/kernel/probes/kprobes.c17
-rw-r--r--arch/csky/kernel/smp.c1
-rw-r--r--arch/csky/kernel/stacktrace.c2
-rw-r--r--arch/csky/mm/syscache.c12
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/atomic.h97
-rw-r--r--arch/h8300/include/asm/cmpxchg.h66
-rw-r--r--arch/h8300/kernel/asm-offsets.c1
-rw-r--r--arch/h8300/kernel/process.c2
-rw-r--r--arch/h8300/kernel/setup.c2
-rw-r--r--arch/hexagon/include/asm/atomic.h28
-rw-r--r--arch/hexagon/include/asm/cmpxchg.h4
-rw-r--r--arch/hexagon/include/asm/pgtable.h4
-rw-r--r--arch/hexagon/kernel/process.c2
-rw-r--r--arch/ia64/Kconfig9
-rw-r--r--arch/ia64/include/asm/atomic.h74
-rw-r--r--arch/ia64/include/asm/cmpxchg.h16
-rw-r--r--arch/ia64/include/asm/page.h6
-rw-r--r--arch/ia64/include/asm/pal.h3
-rw-r--r--arch/ia64/include/asm/pgalloc.h1
-rw-r--r--arch/ia64/include/asm/pgtable.h1
-rw-r--r--arch/ia64/include/asm/spinlock.h2
-rw-r--r--arch/ia64/include/asm/unaligned.h12
-rw-r--r--arch/ia64/include/asm/uv/uv_hub.h2
-rw-r--r--arch/ia64/include/uapi/asm/cmpxchg.h10
-rw-r--r--arch/ia64/kernel/efi_stub.S2
-rw-r--r--arch/ia64/kernel/kprobes.c16
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/process.c4
-rw-r--r--arch/ia64/kernel/ptrace.c8
-rw-r--r--arch/ia64/kernel/smpboot.c1
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/ia64/kernel/topology.c5
-rw-r--r--arch/ia64/mm/numa.c5
-rw-r--r--arch/m68k/68000/dragen2.c1
-rw-r--r--arch/m68k/68000/screen.h804
-rw-r--r--arch/m68k/Kconfig6
-rw-r--r--arch/m68k/Kconfig.cpu10
-rw-r--r--arch/m68k/Kconfig.machine3
-rw-r--r--arch/m68k/Makefile3
-rw-r--r--arch/m68k/atari/config.c12
-rw-r--r--arch/m68k/coldfire/clk.c21
-rw-r--r--arch/m68k/coldfire/m5206.c25
-rw-r--r--arch/m68k/coldfire/m520x.c51
-rw-r--r--arch/m68k/coldfire/m523x.c42
-rw-r--r--arch/m68k/coldfire/m5249.c33
-rw-r--r--arch/m68k/coldfire/m525x.c33
-rw-r--r--arch/m68k/coldfire/m5272.c35
-rw-r--r--arch/m68k/coldfire/m527x.c44
-rw-r--r--arch/m68k/coldfire/m528x.c42
-rw-r--r--arch/m68k/coldfire/m5307.c27
-rw-r--r--arch/m68k/coldfire/m53xx.c78
-rw-r--r--arch/m68k/coldfire/m5407.c25
-rw-r--r--arch/m68k/coldfire/m5441x.c126
-rw-r--r--arch/m68k/coldfire/m54xx.c33
-rw-r--r--arch/m68k/configs/amiga_defconfig15
-rw-r--r--arch/m68k/configs/apollo_defconfig5
-rw-r--r--arch/m68k/configs/atari_defconfig13
-rw-r--r--arch/m68k/configs/bvme6000_defconfig5
-rw-r--r--arch/m68k/configs/hp300_defconfig5
-rw-r--r--arch/m68k/configs/mac_defconfig14
-rw-r--r--arch/m68k/configs/multi_defconfig21
-rw-r--r--arch/m68k/configs/mvme147_defconfig5
-rw-r--r--arch/m68k/configs/mvme16x_defconfig5
-rw-r--r--arch/m68k/configs/q40_defconfig13
-rw-r--r--arch/m68k/configs/sun3_defconfig5
-rw-r--r--arch/m68k/configs/sun3x_defconfig5
-rw-r--r--arch/m68k/emu/nfblock.c20
-rw-r--r--arch/m68k/emu/nfcon.c2
-rw-r--r--arch/m68k/include/asm/atomic.h60
-rw-r--r--arch/m68k/include/asm/cmpxchg.h10
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h2
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h2
-rw-r--r--arch/m68k/include/asm/mcfclk.h5
-rw-r--r--arch/m68k/include/asm/mmu_context.h2
-rw-r--r--arch/m68k/include/asm/mmzone.h10
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h1
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h2
-rw-r--r--arch/m68k/include/asm/page.h2
-rw-r--r--arch/m68k/include/asm/page_mm.h35
-rw-r--r--arch/m68k/include/asm/page_no.h6
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h1
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h1
-rw-r--r--arch/m68k/include/asm/tlbflush.h2
-rw-r--r--arch/m68k/include/asm/unaligned.h26
-rw-r--r--arch/m68k/kernel/dma.c3
-rw-r--r--arch/m68k/kernel/process.c2
-rw-r--r--arch/m68k/kernel/signal.c3
-rw-r--r--arch/m68k/kernel/sys_m68k.c4
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/m68k/mac/config.c24
-rw-r--r--arch/m68k/mm/init.c20
-rw-r--r--arch/m68k/q40/config.c37
-rw-r--r--arch/microblaze/Kconfig4
-rw-r--r--arch/microblaze/Makefile2
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/atomic.h28
-rw-r--r--arch/microblaze/include/asm/cmpxchg.h9
-rw-r--r--arch/microblaze/include/asm/page.h3
-rw-r--r--arch/microblaze/include/asm/pgalloc.h2
-rw-r--r--arch/microblaze/include/asm/pgtable.h2
-rw-r--r--arch/microblaze/include/asm/unaligned.h27
-rw-r--r--arch/microblaze/kernel/asm-offsets.c1
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/mips/Kconfig22
-rw-r--r--arch/mips/Kconfig.debug8
-rw-r--r--arch/mips/alchemy/board-xxs1500.c1
-rw-r--r--arch/mips/ar7/clock.c113
-rw-r--r--arch/mips/boot/compressed/uart-16550.c4
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts24
-rw-r--r--arch/mips/boot/dts/ingenic/gcw0.dts5
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi10
-rw-r--r--arch/mips/boot/dts/ingenic/rs90.dts14
-rw-r--r--arch/mips/boot/dts/ingenic/x1000.dtsi7
-rw-r--r--arch/mips/boot/dts/ingenic/x1830.dtsi16
-rw-r--r--arch/mips/boot/dts/loongson/Makefile2
-rw-r--r--arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi64
-rw-r--r--arch/mips/boot/dts/loongson/loongson64g-package.dtsi4
-rw-r--r--arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts2
-rw-r--r--arch/mips/boot/dts/loongson/ls7a-pch.dtsi8
-rw-r--r--arch/mips/boot/dts/loongson/rs780e-pch.dtsi2
-rw-r--r--arch/mips/boot/dts/mti/sead3.dts2
-rw-r--r--arch/mips/boot/dts/qca/Makefile1
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts110
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c9
-rw-r--r--arch/mips/configs/loongson3_defconfig12
-rw-r--r--arch/mips/crypto/crc32-mips.c2
-rw-r--r--arch/mips/include/asm/atomic.h55
-rw-r--r--arch/mips/include/asm/cmpxchg.h22
-rw-r--r--arch/mips/include/asm/cpu-features.h4
-rw-r--r--arch/mips/include/asm/highmem.h2
-rw-r--r--arch/mips/include/asm/hugetlb.h8
-rw-r--r--arch/mips/include/asm/irq.h1
-rw-r--r--arch/mips/include/asm/kvm_host.h9
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h4
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7620.h53
-rw-r--r--arch/mips/include/asm/mach-ralink/pinmux.h52
-rw-r--r--arch/mips/include/asm/mach-ralink/rt288x.h9
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h24
-rw-r--r--arch/mips/include/asm/mach-ralink/rt3883.h34
-rw-r--r--arch/mips/include/asm/mach-ralink/spaces.h10
-rw-r--r--arch/mips/include/asm/mips-boards/launch.h5
-rw-r--r--arch/mips/include/asm/mipsregs.h8
-rw-r--r--arch/mips/include/asm/mmzone.h8
-rw-r--r--arch/mips/include/asm/page.h2
-rw-r--r--arch/mips/include/asm/pgalloc.h11
-rw-r--r--arch/mips/include/asm/pgtable-32.h1
-rw-r--r--arch/mips/include/asm/pgtable-64.h1
-rw-r--r--arch/mips/include/asm/unistd.h4
-rw-r--r--arch/mips/include/uapi/asm/mman.h3
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/ingenic/Kconfig2
-rw-r--r--arch/mips/kernel/asm-offsets.c1
-rw-r--r--arch/mips/kernel/cmpxchg.c4
-rw-r--r--arch/mips/kernel/cpu-probe.c5
-rw-r--r--arch/mips/kernel/kprobes.c3
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/relocate.c1
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/syscalls/Makefile34
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl2
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl2
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl2
-rw-r--r--arch/mips/kernel/syscalls/syscallnr.sh2
-rw-r--r--arch/mips/kernel/traps.c4
-rw-r--r--arch/mips/kvm/Makefile2
-rw-r--r--arch/mips/kvm/mips.c90
-rw-r--r--arch/mips/kvm/tlb.c3
-rw-r--r--arch/mips/lantiq/xway/dma.c1
-rw-r--r--arch/mips/lib/mips-atomic.c12
-rw-r--r--arch/mips/loongson64/env.c3
-rw-r--r--arch/mips/loongson64/numa.c3
-rw-r--r--arch/mips/loongson64/reset.c5
-rw-r--r--arch/mips/loongson64/smp.c10
-rw-r--r--arch/mips/mm/cache.c30
-rw-r--r--arch/mips/mm/dma-noncoherent.c1
-rw-r--r--arch/mips/mm/init.c7
-rw-r--r--arch/mips/mti-malta/malta-platform.c3
-rw-r--r--arch/mips/pci/pci-rt3883.c1
-rw-r--r--arch/mips/pci/pci-xtalk-bridge.c1
-rw-r--r--arch/mips/pic32/Kconfig1
-rw-r--r--arch/mips/ralink/Kconfig5
-rw-r--r--arch/mips/ralink/clk.c64
-rw-r--r--arch/mips/ralink/mt7620.c320
-rw-r--r--arch/mips/ralink/mt7621.c88
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/mips/ralink/prom.c1
-rw-r--r--arch/mips/ralink/rt288x.c21
-rw-r--r--arch/mips/ralink/rt305x.c77
-rw-r--r--arch/mips/ralink/rt3883.c45
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c1
-rw-r--r--arch/mips/sgi-ip30/ip30-irq.c1
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c1
-rw-r--r--arch/nds32/include/asm/memory.h6
-rw-r--r--arch/nds32/include/asm/pgalloc.h5
-rw-r--r--arch/nds32/kernel/process.c2
-rw-r--r--arch/nios2/include/asm/irq.h1
-rw-r--r--arch/nios2/include/asm/pgalloc.h1
-rw-r--r--arch/nios2/include/asm/pgtable.h2
-rw-r--r--arch/nios2/kernel/irq.c1
-rw-r--r--arch/nios2/kernel/process.c2
-rw-r--r--arch/openrisc/include/asm/atomic.h42
-rw-r--r--arch/openrisc/include/asm/barrier.h9
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h4
-rw-r--r--arch/openrisc/include/asm/pgalloc.h2
-rw-r--r--arch/openrisc/include/asm/pgtable.h1
-rw-r--r--arch/openrisc/include/asm/tlbflush.h2
-rw-r--r--arch/openrisc/include/asm/unaligned.h47
-rw-r--r--arch/openrisc/kernel/asm-offsets.c1
-rw-r--r--arch/openrisc/kernel/setup.c2
-rw-r--r--arch/openrisc/kernel/smp.c2
-rw-r--r--arch/openrisc/mm/init.c6
-rw-r--r--arch/parisc/include/asm/atomic.h34
-rw-r--r--arch/parisc/include/asm/cmpxchg.h14
-rw-r--r--arch/parisc/include/asm/pgalloc.h1
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/unaligned.h6
-rw-r--r--arch/parisc/include/uapi/asm/mman.h3
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/pdc_chassis.c1
-rw-r--r--arch/parisc/kernel/pdc_cons.c8
-rw-r--r--arch/parisc/kernel/process.c4
-rw-r--r--arch/parisc/kernel/smp.c1
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/powerpc/Kconfig14
-rw-r--r--arch/powerpc/Kconfig.debug5
-rw-r--r--arch/powerpc/Makefile10
-rw-r--r--arch/powerpc/boot/Makefile6
-rw-r--r--arch/powerpc/boot/decompress.c4
-rw-r--r--arch/powerpc/boot/devtree.c59
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-post.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi16
-rw-r--r--arch/powerpc/boot/dts/microwatt.dts138
-rw-r--r--arch/powerpc/boot/microwatt.c24
-rw-r--r--arch/powerpc/boot/ns16550.c9
-rwxr-xr-xarch/powerpc/boot/wrapper5
-rw-r--r--arch/powerpc/boot/zImage.ps3.lds.S2
-rw-r--r--arch/powerpc/configs/32-bit.config1
-rw-r--r--arch/powerpc/configs/64-bit.config1
-rw-r--r--arch/powerpc/configs/microwatt_defconfig98
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig25
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h12
-rw-r--r--arch/powerpc/include/asm/atomic.h140
-rw-r--r--arch/powerpc/include/asm/barrier.h2
-rw-r--r--arch/powerpc/include/asm/book3s/32/hash.h45
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h195
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h41
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h38
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h4
-rw-r--r--arch/powerpc/include/asm/book3s/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/checksum.h2
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h30
-rw-r--r--arch/powerpc/include/asm/code-patching.h34
-rw-r--r--arch/powerpc/include/asm/cputhreads.h30
-rw-r--r--arch/powerpc/include/asm/exception-64s.h13
-rw-r--r--arch/powerpc/include/asm/head-64.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h17
-rw-r--r--arch/powerpc/include/asm/hw_irq.h23
-rw-r--r--arch/powerpc/include/asm/inst.h94
-rw-r--r--arch/powerpc/include/asm/interrupt.h68
-rw-r--r--arch/powerpc/include/asm/irq.h5
-rw-r--r--arch/powerpc/include/asm/jump_label.h2
-rw-r--r--arch/powerpc/include/asm/kup.h50
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h8
-rw-r--r--arch/powerpc/include/asm/kvm_guest.h4
-rw-r--r--arch/powerpc/include/asm/kvm_host.h22
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h18
-rw-r--r--arch/powerpc/include/asm/livepatch.h2
-rw-r--r--arch/powerpc/include/asm/mmu.h19
-rw-r--r--arch/powerpc/include/asm/mmu_context.h19
-rw-r--r--arch/powerpc/include/asm/mmzone.h4
-rw-r--r--arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h5
-rw-r--r--arch/powerpc/include/asm/nohash/32/kup-8xx.h46
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-44x.h1
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h43
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/paca.h9
-rw-r--r--arch/powerpc/include/asm/paravirt.h22
-rw-r--r--arch/powerpc/include/asm/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/pgtable.h11
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h6
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h84
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h15
-rw-r--r--arch/powerpc/include/asm/probes.h4
-rw-r--r--arch/powerpc/include/asm/processor.h21
-rw-r--r--arch/powerpc/include/asm/ps3.h4
-rw-r--r--arch/powerpc/include/asm/pte-walk.h29
-rw-r--r--arch/powerpc/include/asm/ptrace.h91
-rw-r--r--arch/powerpc/include/asm/qspinlock.h2
-rw-r--r--arch/powerpc/include/asm/reg.h13
-rw-r--r--arch/powerpc/include/asm/security_features.h4
-rw-r--r--arch/powerpc/include/asm/set_memory.h34
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/sstep.h7
-rw-r--r--arch/powerpc/include/asm/syscall.h42
-rw-r--r--arch/powerpc/include/asm/time.h12
-rw-r--r--arch/powerpc/include/asm/uaccess.h2
-rw-r--r--arch/powerpc/include/asm/unaligned.h22
-rw-r--r--arch/powerpc/include/asm/uprobes.h4
-rw-r--r--arch/powerpc/include/asm/vas.h109
-rw-r--r--arch/powerpc/include/asm/xics.h4
-rw-r--r--arch/powerpc/include/uapi/asm/papr_pdsm.h6
-rw-r--r--arch/powerpc/include/uapi/asm/vas-api.h6
-rw-r--r--arch/powerpc/kernel/asm-offsets.c74
-rw-r--r--arch/powerpc/kernel/crash_dump.c6
-rw-r--r--arch/powerpc/kernel/eeh.c23
-rw-r--r--arch/powerpc/kernel/entry_32.S54
-rw-r--r--arch/powerpc/kernel/entry_64.S516
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S90
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S501
-rw-r--r--arch/powerpc/kernel/firmware.c10
-rw-r--r--arch/powerpc/kernel/fpu.S4
-rw-r--r--arch/powerpc/kernel/head_32.h41
-rw-r--r--arch/powerpc/kernel/head_40x.S36
-rw-r--r--arch/powerpc/kernel/head_44x.S50
-rw-r--r--arch/powerpc/kernel/head_64.S25
-rw-r--r--arch/powerpc/kernel/head_8xx.S25
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S139
-rw-r--r--arch/powerpc/kernel/head_booke.h31
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S37
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c4
-rw-r--r--arch/powerpc/kernel/interrupt.c486
-rw-r--r--arch/powerpc/kernel/interrupt_64.S770
-rw-r--r--arch/powerpc/kernel/io-workarounds.c16
-rw-r--r--arch/powerpc/kernel/iommu.c11
-rw-r--r--arch/powerpc/kernel/irq.c96
-rw-r--r--arch/powerpc/kernel/jump_label.c4
-rw-r--r--arch/powerpc/kernel/kgdb.c19
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c4
-rw-r--r--arch/powerpc/kernel/kprobes.c80
-rw-r--r--arch/powerpc/kernel/legacy_serial.c7
-rw-r--r--arch/powerpc/kernel/mce.c3
-rw-r--r--arch/powerpc/kernel/mce_power.c50
-rw-r--r--arch/powerpc/kernel/misc_32.S6
-rw-r--r--arch/powerpc/kernel/module.c4
-rw-r--r--arch/powerpc/kernel/module_32.c19
-rw-r--r--arch/powerpc/kernel/module_64.c55
-rw-r--r--arch/powerpc/kernel/optprobes.c155
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/process.c111
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/prom_init.c121
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-adv.c20
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-noadv.c14
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c5
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c2
-rw-r--r--arch/powerpc/kernel/rtas.c14
-rw-r--r--arch/powerpc/kernel/security.c24
-rw-r--r--arch/powerpc/kernel/setup-common.c5
-rw-r--r--arch/powerpc/kernel/setup_32.c4
-rw-r--r--arch/powerpc/kernel/setup_64.c19
-rw-r--r--arch/powerpc/kernel/signal.c12
-rw-r--r--arch/powerpc/kernel/signal.h4
-rw-r--r--arch/powerpc/kernel/signal_32.c106
-rw-r--r--arch/powerpc/kernel/signal_64.c58
-rw-r--r--arch/powerpc/kernel/smp.c18
-rw-r--r--arch/powerpc/kernel/stacktrace.c34
-rw-r--r--arch/powerpc/kernel/syscalls.c3
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/powerpc/kernel/sysfs.c12
-rw-r--r--arch/powerpc/kernel/tau_6xx.c2
-rw-r--r--arch/powerpc/kernel/time.c21
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c51
-rw-r--r--arch/powerpc/kernel/traps.c49
-rw-r--r--arch/powerpc/kernel/udbg_16550.c39
-rw-r--r--arch/powerpc/kernel/uprobes.c8
-rw-r--r--arch/powerpc/kernel/vector.S8
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S19
-rw-r--r--arch/powerpc/kernel/watchdog.c1
-rw-r--r--arch/powerpc/kexec/core.c4
-rw-r--r--arch/powerpc/kexec/crash.c4
-rw-r--r--arch/powerpc/kvm/Makefile4
-rw-r--r--arch/powerpc/kvm/book3s.c108
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_entry.S416
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c27
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv.c820
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c137
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S9
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c122
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c508
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c29
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S689
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c3
-rw-r--r--arch/powerpc/kvm/book3s_pr.c4
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c2
-rw-r--r--arch/powerpc/kvm/book3s_segment.S3
-rw-r--r--arch/powerpc/kvm/book3s_xive.c114
-rw-r--r--arch/powerpc/kvm/book3s_xive.h7
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c11
-rw-r--r--arch/powerpc/kvm/booke.c76
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/code-patching.c178
-rw-r--r--arch/powerpc/lib/error-inject.c2
-rw-r--r--arch/powerpc/lib/feature-fixups.c344
-rw-r--r--arch/powerpc/lib/restart_table.c56
-rw-r--r--arch/powerpc/lib/sstep.c39
-rw-r--r--arch/powerpc/lib/test_emulate_step.c38
-rw-r--r--arch/powerpc/math-emu/math.c2
-rw-r--r--arch/powerpc/math-emu/math_efp.c2
-rw-r--r--arch/powerpc/mm/Makefile4
-rw-r--r--arch/powerpc/mm/book3s32/Makefile1
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S6
-rw-r--r--arch/powerpc/mm/book3s32/kuap.c33
-rw-r--r--arch/powerpc/mm/book3s32/kuep.c42
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c20
-rw-r--r--arch/powerpc/mm/book3s32/mmu_context.c48
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c24
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c33
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c228
-rw-r--r--arch/powerpc/mm/ioremap_32.c4
-rw-r--r--arch/powerpc/mm/ioremap_64.c2
-rw-r--r--arch/powerpc/mm/maccess.c4
-rw-r--r--arch/powerpc/mm/mem.c12
-rw-r--r--arch/powerpc/mm/mmu_context.c4
-rw-r--r--arch/powerpc/mm/nohash/44x.c17
-rw-r--r--arch/powerpc/mm/nohash/8xx.c42
-rw-r--r--arch/powerpc/mm/nohash/mmu_context.c173
-rw-r--r--arch/powerpc/mm/nohash/tlb_low.S13
-rw-r--r--arch/powerpc/mm/pageattr.c134
-rw-r--r--arch/powerpc/mm/pgtable.c8
-rw-r--r--arch/powerpc/mm/pgtable_32.c60
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c22
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c13
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c220
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c14
-rw-r--r--arch/powerpc/perf/Makefile6
-rw-r--r--arch/powerpc/perf/callchain.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c6
-rw-r--r--arch/powerpc/perf/generic-compat-pmu.c170
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c1
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c4
-rw-r--r--arch/powerpc/platforms/Kconfig3
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype16
-rw-r--r--arch/powerpc/platforms/Makefile2
-rw-r--r--arch/powerpc/platforms/book3s/Kconfig15
-rw-r--r--arch/powerpc/platforms/book3s/Makefile2
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c493
-rw-r--r--arch/powerpc/platforms/cell/Kconfig1
-rw-r--r--arch/powerpc/platforms/cell/pmu.c1
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c4
-rw-r--r--arch/powerpc/platforms/microwatt/Kconfig13
-rw-r--r--arch/powerpc/platforms/microwatt/Makefile1
-rw-r--r--arch/powerpc/platforms/microwatt/rng.c48
-rw-r--r--arch/powerpc/platforms/microwatt/setup.c41
-rw-r--r--arch/powerpc/platforms/pasemi/idle.c4
-rw-r--r--arch/powerpc/platforms/powermac/Kconfig1
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c2
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig14
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/idle.c52
-rw-r--r--arch/powerpc/platforms/powernv/opal-call.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci.c2
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c10
-rw-r--r--arch/powerpc/platforms/powernv/vas-api.c278
-rw-r--r--arch/powerpc/platforms/powernv/vas-debug.c27
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c173
-rw-r--r--arch/powerpc/platforms/powernv/vas-trace.h4
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c264
-rw-r--r--arch/powerpc/platforms/powernv/vas.h50
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig10
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c5
-rw-r--r--arch/powerpc/platforms/ps3/mm.c12
-rw-r--r--arch/powerpc/platforms/ps3/setup.c43
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c9
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c9
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c92
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S39
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c1
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c29
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c101
-rw-r--r--arch/powerpc/platforms/pseries/ras.c6
-rw-r--r--arch/powerpc/platforms/pseries/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/smp.c4
-rw-r--r--arch/powerpc/platforms/pseries/vas.c595
-rw-r--r--arch/powerpc/platforms/pseries/vas.h125
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_mpic_err.c1
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c2
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c4
-rw-r--r--arch/powerpc/sysdev/i8259.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c3
-rw-r--r--arch/powerpc/sysdev/xics/Kconfig3
-rw-r--r--arch/powerpc/sysdev/xics/Makefile1
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c1
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c1
-rw-r--r--arch/powerpc/sysdev/xics/ics-native.c257
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c4
-rw-r--r--arch/powerpc/sysdev/xive/Kconfig1
-rw-r--r--arch/powerpc/xmon/xmon.c209
-rw-r--r--arch/riscv/Kconfig26
-rw-r--r--arch/riscv/Kconfig.socs1
-rw-r--r--arch/riscv/Makefile11
-rw-r--r--arch/riscv/boot/dts/microchip/Makefile1
-rw-r--r--arch/riscv/boot/dts/sifive/Makefile1
-rw-r--r--arch/riscv/boot/dts/sifive/fu740-c000.dtsi2
-rw-r--r--arch/riscv/errata/sifive/Makefile2
-rw-r--r--arch/riscv/include/asm/alternative-macros.h4
-rw-r--r--arch/riscv/include/asm/atomic.h128
-rw-r--r--arch/riscv/include/asm/cmpxchg.h34
-rw-r--r--arch/riscv/include/asm/kexec.h4
-rw-r--r--arch/riscv/include/asm/pgalloc.h2
-rw-r--r--arch/riscv/include/asm/pgtable.h7
-rw-r--r--arch/riscv/kernel/machine_kexec.c11
-rw-r--r--arch/riscv/kernel/probes/kprobes.c19
-rw-r--r--arch/riscv/kernel/setup.c4
-rw-r--r--arch/riscv/kernel/smpboot.c1
-rw-r--r--arch/riscv/kernel/stacktrace.c16
-rw-r--r--arch/riscv/kernel/traps.c13
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S15
-rw-r--r--arch/riscv/mm/init.c8
-rw-r--r--arch/riscv/mm/kasan_init.c10
-rw-r--r--arch/s390/Kconfig15
-rw-r--r--arch/s390/Makefile1
-rw-r--r--arch/s390/boot/als.c6
-rw-r--r--arch/s390/boot/boot.h1
-rw-r--r--arch/s390/boot/compressed/Makefile6
-rw-r--r--arch/s390/boot/compressed/decompressor.c8
-rw-r--r--arch/s390/boot/head.S3
-rw-r--r--arch/s390/boot/ipl_parm.c59
-rw-r--r--arch/s390/boot/mem_detect.c73
-rw-r--r--arch/s390/boot/startup.c91
-rw-r--r--arch/s390/boot/uv.c25
-rw-r--r--arch/s390/hypfs/hypfs_sprp.c13
-rw-r--r--arch/s390/include/asm/atomic.h2
-rw-r--r--arch/s390/include/asm/bitops.h10
-rw-r--r--arch/s390/include/asm/ccwgroup.h3
-rw-r--r--arch/s390/include/asm/checksum.h10
-rw-r--r--arch/s390/include/asm/cio.h1
-rw-r--r--arch/s390/include/asm/cmpxchg.h44
-rw-r--r--arch/s390/include/asm/cpu_mcf.h3
-rw-r--r--arch/s390/include/asm/facility.h25
-rw-r--r--arch/s390/include/asm/irqflags.h16
-rw-r--r--arch/s390/include/asm/kasan.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h9
-rw-r--r--arch/s390/include/asm/lowcore.h38
-rw-r--r--arch/s390/include/asm/mmu_context.h15
-rw-r--r--arch/s390/include/asm/page.h21
-rw-r--r--arch/s390/include/asm/pci.h7
-rw-r--r--arch/s390/include/asm/percpu.h27
-rw-r--r--arch/s390/include/asm/pgalloc.h3
-rw-r--r--arch/s390/include/asm/pgtable.h69
-rw-r--r--arch/s390/include/asm/preempt.h4
-rw-r--r--arch/s390/include/asm/processor.h6
-rw-r--r--arch/s390/include/asm/ptrace.h12
-rw-r--r--arch/s390/include/asm/qdio.h4
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/sigp.h14
-rw-r--r--arch/s390/include/asm/stacktrace.h18
-rw-r--r--arch/s390/include/asm/string.h59
-rw-r--r--arch/s390/include/asm/timex.h26
-rw-r--r--arch/s390/include/asm/tpi.h24
-rw-r--r--arch/s390/include/asm/types.h19
-rw-r--r--arch/s390/include/asm/uaccess.h36
-rw-r--r--arch/s390/include/uapi/asm/schid.h3
-rw-r--r--arch/s390/kernel/alternative.c3
-rw-r--r--arch/s390/kernel/asm-offsets.c7
-rw-r--r--arch/s390/kernel/cpcmd.c42
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/diag.c11
-rw-r--r--arch/s390/kernel/early.c6
-rw-r--r--arch/s390/kernel/entry.S62
-rw-r--r--arch/s390/kernel/ipl.c15
-rw-r--r--arch/s390/kernel/irq.c6
-rw-r--r--arch/s390/kernel/kprobes.c22
-rw-r--r--arch/s390/kernel/nospec-branch.c17
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c92
-rw-r--r--arch/s390/kernel/perf_cpum_cf_common.c1
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/processor.c4
-rw-r--r--arch/s390/kernel/ptrace.c4
-rw-r--r--arch/s390/kernel/setup.c107
-rw-r--r--arch/s390/kernel/signal.c1
-rw-r--r--arch/s390/kernel/smp.c132
-rw-r--r--arch/s390/kernel/sthyi.c13
-rw-r--r--arch/s390/kernel/syscall.c5
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/kernel/sysinfo.c19
-rw-r--r--arch/s390/kernel/topology.c12
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/s390/kernel/uv.c8
-rw-r--r--arch/s390/kvm/Makefile3
-rw-r--r--arch/s390/kvm/kvm-s390.c266
-rw-r--r--arch/s390/kvm/pv.c7
-rw-r--r--arch/s390/lib/string.c131
-rw-r--r--arch/s390/lib/uaccess.c25
-rw-r--r--arch/s390/lib/xor.c5
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/kasan_init.c35
-rw-r--r--arch/s390/mm/maccess.c19
-rw-r--r--arch/s390/mm/page-states.c12
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/pci/pci_insn.c55
-rw-r--r--arch/s390/pci/pci_irq.c46
-rw-r--r--arch/s390/pci/pci_mmio.c24
-rw-r--r--arch/s390/purgatory/Makefile1
-rw-r--r--arch/s390/tools/gen_facilities.c4
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/include/asm/atomic-grb.h6
-rw-r--r--arch/sh/include/asm/atomic-irq.h6
-rw-r--r--arch/sh/include/asm/atomic-llsc.h6
-rw-r--r--arch/sh/include/asm/atomic.h8
-rw-r--r--arch/sh/include/asm/cmpxchg.h4
-rw-r--r--arch/sh/include/asm/mmzone.h4
-rw-r--r--arch/sh/include/asm/pgalloc.h1
-rw-r--r--arch/sh/include/asm/pgtable.h2
-rw-r--r--arch/sh/include/asm/unaligned-sh4a.h199
-rw-r--r--arch/sh/include/asm/unaligned.h13
-rw-r--r--arch/sh/kernel/kprobes.c17
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/kernel/smp.c2
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/sh/kernel/topology.c2
-rw-r--r--arch/sh/kernel/traps.c1
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sh/mm/init.c2
-rw-r--r--arch/sparc/Kconfig7
-rw-r--r--arch/sparc/include/asm/atomic_32.h38
-rw-r--r--arch/sparc/include/asm/atomic_64.h36
-rw-r--r--arch/sparc/include/asm/cmpxchg_32.h12
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h12
-rw-r--r--arch/sparc/include/asm/mmzone.h4
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h1
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h1
-rw-r--r--arch/sparc/include/asm/pgtable_32.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h8
-rw-r--r--arch/sparc/include/asm/unaligned.h11
-rw-r--r--arch/sparc/include/asm/vio.h2
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/kernel/ds.c6
-rw-r--r--arch/sparc/kernel/kprobes.c17
-rw-r--r--arch/sparc/kernel/process_32.c3
-rw-r--r--arch/sparc/kernel/process_64.c3
-rw-r--r--arch/sparc/kernel/smp_32.c1
-rw-r--r--arch/sparc/kernel/smp_64.c5
-rw-r--r--arch/sparc/kernel/sstate.c1
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/sparc/kernel/vio.c4
-rw-r--r--arch/sparc/lib/atomic32.c24
-rw-r--r--arch/sparc/lib/atomic_64.S42
-rw-r--r--arch/sparc/mm/hugetlbpage.c6
-rw-r--r--arch/sparc/mm/init_64.c13
-rw-r--r--arch/um/configs/kunit_defconfig3
-rw-r--r--arch/um/drivers/line.c15
-rw-r--r--arch/um/drivers/line.h5
-rw-r--r--arch/um/drivers/mconsole_kern.c3
-rw-r--r--arch/um/drivers/ssl.c1
-rw-r--r--arch/um/drivers/stdio_console.c1
-rw-r--r--arch/um/include/asm/pgalloc.h1
-rw-r--r--arch/um/include/asm/pgtable-2level.h1
-rw-r--r--arch/um/include/asm/pgtable-3level.h1
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/kernel/skas/syscall.c2
-rw-r--r--arch/um/kernel/um_arch.c1
-rw-r--r--arch/x86/Kconfig53
-rw-r--r--arch/x86/Makefile18
-rw-r--r--arch/x86/boot/.gitignore1
-rw-r--r--arch/x86/boot/Makefile44
-rw-r--r--arch/x86/boot/compressed/Makefile7
-rw-r--r--arch/x86/boot/compressed/misc.c2
-rw-r--r--arch/x86/boot/compressed/misc.h2
-rw-r--r--arch/x86/boot/compressed/sev.c (renamed from arch/x86/boot/compressed/sev-es.c)4
-rw-r--r--arch/x86/boot/genimage.sh303
-rw-r--r--arch/x86/boot/mtools.conf.in3
-rw-r--r--arch/x86/crypto/curve25519-x86_64.c2
-rw-r--r--arch/x86/entry/Makefile10
-rw-r--r--arch/x86/entry/calling.h45
-rw-r--r--arch/x86/entry/common.c92
-rw-r--r--arch/x86/entry/entry_64.S9
-rw-r--r--arch/x86/entry/syscall_32.c20
-rw-r--r--arch/x86/entry/syscall_64.c17
-rw-r--r--arch/x86/entry/syscall_x32.c35
-rw-r--r--arch/x86/entry/syscalls/Makefile38
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/entry/syscalls/syscallhdr.sh35
-rw-r--r--arch/x86/entry/syscalls/syscalltbl.sh46
-rw-r--r--arch/x86/events/core.c34
-rw-r--r--arch/x86/events/intel/core.c25
-rw-r--r--arch/x86/events/intel/ds.c20
-rw-r--r--arch/x86/events/intel/lbr.c27
-rw-r--r--arch/x86/events/intel/uncore.c4
-rw-r--r--arch/x86/events/intel/uncore.h1
-rw-r--r--arch/x86/events/intel/uncore_snbep.c182
-rw-r--r--arch/x86/events/perf_event.h7
-rw-r--r--arch/x86/events/rapl.c6
-rw-r--r--arch/x86/hyperv/hv_init.c47
-rw-r--r--arch/x86/ia32/ia32_aout.c4
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/asm.h27
-rw-r--r--arch/x86/include/asm/atomic.h2
-rw-r--r--arch/x86/include/asm/barrier.h7
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/crash.h6
-rw-r--r--arch/x86/include/asm/desc.h25
-rw-r--r--arch/x86/include/asm/disabled-features.h7
-rw-r--r--arch/x86/include/asm/fpu/api.h6
-rw-r--r--arch/x86/include/asm/fpu/internal.h50
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h19
-rw-r--r--arch/x86/include/asm/idtentry.h33
-rw-r--r--arch/x86/include/asm/intel-family.h3
-rw-r--r--arch/x86/include/asm/irq_vectors.h7
-rw-r--r--arch/x86/include/asm/jump_label.h79
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h10
-rw-r--r--arch/x86/include/asm/kvm_host.h185
-rw-r--r--arch/x86/include/asm/kvm_para.h10
-rw-r--r--arch/x86/include/asm/mce.h13
-rw-r--r--arch/x86/include/asm/msr-index.h10
-rw-r--r--arch/x86/include/asm/nops.h24
-rw-r--r--arch/x86/include/asm/page.h6
-rw-r--r--arch/x86/include/asm/page_64.h2
-rw-r--r--arch/x86/include/asm/perf_event.h1
-rw-r--r--arch/x86/include/asm/pgalloc.h2
-rw-r--r--arch/x86/include/asm/pgtable_types.h2
-rw-r--r--arch/x86/include/asm/preempt.h2
-rw-r--r--arch/x86/include/asm/processor.h3
-rw-r--r--arch/x86/include/asm/sev-common.h76
-rw-r--r--arch/x86/include/asm/sev.h (renamed from arch/x86/include/asm/sev-es.h)30
-rw-r--r--arch/x86/include/asm/sgx.h2
-rw-r--r--arch/x86/include/asm/stackprotector.h2
-rw-r--r--arch/x86/include/asm/svm.h9
-rw-r--r--arch/x86/include/asm/syscall.h13
-rw-r--r--arch/x86/include/asm/syscall_wrapper.h10
-rw-r--r--arch/x86/include/asm/thermal.h4
-rw-r--r--arch/x86/include/asm/unaligned.h15
-rw-r--r--arch/x86/include/asm/unistd.h8
-rw-r--r--arch/x86/include/asm/vdso/clocksource.h2
-rw-r--r--arch/x86/include/uapi/asm/hwcap2.h6
-rw-r--r--arch/x86/include/uapi/asm/kvm.h15
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h13
-rw-r--r--arch/x86/include/uapi/asm/svm.h3
-rw-r--r--arch/x86/kernel/Makefile7
-rw-r--r--arch/x86/kernel/acpi/boot.c118
-rw-r--r--arch/x86/kernel/acpi/cstate.c3
-rw-r--r--arch/x86/kernel/alternative.c70
-rw-r--r--arch/x86/kernel/amd_nb.c3
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/apic/vector.c20
-rw-r--r--arch/x86/kernel/cpu/amd.c24
-rw-r--r--arch/x86/kernel/cpu/common.c40
-rw-r--r--arch/x86/kernel/cpu/cpu.h2
-rw-r--r--arch/x86/kernel/cpu/hygon.c4
-rw-r--r--arch/x86/kernel/cpu/intel.c46
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c55
-rw-r--r--arch/x86/kernel/cpu/mce/apei.c3
-rw-r--r--arch/x86/kernel/cpu/mce/core.c13
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c13
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c4
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c4
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h21
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c10
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c2
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.h4
-rw-r--r--arch/x86/kernel/cpu/sgx/virt.c1
-rw-r--r--arch/x86/kernel/cpu/tsx.c37
-rw-r--r--arch/x86/kernel/crash.c13
-rw-r--r--arch/x86/kernel/early-quirks.c2
-rw-r--r--arch/x86/kernel/fpu/signal.c80
-rw-r--r--arch/x86/kernel/fpu/xstate.c98
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_64.S6
-rw-r--r--arch/x86/kernel/idt.c45
-rw-r--r--arch/x86/kernel/jump_label.c81
-rw-r--r--arch/x86/kernel/kprobes/core.c26
-rw-r--r--arch/x86/kernel/kvm.c129
-rw-r--r--arch/x86/kernel/kvmclock.c26
-rw-r--r--arch/x86/kernel/machine_kexec_32.c15
-rw-r--r--arch/x86/kernel/machine_kexec_64.c33
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c2
-rw-r--r--arch/x86/kernel/nmi.c2
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/ptrace.c2
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/setup.c70
-rw-r--r--arch/x86/kernel/setup_percpu.c6
-rw-r--r--arch/x86/kernel/sev-shared.c (renamed from arch/x86/kernel/sev-es-shared.c)21
-rw-r--r--arch/x86/kernel/sev.c (renamed from arch/x86/kernel/sev-es.c)339
-rw-r--r--arch/x86/kernel/signal.c4
-rw-r--r--arch/x86/kernel/signal_compat.c9
-rw-r--r--arch/x86/kernel/smpboot.c6
-rw-r--r--arch/x86/kernel/trace.c234
-rw-r--r--arch/x86/kernel/traps.c9
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kernel/umip.c10
-rw-r--r--arch/x86/kvm/Kconfig6
-rw-r--r--arch/x86/kvm/Makefile11
-rw-r--r--arch/x86/kvm/cpuid.c27
-rw-r--r--arch/x86/kvm/debugfs.c11
-rw-r--r--arch/x86/kvm/emulate.c173
-rw-r--r--arch/x86/kvm/fpu.h140
-rw-r--r--arch/x86/kvm/hyperv.c448
-rw-r--r--arch/x86/kvm/hyperv.h1
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h15
-rw-r--r--arch/x86/kvm/kvm_emulate.h14
-rw-r--r--arch/x86/kvm/kvm_onhyperv.c93
-rw-r--r--arch/x86/kvm/kvm_onhyperv.h32
-rw-r--r--arch/x86/kvm/lapic.c50
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.h30
-rw-r--r--arch/x86/kvm/mmu/mmu.c914
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h15
-rw-r--r--arch/x86/kvm/mmu/mmutrace.h2
-rw-r--r--arch/x86/kvm/mmu/page_track.c2
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h82
-rw-r--r--arch/x86/kvm/mmu/spte.c22
-rw-r--r--arch/x86/kvm/mmu/spte.h32
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c66
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h35
-rw-r--r--arch/x86/kvm/svm/avic.c26
-rw-r--r--arch/x86/kvm/svm/nested.c114
-rw-r--r--arch/x86/kvm/svm/sev.c58
-rw-r--r--arch/x86/kvm/svm/svm.c174
-rw-r--r--arch/x86/kvm/svm/svm.h63
-rw-r--r--arch/x86/kvm/svm/svm_onhyperv.c41
-rw-r--r--arch/x86/kvm/svm/svm_onhyperv.h130
-rw-r--r--arch/x86/kvm/trace.h8
-rw-r--r--arch/x86/kvm/vmx/capabilities.h7
-rw-r--r--arch/x86/kvm/vmx/evmcs.c3
-rw-r--r--arch/x86/kvm/vmx/evmcs.h8
-rw-r--r--arch/x86/kvm/vmx/nested.c447
-rw-r--r--arch/x86/kvm/vmx/nested.h11
-rw-r--r--arch/x86/kvm/vmx/posted_intr.c14
-rw-r--r--arch/x86/kvm/vmx/posted_intr.h1
-rw-r--r--arch/x86/kvm/vmx/vmcs.h13
-rw-r--r--arch/x86/kvm/vmx/vmcs12.c1
-rw-r--r--arch/x86/kvm/vmx/vmcs12.h10
-rw-r--r--arch/x86/kvm/vmx/vmx.c452
-rw-r--r--arch/x86/kvm/vmx/vmx.h33
-rw-r--r--arch/x86/kvm/x86.c1112
-rw-r--r--arch/x86/kvm/x86.h10
-rw-r--r--arch/x86/lib/insn-eval.c30
-rw-r--r--arch/x86/lib/retpoline.S4
-rw-r--r--arch/x86/mm/extable.c2
-rw-r--r--arch/x86/mm/fault.c8
-rw-r--r--arch/x86/mm/init_32.c4
-rw-r--r--arch/x86/mm/init_64.c5
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c17
-rw-r--r--arch/x86/mm/numa.c8
-rw-r--r--arch/x86/mm/pgtable.c34
-rw-r--r--arch/x86/mm/pkeys.c4
-rw-r--r--arch/x86/mm/tlb.c10
-rw-r--r--arch/x86/net/bpf_jit_comp.c46
-rw-r--r--arch/x86/pci/amd_bus.c2
-rw-r--r--arch/x86/pci/fixup.c44
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/platform/efi/quirks.c12
-rw-r--r--arch/x86/purgatory/purgatory.c2
-rw-r--r--arch/x86/realmode/Makefile1
-rw-r--r--arch/x86/realmode/init.c16
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S4
-rw-r--r--arch/x86/um/sys_call_table_32.c14
-rw-r--r--arch/x86/um/sys_call_table_64.c15
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/enlighten_pv.c10
-rw-r--r--arch/xtensa/include/asm/atomic.h26
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h14
-rw-r--r--arch/xtensa/include/asm/page.h4
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h1
-rw-r--r--arch/xtensa/include/asm/tlbflush.h4
-rw-r--r--arch/xtensa/include/asm/unaligned.h29
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h3
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--arch/xtensa/kernel/smp.c1
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/xtensa/platforms/iss/console.c9
-rw-r--r--arch/xtensa/platforms/iss/setup.c1
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c29
1205 files changed, 23950 insertions, 26681 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index c45b770d3579..129df498a8e1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -285,6 +285,13 @@ config ARCH_THREAD_STACK_ALLOCATOR
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
+config ARCH_WANTS_NO_INSTR
+ bool
+ help
+ An architecture should select this if the noinstr macro is being used on
+ functions to denote that the toolchain should avoid instrumenting such
+ functions and is required for correctness.
+
config ARCH_32BIT_OFF_T
bool
depends on !64BIT
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5998106faa60..77d3280dc678 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -40,6 +40,7 @@ config ALPHA
select MMU_GATHER_NO_RANGE
select SET_FS
select SPARSEMEM_EXTREME if SPARSEMEM
+ select ZONE_DMA
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -65,10 +66,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config ZONE_DMA
- bool
- default y
-
config GENERIC_ISA_DMA
bool
default y
@@ -549,29 +546,12 @@ config NR_CPUS
MARVEL support can handle a maximum of 32 CPUs, all the others
with working support have a maximum of 4 CPUs.
-config ARCH_DISCONTIGMEM_ENABLE
- bool "Discontiguous Memory Support"
- depends on BROKEN
- help
- Say Y to support efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa.rst> for more.
-
config ARCH_SPARSEMEM_ENABLE
bool "Sparse Memory Support"
help
Say Y to support efficient handling of discontiguous physical memory,
for systems that have huge holes in the physical address space.
-config NUMA
- bool "NUMA Support (EXPERIMENTAL)"
- depends on DISCONTIGMEM && BROKEN
- help
- Say Y to compile the kernel to support NUMA (Non-Uniform Memory
- Access). This option is for configuring high-end multiprocessor
- server machines. If in doubt, say N.
-
config ALPHA_WTINT
bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
default y if ALPHA_QEMU
@@ -596,11 +576,6 @@ config ALPHA_WTINT
If unsure, say N.
-config NODES_SHIFT
- int
- default "7"
- depends on NEED_MULTIPLE_NODES
-
# LARGE_VMALLOC is racy, if you *really* need it then fix it first
config ALPHA_LARGE_VMALLOC
bool
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index 724c4075df40..dd2dd9f0861f 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -25,19 +25,18 @@ CONFIG_PNP=y
CONFIG_ISAPNP=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_ALI15X3=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_CY82C693=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_PATA_ALI=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_CYPRESS=y
+CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NET_ETHERNET=y
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e41c113c6688..f2861a43a61e 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/*
* To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
*/
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
s64 temp; \
__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
}
#define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
@@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -213,7 +219,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb();
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
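Background sketch (not part of this patch): the arch_ prefix exists so that the generic atomic layer can generate the un-prefixed atomic_*() wrappers on top of these arch_atomic_*() primitives, adding KASAN/KCSAN instrumentation in one place. Roughly, with names simplified for illustration:

	/* Sketch only: how a generated wrapper builds atomic_read() on top of
	 * the arch_atomic_read() primitive defined above. */
	static __always_inline int atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));	/* sanitizer hook */
		return arch_atomic_read(v);
	}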
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..6e0a850aa9d3 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -26,7 +26,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -53,7 +53,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
@@ -65,10 +65,10 @@
__ret; \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#undef ____cmpxchg
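For reference, the arch_cmpxchg()/arch_xchg() renames above do not change the calling convention; callers keep using the un-prefixed wrappers. A typical compare-and-swap retry loop, shown only as an illustrative sketch:

	/* Illustration: cmpxchg() returns the value actually found at *ptr,
	 * so the loop retries until the observed old value matches the one
	 * the new value was computed from. */
	static inline void example_saturating_inc(int *ptr, int limit)
	{
		int old, new;

		do {
			old = READ_ONCE(*ptr);
			if (old >= limit)
				return;
			new = old + 1;
		} while (cmpxchg(ptr, old, new) != old);
	}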
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index a4e96e2bec74..e49fabce7b33 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -99,12 +99,6 @@ struct alpha_machine_vector
const char *vector_name;
- /* NUMA information */
- int (*pa_to_nid)(unsigned long);
- int (*cpuid_to_nid)(int);
- unsigned long (*node_mem_start)(int);
- unsigned long (*node_mem_size)(int);
-
/* System specific parameters. */
union {
struct {
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
deleted file mode 100644
index 86644604d977..000000000000
--- a/arch/alpha/include/asm/mmzone.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
- * Adapted for the alpha wildfire architecture Jan 2001.
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#ifdef CONFIG_DISCONTIGMEM
-
-#include <asm/smp.h>
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-
-extern pg_data_t node_data[];
-
-#define alpha_pa_to_nid(pa) \
- (alpha_mv.pa_to_nid \
- ? alpha_mv.pa_to_nid(pa) \
- : (0))
-#define node_mem_start(nid) \
- (alpha_mv.node_mem_start \
- ? alpha_mv.node_mem_start(nid) \
- : (0UL))
-#define node_mem_size(nid) \
- (alpha_mv.node_mem_size \
- ? alpha_mv.node_mem_size(nid) \
- : ((nid) ? (0UL) : (~0UL)))
-
-#define pa_to_nid(pa) alpha_pa_to_nid(pa)
-#define NODE_DATA(nid) (&node_data[(nid)])
-
-#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-
-#if 1
-#define PLAT_NODE_DATA_LOCALNR(p, n) \
- (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
-#else
-static inline unsigned long
-PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
-{
- unsigned long temp;
- temp = p >> PAGE_SHIFT;
- return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
-}
-#endif
-
-/*
- * Following are macros that each numa implementation must define.
- */
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
-
-/*
- * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
- * and returns the kaddr corresponding to first physical page in the
- * node's mem_map.
- */
-#define LOCAL_BASE_ADDR(kaddr) \
- ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
- << PAGE_SHIFT))
-
-/* XXX: FIXME -- nyc */
-#define kern_addr_valid(kaddr) (0)
-
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- unsigned long pfn; \
- \
- pfn = page_to_pfn(page) << 32; \
- pte_val(pte) = pfn | pgprot_val(pgprot); \
- \
- pte; \
-})
-
-#define pte_page(x) \
-({ \
- unsigned long kvirt; \
- struct page * __xx; \
- \
- kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
- __xx = virt_to_page(kvirt); \
- \
- __xx; \
-})
-
-#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
-#define pfn_valid(pfn) \
- (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \
- node_spanned_pages(pfn_to_nid(pfn))) \
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 268f99b4602b..18f48a6f2ff6 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -17,9 +17,9 @@
extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
extern void copy_page(void * _to, void * _from);
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index 9c6a24fe493d..68be7adbfe58 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -18,7 +18,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 8d856c62e22a..ff690846465e 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -46,7 +46,6 @@ struct vm_area_struct;
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
@@ -206,7 +205,6 @@ extern unsigned long __zero_page(void);
#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pte_pfn(pte) (pte_val(pte) >> 32)
-#ifndef CONFIG_DISCONTIGMEM
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot) \
({ \
@@ -215,7 +213,6 @@ extern unsigned long __zero_page(void);
pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
pte; \
})
-#endif
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
@@ -330,9 +327,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr) (1)
-#endif
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 5a77a40567fa..7d393036aa8f 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -7,45 +7,6 @@
#include <linux/numa.h>
#include <asm/machvec.h>
-#ifdef CONFIG_NUMA
-static inline int cpu_to_node(int cpu)
-{
- int node;
-
- if (!alpha_mv.cpuid_to_nid)
- return 0;
-
- node = alpha_mv.cpuid_to_nid(cpu);
-
-#ifdef DEBUG_NUMA
- BUG_ON(node < 0);
-#endif
-
- return node;
-}
-
-extern struct cpumask node_to_cpumask_map[];
-/* FIXME: This is dumb, recalculating every time. But simple. */
-static const struct cpumask *cpumask_of_node(int node)
-{
- int cpu;
-
- if (node == NUMA_NO_NODE)
- return cpu_all_mask;
-
- cpumask_clear(&node_to_cpumask_map[node]);
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) == node)
- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
- }
-
- return &node_to_cpumask_map[node];
-}
-
-#define cpumask_of_pcibus(bus) (cpu_online_mask)
-
-#endif /* !CONFIG_NUMA */
# include <asm-generic/topology.h>
#endif /* _ASM_ALPHA_TOPOLOGY_H */
diff --git a/arch/alpha/include/asm/unaligned.h b/arch/alpha/include/asm/unaligned.h
deleted file mode 100644
index 863c807b66f8..000000000000
--- a/arch/alpha/include/asm/unaligned.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ALPHA_UNALIGNED_H
-#define _ASM_ALPHA_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_ALPHA_UNALIGNED_H */
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index a18ec7f63888..56b4ee5a6c9e 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -71,6 +71,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
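For reference, the two new madvise(2) advice values are used from userspace roughly as below; this is a hedged sketch, and the fallback define is the value added in this header, only needed where libc headers do not yet expose the name.

	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MADV_POPULATE_READ
	#define MADV_POPULATE_READ 22	/* value from the header above */
	#endif

	int main(void)
	{
		size_t len = 4 * 1024 * 1024;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Prefault page tables for reading; unlike MADV_POPULATE_WRITE
		 * this does not break COW or dirty the pages. */
		if (madvise(p, len, MADV_POPULATE_READ))
			perror("madvise(MADV_POPULATE_READ)");

		munmap(p, len);
		return 0;
	}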
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 57420356ce4c..6b3daba60987 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -127,6 +127,8 @@
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70
+#define SO_NETNS_COOKIE 71
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
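SO_NETNS_COOKIE is a read-only socket option reporting the cookie of the network namespace the socket belongs to. A minimal userspace sketch (the fallback define is the value added above):

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/socket.h>

	#ifndef SO_NETNS_COOKIE
	#define SO_NETNS_COOKIE 71	/* value from the header above */
	#endif

	int main(void)
	{
		uint64_t cookie = 0;
		socklen_t len = sizeof(cookie);
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		if (getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len) == 0)
			printf("netns cookie: %llu\n", (unsigned long long)cookie);
		else
			perror("getsockopt(SO_NETNS_COOKIE)");

		return 0;
	}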
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 4485b77f8658..1efca79ac83c 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -287,8 +287,7 @@ io7_init_hose(struct io7 *io7, int port)
/*
* Set up window 0 for scatter-gather 8MB at 8MB.
*/
- hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
- hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new_node(0, hose, 0x00800000, 0x00800000, 0);
hose->sg_isa->align_entry = 8; /* cache line boundary */
csrs->POx_WBASE[0].csr =
hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
@@ -305,8 +304,7 @@ io7_init_hose(struct io7 *io7, int port)
/*
* Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
*/
- hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
- hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci = iommu_arena_new_node(0, hose, 0xc0000000, 0x40000000, 0);
hose->sg_pci->align_entry = 8; /* cache line boundary */
csrs->POx_WBASE[2].csr =
hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
@@ -843,53 +841,8 @@ EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif
-
-/*
- * NUMA Support
- */
-/**********
- * FIXME - for now each cpu is a node by itself
- * -- no real support for striped mode
- **********
- */
-int
-marvel_pa_to_nid(unsigned long pa)
-{
- int cpuid;
- if ((pa >> 43) & 1) /* I/O */
- cpuid = (~(pa >> 35) & 0xff);
- else /* mem */
- cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));
-
- return marvel_cpuid_to_nid(cpuid);
-}
-
-int
-marvel_cpuid_to_nid(int cpuid)
-{
- return cpuid;
-}
-
-unsigned long
-marvel_node_mem_start(int nid)
-{
- unsigned long pa;
-
- pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
- pa <<= 34;
-
- return pa;
-}
-
-unsigned long
-marvel_node_mem_size(int nid)
-{
- return 16UL * 1024 * 1024 * 1024; /* 16GB */
-}
-
-
-/*
+/*
* AGP GART Support.
*/
#include <linux/agp_backend.h>
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
index e8d3b033018d..3a804b67f9da 100644
--- a/arch/alpha/kernel/core_wildfire.c
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -434,39 +434,12 @@ wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
return PCIBIOS_SUCCESSFUL;
}
-struct pci_ops wildfire_pci_ops =
+struct pci_ops wildfire_pci_ops =
{
.read = wildfire_read_config,
.write = wildfire_write_config,
};
-
-/*
- * NUMA Support
- */
-int wildfire_pa_to_nid(unsigned long pa)
-{
- return pa >> 36;
-}
-
-int wildfire_cpuid_to_nid(int cpuid)
-{
- /* assume 4 CPUs per node */
- return cpuid >> 2;
-}
-
-unsigned long wildfire_node_mem_start(int nid)
-{
- /* 64GB per node */
- return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
-}
-
-unsigned long wildfire_node_mem_size(int nid)
-{
- /* 64GB per node */
- return 64UL * 1024 * 1024 * 1024;
-}
-
#if DEBUG_DUMP_REGS
static void __init
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index d84b19aa8e9d..35d7b3096d6e 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -71,33 +71,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
if (align < mem_size)
align = mem_size;
-
-#ifdef CONFIG_DISCONTIGMEM
-
- arena = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena) {
- printk("%s: couldn't allocate arena from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- if (!arena)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*arena));
- }
-
- arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena->ptes) {
- printk("%s: couldn't allocate arena ptes from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena->ptes = memblock_alloc(mem_size, align);
- if (!arena->ptes)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, mem_size, align);
- }
-
-#else /* CONFIG_DISCONTIGMEM */
-
arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
if (!arena)
panic("%s: Failed to allocate %zu bytes\n", __func__,
@@ -107,8 +80,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, mem_size, align);
-#endif /* CONFIG_DISCONTIGMEM */
-
spin_lock_init(&arena->lock);
arena->hose = hose;
arena->dma_base = base;
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 5112ab996394..ef0c08ed0481 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -380,7 +380,7 @@ get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
* This one depends on the frame size of schedule(). Do a
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 701a05090141..5816a31c1b38 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -49,10 +49,6 @@ extern void marvel_init_arch(void);
extern void marvel_kill_arch(int);
extern void marvel_machine_check(unsigned long, unsigned long);
extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
-extern int marvel_pa_to_nid(unsigned long);
-extern int marvel_cpuid_to_nid(int);
-extern unsigned long marvel_node_mem_start(int);
-extern unsigned long marvel_node_mem_size(int);
extern struct _alpha_agp_info *marvel_agp_info(void);
struct io7 *marvel_find_io7(int pe);
struct io7 *marvel_next_io7(struct io7 *prev);
@@ -101,10 +97,6 @@ extern void wildfire_init_arch(void);
extern void wildfire_kill_arch(int);
extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr);
extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
-extern int wildfire_pa_to_nid(unsigned long);
-extern int wildfire_cpuid_to_nid(int);
-extern unsigned long wildfire_node_mem_start(int);
-extern unsigned long wildfire_node_mem_size(int);
/* console.c */
#ifdef CONFIG_VGA_HOSE
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 03dda3beb3bd..7d56c217b235 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/pci.h>
@@ -46,7 +47,6 @@
#include <linux/log2.h>
#include <linux/export.h>
-extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
alpha_panic_event,
@@ -79,11 +79,6 @@ int alpha_l3_cacheshape;
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif
-#ifdef CONFIG_NUMA
-struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_to_cpumask_map);
-#endif
-
/* Which processor we booted from. */
int boot_cpuid;
@@ -305,7 +300,6 @@ move_initrd(unsigned long mem_limit)
}
#endif
-#ifndef CONFIG_DISCONTIGMEM
static void __init
setup_memory(void *kernel_end)
{
@@ -389,9 +383,6 @@ setup_memory(void *kernel_end)
}
#endif /* CONFIG_BLK_DEV_INITRD */
}
-#else
-extern void setup_memory(void *);
-#endif /* !CONFIG_DISCONTIGMEM */
int __init
page_is_ram(unsigned long pfn)
@@ -618,13 +609,6 @@ setup_arch(char **cmdline_p)
"VERBOSE_MCHECK "
#endif
-#ifdef CONFIG_DISCONTIGMEM
- "DISCONTIGMEM "
-#ifdef CONFIG_NUMA
- "NUMA "
-#endif
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
"DEBUG_SPINLOCK "
#endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index f4dd9f3f3001..4b2575f936d4 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -166,7 +166,6 @@ smp_callin(void)
DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
cpuid, current, current->active_mm));
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 438b10c44d73..fc03471a0b0f 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -142,19 +142,13 @@ srmcons_write(struct tty_struct *tty,
return count;
}
-static int
+static unsigned int
srmcons_write_room(struct tty_struct *tty)
{
return 512;
}
static int
-srmcons_chars_in_buffer(struct tty_struct *tty)
-{
- return 0;
-}
-
-static int
srmcons_open(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = &srmcons_singleton;
@@ -200,7 +194,6 @@ static const struct tty_operations srmcons_ops = {
.close = srmcons_close,
.write = srmcons_write,
.write_room = srmcons_write_room,
- .chars_in_buffer= srmcons_chars_in_buffer,
};
static int __init
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 83d6c53d6d4d..1f99b03effc2 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -461,10 +461,5 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
.kill_arch = marvel_kill_arch,
.pci_map_irq = marvel_map_irq,
.pci_swizzle = common_swizzle,
-
- .pa_to_nid = marvel_pa_to_nid,
- .cpuid_to_nid = marvel_cpuid_to_nid,
- .node_mem_start = marvel_node_mem_start,
- .node_mem_size = marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 2c54d707142a..3cee05443f07 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -337,10 +337,5 @@ struct alpha_machine_vector wildfire_mv __initmv = {
.kill_arch = wildfire_kill_arch,
.pci_map_irq = wildfire_map_irq,
.pci_swizzle = common_swizzle,
-
- .pa_to_nid = wildfire_pa_to_nid,
- .cpuid_to_nid = wildfire_cpuid_to_nid,
- .node_mem_start = wildfire_node_mem_start,
- .node_mem_size = wildfire_node_mem_size,
};
ALIAS_MV(wildfire)
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 5622578742fd..a17687ed4b51 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -482,7 +482,7 @@
550 common process_madvise sys_process_madvise
551 common epoll_pwait2 sys_epoll_pwait2
552 common mount_setattr sys_mount_setattr
-553 common quotactl_path sys_quotactl_path
+553 common quotactl_fd sys_quotactl_fd
554 common landlock_create_ruleset sys_landlock_create_ruleset
555 common landlock_add_rule sys_landlock_add_rule
556 common landlock_restrict_self sys_landlock_restrict_self
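quotactl_fd() takes an O_RDONLY file descriptor on the target filesystem instead of a block-device path. A hedged sketch of invoking it through syscall(2), assuming no libc wrapper; the fallback number is the alpha entry from the table above, and "/home" is only a placeholder mount point:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <sys/quota.h>

	#ifndef __NR_quotactl_fd
	#define __NR_quotactl_fd 553	/* alpha syscall number from the table above */
	#endif

	int main(void)
	{
		struct dqblk dq;
		int fd = open("/home", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Query the user quota of the calling uid on the filesystem behind fd. */
		if (syscall(__NR_quotactl_fd, fd, QCMD(Q_GETQUOTA, USRQUOTA),
			    getuid(), &dq) == 0)
			printf("block hard limit: %llu\n",
			       (unsigned long long)dq.dqb_bhardlimit);
		else
			perror("quotactl_fd");

		close(fd);
		return 0;
	}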
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index 08ac6612edad..bd770302eb82 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -6,5 +6,3 @@
ccflags-y := -Werror
obj-y := init.o fault.o
-
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index a97650a618f1..f6114d03357c 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -235,8 +235,6 @@ callback_init(void * kernel_end)
return kernel_end;
}
-
-#ifndef CONFIG_DISCONTIGMEM
/*
* paging_init() sets up the memory map.
*/
@@ -257,7 +255,6 @@ void __init paging_init(void)
/* Initialize the kernel's ZERO_PGE. */
memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
-#endif /* CONFIG_DISCONTIGMEM */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
deleted file mode 100644
index 0636e254a22f..000000000000
--- a/arch/alpha/mm/numa.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/mm/numa.c
- *
- * DISCONTIGMEM NUMA alpha support.
- *
- * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/memblock.h>
-#include <linux/swap.h>
-#include <linux/initrd.h>
-#include <linux/pfn.h>
-#include <linux/module.h>
-
-#include <asm/hwrpb.h>
-#include <asm/sections.h>
-
-pg_data_t node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(node_data);
-
-#undef DEBUG_DISCONTIG
-#ifdef DEBUG_DISCONTIG
-#define DBGDCONT(args...) printk(args)
-#else
-#define DBGDCONT(args...)
-#endif
-
-#define for_each_mem_cluster(memdesc, _cluster, i) \
- for ((_cluster) = (memdesc)->cluster, (i) = 0; \
- (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
-
-static void __init show_mem_layout(void)
-{
- struct memclust_struct * cluster;
- struct memdesc_struct * memdesc;
- int i;
-
- /* Find free clusters, and init and free the bootmem accordingly. */
- memdesc = (struct memdesc_struct *)
- (hwrpb->mddt_offset + (unsigned long) hwrpb);
-
- printk("Raw memory layout:\n");
- for_each_mem_cluster(memdesc, cluster, i) {
- printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
- i, cluster->usage, cluster->start_pfn,
- cluster->start_pfn + cluster->numpages);
- }
-}
-
-static void __init
-setup_memory_node(int nid, void *kernel_end)
-{
- extern unsigned long mem_size_limit;
- struct memclust_struct * cluster;
- struct memdesc_struct * memdesc;
- unsigned long start_kernel_pfn, end_kernel_pfn;
- unsigned long start, end;
- unsigned long node_pfn_start, node_pfn_end;
- unsigned long node_min_pfn, node_max_pfn;
- int i;
- int show_init = 0;
-
- /* Find the bounds of current node */
- node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
- node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);
-
- /* Find free clusters, and init and free the bootmem accordingly. */
- memdesc = (struct memdesc_struct *)
- (hwrpb->mddt_offset + (unsigned long) hwrpb);
-
- /* find the bounds of this node (node_min_pfn/node_max_pfn) */
- node_min_pfn = ~0UL;
- node_max_pfn = 0UL;
- for_each_mem_cluster(memdesc, cluster, i) {
- /* Bit 0 is console/PALcode reserved. Bit 1 is
- non-volatile memory -- we might want to mark
- this for later. */
- if (cluster->usage & 3)
- continue;
-
- start = cluster->start_pfn;
- end = start + cluster->numpages;
-
- if (start >= node_pfn_end || end <= node_pfn_start)
- continue;
-
- if (!show_init) {
- show_init = 1;
- printk("Initializing bootmem allocator on Node ID %d\n", nid);
- }
- printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
- i, cluster->usage, cluster->start_pfn,
- cluster->start_pfn + cluster->numpages);
-
- if (start < node_pfn_start)
- start = node_pfn_start;
- if (end > node_pfn_end)
- end = node_pfn_end;
-
- if (start < node_min_pfn)
- node_min_pfn = start;
- if (end > node_max_pfn)
- node_max_pfn = end;
- }
-
- if (mem_size_limit && node_max_pfn > mem_size_limit) {
- static int msg_shown = 0;
- if (!msg_shown) {
- msg_shown = 1;
- printk("setup: forcing memory size to %ldK (from %ldK).\n",
- mem_size_limit << (PAGE_SHIFT - 10),
- node_max_pfn << (PAGE_SHIFT - 10));
- }
- node_max_pfn = mem_size_limit;
- }
-
- if (node_min_pfn >= node_max_pfn)
- return;
-
- /* Update global {min,max}_low_pfn from node information. */
- if (node_min_pfn < min_low_pfn)
- min_low_pfn = node_min_pfn;
- if (node_max_pfn > max_low_pfn)
- max_pfn = max_low_pfn = node_max_pfn;
-
-#if 0 /* we'll try this one again in a little while */
- /* Cute trick to make sure our local node data is on local memory */
- node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
-#endif
- printk(" Detected node memory: start %8lu, end %8lu\n",
- node_min_pfn, node_max_pfn);
-
- DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
-
- /* Find the bounds of kernel memory. */
- start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
- end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-
- if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
- panic("kernel loaded out of ram");
-
- memblock_add_node(PFN_PHYS(node_min_pfn),
- (node_max_pfn - node_min_pfn) << PAGE_SHIFT, nid);
-
- /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
- Note that we round this down, not up - node memory
- has much larger alignment than 8Mb, so it's safe. */
- node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
-
- NODE_DATA(nid)->node_start_pfn = node_min_pfn;
- NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
-
- node_set_online(nid);
-}
-
-void __init
-setup_memory(void *kernel_end)
-{
- unsigned long kernel_size;
- int nid;
-
- show_mem_layout();
-
- nodes_clear(node_online_map);
-
- min_low_pfn = ~0UL;
- max_low_pfn = 0UL;
- for (nid = 0; nid < MAX_NUMNODES; nid++)
- setup_memory_node(nid, kernel_end);
-
- kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
- memblock_reserve(KERNEL_START_PHYS, kernel_size);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_start = INITRD_START;
- if (initrd_start) {
- extern void *move_initrd(unsigned long);
-
- initrd_end = initrd_start+INITRD_SIZE;
- printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *) initrd_start, INITRD_SIZE);
-
- if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
- if (!move_initrd(PFN_PHYS(max_low_pfn)))
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%p)\ndisabling initrd\n",
- initrd_end,
- phys_to_virt(PFN_PHYS(max_low_pfn)));
- } else {
- nid = kvaddr_to_nid(initrd_start);
- memblock_reserve(virt_to_phys((void *)initrd_start),
- INITRD_SIZE);
- }
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-}
-
-void __init paging_init(void)
-{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
- unsigned long dma_local_pfn;
-
- /*
- * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
- * in the NUMA model, for now we convert it to a pfn and
- * we interpret this pfn as a local per-node information.
- * This issue isn't very important since none of these machines
- * have legacy ISA slots anyways.
- */
- dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
- max_zone_pfn[ZONE_DMA] = dma_local_pfn;
- max_zone_pfn[ZONE_NORMAL] = max_pfn;
-
- free_area_init(max_zone_pfn);
-
- /* Initialize the kernel's ZERO_PGE. */
- memset((void *)ZERO_PGE, 0, PAGE_SIZE);
-}
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2d98501c0897..d8f51eb8963b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -62,10 +62,6 @@ config SCHED_OMIT_FRAME_POINTER
config GENERIC_CSUM
def_bool y
-config ARCH_DISCONTIGMEM_ENABLE
- def_bool n
- depends on BROKEN
-
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -344,15 +340,6 @@ config ARC_HUGEPAGE_16M
endchoice
-config NODES_SHIFT
- int "Maximum NUMA Nodes (as a power of 2)"
- default "0" if !DISCONTIGMEM
- default "1" if DISCONTIGMEM
- depends on NEED_MULTIPLE_NODES
- help
- Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
- zones.
-
config ARC_COMPACT_IRQ_LEVELS
depends on ISA_ARCOMPACT
bool "Setup Timer IRQ as high Priority"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 4392c9c189c4..e47adc97a89b 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -31,7 +31,7 @@ endif
ifdef CONFIG_ARC_CURR_IN_REG
-# For a global register defintion, make sure it gets passed to every file
+# For a global register definition, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
# any kernel headers, and missing the r25 global register
# Can't do unconditionally because of recursive include issues
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5afc79c9b2f5..7a36d79b5b2f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,14 +14,14 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -37,7 +37,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -63,7 +63,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#else
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
{
/*
* Independent of hardware support, all of the atomic_xxx() APIs need
@@ -116,7 +116,7 @@ static inline void atomic_set(atomic_t *v, int i)
atomic_ops_unlock(flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
#endif
@@ -126,7 +126,7 @@ static inline void atomic_set(atomic_t *v, int i)
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -136,7 +136,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long temp; \
@@ -154,7 +154,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long orig; \
@@ -180,9 +180,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
-#define atomic_fetch_andnot atomic_fetch_andnot
-
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
@@ -193,6 +190,9 @@ ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -220,7 +220,7 @@ typedef struct {
#define ATOMIC64_INIT(a) { (a) }
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 val;
@@ -232,7 +232,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return val;
}
-static inline void atomic64_set(atomic64_t *v, s64 a)
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{
/*
* This could have been a simple assignment in "C" but would need
@@ -253,7 +253,7 @@ static inline void atomic64_set(atomic64_t *v, s64 a)
}
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 a, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -270,7 +270,7 @@ static inline void atomic64_##op(s64 a, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -293,7 +293,7 @@ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, op1, op2) \
-static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
s64 val, orig; \
\
@@ -320,9 +320,6 @@ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-
ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
@@ -330,13 +327,16 @@ ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
+#define arch_atomic64_andnot arch_atomic64_andnot
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline s64
-atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
s64 prev;
@@ -358,7 +358,7 @@ atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
return prev;
}
-static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
s64 prev;
@@ -379,14 +379,14 @@ static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
}
/**
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 val;
@@ -408,10 +408,10 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return val;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -419,7 +419,7 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v
*/
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 old, temp;
@@ -443,7 +443,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 9b87e162e539..d42917e803e1 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
#endif
-#define cmpxchg(ptr, o, n) ({ \
+#define arch_cmpxchg(ptr, o, n) ({ \
(typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n)); \
@@ -75,7 +75,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
* !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
* semantics, and this lock also happens to be used by atomic_*()
*/
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
/*
@@ -116,14 +116,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
*
* Technically the lock is also needed for UP (boils down to irq save/restore)
* but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
* Other way around, xchg is one instruction anyways, so can't be interrupted
* as such
*/
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
unsigned long flags; \
typeof(*(ptr)) old_val; \
@@ -136,14 +136,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#else
-#define xchg(ptr, with) _xchg(ptr, with)
+#define arch_xchg(ptr, with) _xchg(ptr, with)
#endif
/*
* "atomic" variant of xchg()
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
* is incorrect. But here's the rationale:
* SMP : Even xchg() takes the atomic_ops_lock, so OK.
* LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
@@ -153,6 +153,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
* can't be clobbered by others. Thus no serialization required when
* atomic_xchg is involved.
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
deleted file mode 100644
index b86b9d1e54dc..000000000000
--- a/arch/arc/include/asm/mmzone.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ASM_ARC_MMZONE_H
-#define _ASM_ARC_MMZONE_H
-
-#ifdef CONFIG_DISCONTIGMEM
-
-extern struct pglist_data node_data[];
-#define NODE_DATA(nid) (&node_data[nid])
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
- int is_end_low = 1;
-
- if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
- is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
-
- /*
- * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF
- * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF
- * HIGHMEM with PAE40: 0x1_0000_0000 to ...
- */
- if (pfn >= ARCH_PFN_OFFSET && is_end_low)
- return 0;
-
- return 1;
-}
-
-static inline int pfn_valid(unsigned long pfn)
-{
- int nid = pfn_to_nid(pfn);
-
- return (pfn <= node_end_pfn(nid));
-}
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index ad9b7fe4dba3..4a9d33372fe2 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -7,6 +7,18 @@
#include <uapi/asm/page.h>
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#define PAGE_MASK_PHYS PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
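The point of PAGE_MASK_PHYS is that plain PAGE_MASK is a 32-bit unsigned long on ARC, so masking a 40-bit PAE physical address with it silently clears bits [39:32]. A standalone illustration (ordinary userspace C, not kernel code; 8 KiB pages assumed):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t paddr = 0x1234567890ULL;		/* 40-bit physical address */
		uint32_t page_mask = ~(uint32_t)(8192 - 1);	/* old 32-bit PAGE_MASK */
		uint64_t page_mask_phys = 0xff00000000ULL | page_mask;	/* new mask */

		printf("paddr & PAGE_MASK      = %#llx\n",
		       (unsigned long long)(paddr & page_mask));	/* bits 39:32 lost */
		printf("paddr & PAGE_MASK_PHYS = %#llx\n",
		       (unsigned long long)(paddr & page_mask_phys));	/* bits kept */
		return 0;
	}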
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 6147db925248..a32ca3104ced 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -129,6 +129,4 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
-#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
-
#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 163641726a2b..320cc0ae8a08 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -107,8 +107,8 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -228,12 +222,6 @@
*/
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-/*
- * No special requirements for lowest virtual address we permit any user space
- * mapping to be mapped at.
- */
-#define FIRST_USER_ADDRESS 0UL
-
/****************************************************************
* Bucket load of VM Helpers
@@ -362,6 +350,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#define kern_addr_valid(addr) (1)
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
+
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 2a97e2718a21..2a4ad619abfb 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -33,5 +33,4 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
-
#endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
index 95f8a4380e11..7a5449dfcb29 100644
--- a/arch/arc/include/uapi/asm/sigcontext.h
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -18,6 +18,7 @@
*/
struct sigcontext {
struct user_regs_struct regs;
+ struct user_regs_arcv2 v2abi;
};
#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 1743506081da..2cb8dfe866b6 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -177,7 +177,7 @@ tracesys:
; Do the Sys Call as we normally would.
; Validate the Sys Call number
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
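The two "cmp r8, NR_syscalls - 1" changes fix an off-by-one: valid syscall numbers are 0..NR_syscalls-1, and bhi only branches when r8 is strictly higher than the compared value, so comparing against NR_syscalls let r8 == NR_syscalls slip through. In C terms the check the assembly now implements is roughly:

	/* Illustration of the bounds check; nr stands in for r8. */
	if (nr > NR_syscalls - 1)	/* i.e. nr >= NR_syscalls: out of range */
		return -ENOSYS;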
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index ecfbc42d3a40..345a0000554c 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->ret = addr;
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index cabef45f11df..5f0415fc7328 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -317,22 +317,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
* caused the fault.
*/
- /* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
/*
* In case the user-specified fault handler returned zero,
* try to fix up.
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index d838d0d57696..3793876f42d9 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
int ret;
/*
- * This is only for old cores lacking LLOCK/SCOND, which by defintion
+ * This is only for old cores lacking LLOCK/SCOND, which by definition
* can't possibly be SMP. Thus doesn't need to be SMP safe.
* And this also helps reduce the overhead for serializing in
* the UP case
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
- /* Z indicates to userspace if operation succeded */
+ /* Z indicates to userspace if operation succeeded */
regs->status32 &= ~STATUS_Z_MASK;
ret = access_ok(uaddr, sizeof(*uaddr));
@@ -107,7 +107,7 @@ fail:
void arch_cpu_idle(void)
{
- /* Re-enable interrupts <= default irq priority before commiting SLEEP */
+ /* Re-enable interrupts <= default irq priority before committing SLEEP */
const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
__asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)
void arch_cpu_idle(void)
{
- /* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
+ /* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
__asm__ __volatile__("sleep 0x3 \n");
}
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index fdbe06c98895..cb2f88502baf 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -61,6 +61,41 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ v2abi.r58 = regs->r58;
+ v2abi.r59 = regs->r59;
+#else
+ v2abi.r58 = v2abi.r59 = 0;
+#endif
+ err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+ return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+ regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ regs->r58 = v2abi.r58;
+ regs->r59 = v2abi.r59;
+#endif
+#endif
+ return err;
+}
+
static int
stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
if (err)
return -EFAULT;
@@ -259,7 +302,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
regs->r2 = (unsigned long)&sf->uc;
/*
- * small optim to avoid unconditonally calling do_sigaltstack
+ * small optim to avoid unconditionally calling do_sigaltstack
* in sigreturn path, now that we only have rt_sigreturn
*/
magic = MAGIC_SIGALTSTK;
@@ -391,7 +434,7 @@ void do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs)
{
/*
- * ASM glue gaurantees that this is only called when returning to
+ * ASM glue guarantees that this is only called when returning to
* user mode
*/
if (test_thread_flag(TIF_NOTIFY_RESUME))
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 52906d314537..db0e104d6835 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index f73da203b170..1b9576d21e24 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -83,7 +83,7 @@ seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
* is safe-kept and BLINK at a well known location in there
*/
- if (tsk->state == TASK_RUNNING)
+ if (task_is_running(tsk))
return -1;
frame_info->task = tsk;
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a331bb5d8319..7654c2e42dc0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -83,12 +83,12 @@ static void show_faulting_vma(unsigned long address)
* non-inclusive vma
*/
mmap_read_lock(active_mm);
- vma = find_vma(active_mm, address);
+ vma = vma_lookup(active_mm, address);
- /* check against the find_vma( ) behaviour which returns the next VMA
- * if the container VMA is not found
+ /* Lookup the vma at the address and report if the container VMA is not
+ * found
*/
- if (vma && (vma->vm_start <= address)) {
+ if (vma) {
char buf[ARC_PATH_MAX];
char *nm = "?";
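[Editor's note: the switch from find_vma() to vma_lookup() above drops the explicit vm_start check because vma_lookup() only returns a VMA that actually contains the address. It behaves roughly like this wrapper around find_vma() (sketch, not part of this patch):]

	static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
							unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* find_vma() returns the first VMA with vm_end > addr, which
		 * may start above addr; only report a hit if addr is inside.
		 */
		if (vma && addr < vma->vm_start)
			vma = NULL;

		return vma;
	}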
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 33ce59d91461..e2146a8da195 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -57,7 +57,6 @@ SECTIONS
.init.ramfs : { INIT_RAM_FS }
. = ALIGN(PAGE_SIZE);
- _stext = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS
.text : {
_text = .;
+ _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 33832e36bdb7..abfeef7bf6f8 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -32,11 +32,6 @@ unsigned long arch_pfn_offset;
EXPORT_SYMBOL(arch_pfn_offset);
#endif
-#ifdef CONFIG_DISCONTIGMEM
-struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-#endif
-
long __init arc_get_mem_sz(void)
{
return low_mem_sz;
@@ -139,25 +134,28 @@ void __init setup_arch_memory(void)
#ifdef CONFIG_HIGHMEM
/*
- * Populate a new node with highmem
- *
* On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
- * than addresses in normal ala low memory (0x8000_0000 based).
+ * than addresses in normal aka low memory (0x8000_0000 based).
* Even with PAE, the huge peripheral space hole would waste a lot of
- * mem with single mem_map[]. This warrants a mem_map per region design.
- * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM.
- *
- * DISCONTIGMEM in turns requires multiple nodes. node 0 above is
- * populated with normal memory zone while node 1 only has highmem
+ * mem with single contiguous mem_map[].
+ * Thus when HIGHMEM on ARC is enabled the memory map corresponding
+ * to the hole is freed and ARC specific version of pfn_valid()
+ * handles the hole in the memory map.
*/
-#ifdef CONFIG_DISCONTIGMEM
- node_set_online(1);
-#endif
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
+ /*
+ * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+ * For HIGHMEM without PAE max_high_pfn should be less than
+ * min_low_pfn to guarantee that these two regions don't overlap.
+ * For PAE case highmem is greater than lowmem, so it is natural
+ * to use max_high_pfn.
+ *
+ * In both cases, holes should be handled by pfn_valid().
+ */
+ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
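[Editor's sketch: with the DISCONTIGMEM node gone, the comment above relies on an ARC-specific pfn_valid() to reject pfns falling into the hole between low memory and the 0-based highmem region. A hole-aware check of that kind reduces to testing both ranges; the helper below is only an illustration built from the min/max pfn variables already used in this file, not the actual arch implementation:]

	/* Illustration only: accept a pfn that lies in either RAM range and
	 * reject anything in the hole between lowmem and highmem.
	 */
	static inline bool pfn_in_present_ram(unsigned long pfn)
	{
		return (pfn >= min_low_pfn && pfn < max_low_pfn) ||
		       (pfn >= min_high_pfn && pfn < max_high_pfn);
	}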
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index fac4adc90204..95c649fbc95a 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags)
{
+ unsigned int off;
unsigned long vaddr;
struct vm_struct *area;
- phys_addr_t off, end;
+ phys_addr_t end;
pgprot_t prot = __pgprot(flags);
/* Don't allow wraparound, zero size */
@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
/* Mappings have to be page-aligned */
off = paddr & ~PAGE_MASK;
- paddr &= PAGE_MASK;
+ paddr &= PAGE_MASK_PHYS;
size = PAGE_ALIGN(end + 1) - paddr;
/*
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 9bb3c24f3677..9c7c68247289 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
- phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 24804f11302d..06b6187b67af 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -218,9 +218,6 @@ config GENERIC_CALIBRATE_DELAY
config ARCH_MAY_HAVE_PC_FDC
bool
-config ZONE_DMA
- bool
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -353,7 +350,6 @@ config ARCH_EP93XX
select ARM_VIC
select GENERIC_IRQ_MULTI_HANDLER
select AUTO_ZRELADDR
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CPU_ARM920T
select GPIOLIB
@@ -504,7 +500,6 @@ config ARCH_OMAP1
bool "TI OMAP1"
depends on MMU
select ARCH_OMAP
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index dced92a8970e..b7b7106f2dee 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -164,7 +164,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-core-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0490>, <0x045c>, <0x0468>;
+ reg = <0x0490>, <0x045c>, <0x0468>, <0x0460>, <0x0464>;
};
dpll_core_x2_ck: dpll_core_x2_ck {
@@ -204,7 +204,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0488>, <0x0420>, <0x042c>;
+ reg = <0x0488>, <0x0420>, <0x042c>, <0x0424>, <0x0428>;
};
dpll_mpu_m2_ck: dpll_mpu_m2_ck@4a8 {
@@ -220,7 +220,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-no-gate-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0494>, <0x0434>, <0x0440>;
+ reg = <0x0494>, <0x0434>, <0x0440>, <0x0438>, <0x043c>;
};
dpll_ddr_m2_ck: dpll_ddr_m2_ck@4a0 {
@@ -244,7 +244,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-no-gate-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0498>, <0x0448>, <0x0454>;
+ reg = <0x0498>, <0x0448>, <0x0454>, <0x044c>, <0x0450>;
};
dpll_disp_m2_ck: dpll_disp_m2_ck@4a4 {
@@ -261,7 +261,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-no-gate-j-type-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x048c>, <0x0470>, <0x049c>;
+ reg = <0x048c>, <0x0470>, <0x049c>, <0x0474>, <0x0478>;
};
dpll_per_m2_ck: dpll_per_m2_ck@4ac {
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index c726cd8dbdf1..314fc5975acb 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -204,7 +204,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-core-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2d20>, <0x2d24>, <0x2d2c>;
+ reg = <0x2d20>, <0x2d24>, <0x2d2c>, <0x2d48>, <0x2d4c>;
};
dpll_core_x2_ck: dpll_core_x2_ck {
@@ -250,7 +250,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2d60>, <0x2d64>, <0x2d6c>;
+ reg = <0x2d60>, <0x2d64>, <0x2d6c>, <0x2d88>, <0x2d8c>;
};
dpll_mpu_m2_ck: dpll_mpu_m2_ck@2d70 {
@@ -276,7 +276,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2da0>, <0x2da4>, <0x2dac>;
+ reg = <0x2da0>, <0x2da4>, <0x2dac>, <0x2dc8>, <0x2dcc>;
};
dpll_ddr_m2_ck: dpll_ddr_m2_ck@2db0 {
@@ -294,7 +294,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2e20>, <0x2e24>, <0x2e2c>;
+ reg = <0x2e20>, <0x2e24>, <0x2e2c>, <0x2e48>, <0x2e4c>;
};
dpll_disp_m2_ck: dpll_disp_m2_ck@2e30 {
@@ -313,7 +313,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-j-type-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2de0>, <0x2de4>, <0x2dec>;
+ reg = <0x2de0>, <0x2de4>, <0x2dec>, <0x2e08>, <0x2e0c>;
};
dpll_per_m2_ck: dpll_per_m2_ck@2df0 {
@@ -557,7 +557,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2e60>, <0x2e64>, <0x2e6c>;
+ reg = <0x2e60>, <0x2e64>, <0x2e6c>, <0x2e88>, <0x2e8c>;
};
dpll_extdev_m2_ck: dpll_extdev_m2_ck@2e70 {
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
index a534a8e444d9..04e8a27ba1eb 100644
--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -148,7 +148,7 @@
usb: usb@4f000000 {
compatible = "nxp,usb-isp1761";
reg = <0x4f000000 0x20000>;
- port1-otg;
+ dr_mode = "peripheral";
};
bridge {
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index f925782f8560..366687fb1ee3 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -166,7 +166,7 @@
reg = <0x3b000000 0x20000>;
interrupt-parent = <&intc_fpga1176>;
interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
- port1-otg;
+ dr_mode = "peripheral";
};
bridge {
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 0c7dabef4a5f..228a51a38f95 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -712,7 +712,7 @@
reg = <0x4f000000 0x20000>;
interrupt-parent = <&intc_tc11mp>;
interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>;
- port1-otg;
+ dr_mode = "peripheral";
};
};
};
diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi
index ac95667ed781..ccf6f756b6ed 100644
--- a/arch/arm/boot/dts/arm-realview-pbx.dtsi
+++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi
@@ -164,7 +164,7 @@
usb: usb@4f000000 {
compatible = "nxp,usb-isp1761";
reg = <0x4f000000 0x20000>;
- port1-otg;
+ dr_mode = "peripheral";
};
bridge {
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index be040b6a02fa..5a5fa6190a52 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -651,6 +651,7 @@
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
+ fsl,dma-info = <24 20>;
status = "okay";
};
@@ -670,6 +671,7 @@
&uart5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart5>;
+ fsl,dma-info = <4096 4>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index 7d2c72562c73..9148a01ed6d9 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -105,9 +105,13 @@
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
phy-reset-duration = <20>;
phy-supply = <&sw2_reg>;
- phy-handle = <&ethphy0>;
status = "okay";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
mdio {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
index 236fc205c389..d0768ae429fa 100644
--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
@@ -406,6 +406,18 @@
vin-supply = <&sw1_reg>;
};
+&reg_pu {
+ vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+ vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+ vin-supply = <&sw2_reg>;
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
index 828cf3e39784..c4e146f3341b 100644
--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
@@ -126,7 +126,7 @@
compatible = "nxp,pca8574";
reg = <0x3a>;
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
};
};
diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
index 5339210b63d0..dd8003bd1fc0 100644
--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
+++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
@@ -193,7 +193,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
keep-power-in-suspend;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
no-1-8-v;
broken-cd;
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index e57da0d32b98..e519897fae08 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -351,7 +351,7 @@
pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
bus-width = <4>;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
wakeup-source;
no-1-8-v;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 05c55875835d..e47e1ca63043 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -101,6 +101,13 @@
ranges = <0 0x100000 0x2400>;
};
+ vdec0: vdec@300000 {
+ compatible = "microchip,sama5d4-vdec";
+ reg = <0x00300000 0x100000>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ };
+
usb0: gadget@400000 {
compatible = "atmel,sama5d3-udc";
reg = <0x00400000 0x100000
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 4f7220b11f2d..2ad9fd7c94ec 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -144,7 +144,7 @@
compatible = "nxp,usb-isp1761";
reg = <2 0x03000000 0x20000>;
interrupts = <16>;
- port1-otg;
+ dr_mode = "peripheral";
};
iofpga-bus@300000000 {
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index 2ac41ed3a57c..ec13ceb9ed36 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -62,7 +62,7 @@
compatible = "nxp,usb-isp1761";
reg = <3 0x03000000 0x20000>;
interrupts = <16>;
- port1-otg;
+ dr_mode = "peripheral";
};
iofpga@7,00000000 {
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 2aa3ebeb89d7..7a32de51f0fa 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -64,7 +64,6 @@ CONFIG_PARIDE_ON26=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_VENDOR_3COM=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 875a3c28a267..363f1b1b08e3 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -215,8 +215,6 @@ CONFIG_IIO=m
CONFIG_AD5446=m
CONFIG_EEPROM_AT24=m
CONFIG_SENSORS_LIS3_SPI=m
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 8f26c454ea12..eafa898ba6a7 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -45,20 +45,12 @@ poly1305-arm-y := poly1305-core.o poly1305-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
curve25519-neon-y := curve25519-core.o curve25519-glue.o
-ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
-$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv4.pl
+$(obj)/%-core.S: $(src)/%-armv4.pl
$(call cmd,perl)
-$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
- $(call cmd,perl)
-
-$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
- $(call cmd,perl)
-endif
-
clean-files += poly1305-core.S sha256-core.S sha512-core.S
# massage the perlasm code a bit so we only get the NEON routine if we need it
diff --git a/arch/arm/crypto/poly1305-core.S_shipped b/arch/arm/crypto/poly1305-core.S_shipped
deleted file mode 100644
index 37b71d990293..000000000000
--- a/arch/arm/crypto/poly1305-core.S_shipped
+++ /dev/null
@@ -1,1158 +0,0 @@
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
-# define poly1305_init poly1305_init_arm
-# define poly1305_blocks poly1305_blocks_arm
-# define poly1305_emit poly1305_emit_arm
-.globl poly1305_blocks_neon
-#endif
-
-#if defined(__thumb2__)
-.syntax unified
-.thumb
-#else
-.code 32
-#endif
-
-.text
-
-.globl poly1305_emit
-.globl poly1305_blocks
-.globl poly1305_init
-.type poly1305_init,%function
-.align 5
-poly1305_init:
-.Lpoly1305_init:
- stmdb sp!,{r4-r11}
-
- eor r3,r3,r3
- cmp r1,#0
- str r3,[r0,#0] @ zero hash value
- str r3,[r0,#4]
- str r3,[r0,#8]
- str r3,[r0,#12]
- str r3,[r0,#16]
- str r3,[r0,#36] @ clear is_base2_26
- add r0,r0,#20
-
-#ifdef __thumb2__
- it eq
-#endif
- moveq r0,#0
- beq .Lno_key
-
-#if __ARM_MAX_ARCH__>=7
- mov r3,#-1
- str r3,[r0,#28] @ impossible key power value
-# ifndef __KERNEL__
- adr r11,.Lpoly1305_init
- ldr r12,.LOPENSSL_armcap
-# endif
-#endif
- ldrb r4,[r1,#0]
- mov r10,#0x0fffffff
- ldrb r5,[r1,#1]
- and r3,r10,#-4 @ 0x0ffffffc
- ldrb r6,[r1,#2]
- ldrb r7,[r1,#3]
- orr r4,r4,r5,lsl#8
- ldrb r5,[r1,#4]
- orr r4,r4,r6,lsl#16
- ldrb r6,[r1,#5]
- orr r4,r4,r7,lsl#24
- ldrb r7,[r1,#6]
- and r4,r4,r10
-
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-# if !defined(_WIN32)
- ldr r12,[r11,r12] @ OPENSSL_armcap_P
-# endif
-# if defined(__APPLE__) || defined(_WIN32)
- ldr r12,[r12]
-# endif
-#endif
- ldrb r8,[r1,#7]
- orr r5,r5,r6,lsl#8
- ldrb r6,[r1,#8]
- orr r5,r5,r7,lsl#16
- ldrb r7,[r1,#9]
- orr r5,r5,r8,lsl#24
- ldrb r8,[r1,#10]
- and r5,r5,r3
-
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- tst r12,#ARMV7_NEON @ check for NEON
-# ifdef __thumb2__
- adr r9,.Lpoly1305_blocks_neon
- adr r11,.Lpoly1305_blocks
- it ne
- movne r11,r9
- adr r12,.Lpoly1305_emit
- orr r11,r11,#1 @ thumb-ify addresses
- orr r12,r12,#1
-# else
- add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
- ite eq
- addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
- addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
-# endif
-#endif
- ldrb r9,[r1,#11]
- orr r6,r6,r7,lsl#8
- ldrb r7,[r1,#12]
- orr r6,r6,r8,lsl#16
- ldrb r8,[r1,#13]
- orr r6,r6,r9,lsl#24
- ldrb r9,[r1,#14]
- and r6,r6,r3
-
- ldrb r10,[r1,#15]
- orr r7,r7,r8,lsl#8
- str r4,[r0,#0]
- orr r7,r7,r9,lsl#16
- str r5,[r0,#4]
- orr r7,r7,r10,lsl#24
- str r6,[r0,#8]
- and r7,r7,r3
- str r7,[r0,#12]
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- stmia r2,{r11,r12} @ fill functions table
- mov r0,#1
-#else
- mov r0,#0
-#endif
-.Lno_key:
- ldmia sp!,{r4-r11}
-#if __ARM_ARCH__>=5
- bx lr @ bx lr
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_init,.-poly1305_init
-.type poly1305_blocks,%function
-.align 5
-poly1305_blocks:
-.Lpoly1305_blocks:
- stmdb sp!,{r3-r11,lr}
-
- ands r2,r2,#-16
- beq .Lno_data
-
- add r2,r2,r1 @ end pointer
- sub sp,sp,#32
-
-#if __ARM_ARCH__<7
- ldmia r0,{r4-r12} @ load context
- add r0,r0,#20
- str r2,[sp,#16] @ offload stuff
- str r0,[sp,#12]
-#else
- ldr lr,[r0,#36] @ is_base2_26
- ldmia r0!,{r4-r8} @ load hash value
- str r2,[sp,#16] @ offload stuff
- str r0,[sp,#12]
-
- adds r9,r4,r5,lsl#26 @ base 2^26 -> base 2^32
- mov r10,r5,lsr#6
- adcs r10,r10,r6,lsl#20
- mov r11,r6,lsr#12
- adcs r11,r11,r7,lsl#14
- mov r12,r7,lsr#18
- adcs r12,r12,r8,lsl#8
- mov r2,#0
- teq lr,#0
- str r2,[r0,#16] @ clear is_base2_26
- adc r2,r2,r8,lsr#24
-
- itttt ne
- movne r4,r9 @ choose between radixes
- movne r5,r10
- movne r6,r11
- movne r7,r12
- ldmia r0,{r9-r12} @ load key
- it ne
- movne r8,r2
-#endif
-
- mov lr,r1
- cmp r3,#0
- str r10,[sp,#20]
- str r11,[sp,#24]
- str r12,[sp,#28]
- b .Loop
-
-.align 4
-.Loop:
-#if __ARM_ARCH__<7
- ldrb r0,[lr],#16 @ load input
-# ifdef __thumb2__
- it hi
-# endif
- addhi r8,r8,#1 @ 1<<128
- ldrb r1,[lr,#-15]
- ldrb r2,[lr,#-14]
- ldrb r3,[lr,#-13]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-12]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-11]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-10]
- adds r4,r4,r3 @ accumulate input
-
- ldrb r3,[lr,#-9]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-8]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-7]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-6]
- adcs r5,r5,r3
-
- ldrb r3,[lr,#-5]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-4]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-3]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-2]
- adcs r6,r6,r3
-
- ldrb r3,[lr,#-1]
- orr r1,r0,r1,lsl#8
- str lr,[sp,#8] @ offload input pointer
- orr r2,r1,r2,lsl#16
- add r10,r10,r10,lsr#2
- orr r3,r2,r3,lsl#24
-#else
- ldr r0,[lr],#16 @ load input
- it hi
- addhi r8,r8,#1 @ padbit
- ldr r1,[lr,#-12]
- ldr r2,[lr,#-8]
- ldr r3,[lr,#-4]
-# ifdef __ARMEB__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-# endif
- adds r4,r4,r0 @ accumulate input
- str lr,[sp,#8] @ offload input pointer
- adcs r5,r5,r1
- add r10,r10,r10,lsr#2
- adcs r6,r6,r2
-#endif
- add r11,r11,r11,lsr#2
- adcs r7,r7,r3
- add r12,r12,r12,lsr#2
-
- umull r2,r3,r5,r9
- adc r8,r8,#0
- umull r0,r1,r4,r9
- umlal r2,r3,r8,r10
- umlal r0,r1,r7,r10
- ldr r10,[sp,#20] @ reload r10
- umlal r2,r3,r6,r12
- umlal r0,r1,r5,r12
- umlal r2,r3,r7,r11
- umlal r0,r1,r6,r11
- umlal r2,r3,r4,r10
- str r0,[sp,#0] @ future r4
- mul r0,r11,r8
- ldr r11,[sp,#24] @ reload r11
- adds r2,r2,r1 @ d1+=d0>>32
- eor r1,r1,r1
- adc lr,r3,#0 @ future r6
- str r2,[sp,#4] @ future r5
-
- mul r2,r12,r8
- eor r3,r3,r3
- umlal r0,r1,r7,r12
- ldr r12,[sp,#28] @ reload r12
- umlal r2,r3,r7,r9
- umlal r0,r1,r6,r9
- umlal r2,r3,r6,r10
- umlal r0,r1,r5,r10
- umlal r2,r3,r5,r11
- umlal r0,r1,r4,r11
- umlal r2,r3,r4,r12
- ldr r4,[sp,#0]
- mul r8,r9,r8
- ldr r5,[sp,#4]
-
- adds r6,lr,r0 @ d2+=d1>>32
- ldr lr,[sp,#8] @ reload input pointer
- adc r1,r1,#0
- adds r7,r2,r1 @ d3+=d2>>32
- ldr r0,[sp,#16] @ reload end pointer
- adc r3,r3,#0
- add r8,r8,r3 @ h4+=d3>>32
-
- and r1,r8,#-4
- and r8,r8,#3
- add r1,r1,r1,lsr#2 @ *=5
- adds r4,r4,r1
- adcs r5,r5,#0
- adcs r6,r6,#0
- adcs r7,r7,#0
- adc r8,r8,#0
-
- cmp r0,lr @ done yet?
- bhi .Loop
-
- ldr r0,[sp,#12]
- add sp,sp,#32
- stmdb r0,{r4-r8} @ store the result
-
-.Lno_data:
-#if __ARM_ARCH__>=5
- ldmia sp!,{r3-r11,pc}
-#else
- ldmia sp!,{r3-r11,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_blocks,.-poly1305_blocks
-.type poly1305_emit,%function
-.align 5
-poly1305_emit:
-.Lpoly1305_emit:
- stmdb sp!,{r4-r11}
-
- ldmia r0,{r3-r7}
-
-#if __ARM_ARCH__>=7
- ldr ip,[r0,#36] @ is_base2_26
-
- adds r8,r3,r4,lsl#26 @ base 2^26 -> base 2^32
- mov r9,r4,lsr#6
- adcs r9,r9,r5,lsl#20
- mov r10,r5,lsr#12
- adcs r10,r10,r6,lsl#14
- mov r11,r6,lsr#18
- adcs r11,r11,r7,lsl#8
- mov r0,#0
- adc r0,r0,r7,lsr#24
-
- tst ip,ip
- itttt ne
- movne r3,r8
- movne r4,r9
- movne r5,r10
- movne r6,r11
- it ne
- movne r7,r0
-#endif
-
- adds r8,r3,#5 @ compare to modulus
- adcs r9,r4,#0
- adcs r10,r5,#0
- adcs r11,r6,#0
- adc r0,r7,#0
- tst r0,#4 @ did it carry/borrow?
-
-#ifdef __thumb2__
- it ne
-#endif
- movne r3,r8
- ldr r8,[r2,#0]
-#ifdef __thumb2__
- it ne
-#endif
- movne r4,r9
- ldr r9,[r2,#4]
-#ifdef __thumb2__
- it ne
-#endif
- movne r5,r10
- ldr r10,[r2,#8]
-#ifdef __thumb2__
- it ne
-#endif
- movne r6,r11
- ldr r11,[r2,#12]
-
- adds r3,r3,r8
- adcs r4,r4,r9
- adcs r5,r5,r10
- adc r6,r6,r11
-
-#if __ARM_ARCH__>=7
-# ifdef __ARMEB__
- rev r3,r3
- rev r4,r4
- rev r5,r5
- rev r6,r6
-# endif
- str r3,[r1,#0]
- str r4,[r1,#4]
- str r5,[r1,#8]
- str r6,[r1,#12]
-#else
- strb r3,[r1,#0]
- mov r3,r3,lsr#8
- strb r4,[r1,#4]
- mov r4,r4,lsr#8
- strb r5,[r1,#8]
- mov r5,r5,lsr#8
- strb r6,[r1,#12]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#1]
- mov r3,r3,lsr#8
- strb r4,[r1,#5]
- mov r4,r4,lsr#8
- strb r5,[r1,#9]
- mov r5,r5,lsr#8
- strb r6,[r1,#13]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#2]
- mov r3,r3,lsr#8
- strb r4,[r1,#6]
- mov r4,r4,lsr#8
- strb r5,[r1,#10]
- mov r5,r5,lsr#8
- strb r6,[r1,#14]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#3]
- strb r4,[r1,#7]
- strb r5,[r1,#11]
- strb r6,[r1,#15]
-#endif
- ldmia sp!,{r4-r11}
-#if __ARM_ARCH__>=5
- bx lr @ bx lr
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_emit,.-poly1305_emit
-#if __ARM_MAX_ARCH__>=7
-.fpu neon
-
-.type poly1305_init_neon,%function
-.align 5
-poly1305_init_neon:
-.Lpoly1305_init_neon:
- ldr r3,[r0,#48] @ first table element
- cmp r3,#-1 @ is value impossible?
- bne .Lno_init_neon
-
- ldr r4,[r0,#20] @ load key base 2^32
- ldr r5,[r0,#24]
- ldr r6,[r0,#28]
- ldr r7,[r0,#32]
-
- and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
- mov r3,r4,lsr#26
- mov r4,r5,lsr#20
- orr r3,r3,r5,lsl#6
- mov r5,r6,lsr#14
- orr r4,r4,r6,lsl#12
- mov r6,r7,lsr#8
- orr r5,r5,r7,lsl#18
- and r3,r3,#0x03ffffff
- and r4,r4,#0x03ffffff
- and r5,r5,#0x03ffffff
-
- vdup.32 d0,r2 @ r^1 in both lanes
- add r2,r3,r3,lsl#2 @ *5
- vdup.32 d1,r3
- add r3,r4,r4,lsl#2
- vdup.32 d2,r2
- vdup.32 d3,r4
- add r4,r5,r5,lsl#2
- vdup.32 d4,r3
- vdup.32 d5,r5
- add r5,r6,r6,lsl#2
- vdup.32 d6,r4
- vdup.32 d7,r6
- vdup.32 d8,r5
-
- mov r5,#2 @ counter
-
-.Lsquare_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
- @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
- @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
- @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
- @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
-
- vmull.u32 q5,d0,d0[1]
- vmull.u32 q6,d1,d0[1]
- vmull.u32 q7,d3,d0[1]
- vmull.u32 q8,d5,d0[1]
- vmull.u32 q9,d7,d0[1]
-
- vmlal.u32 q5,d7,d2[1]
- vmlal.u32 q6,d0,d1[1]
- vmlal.u32 q7,d1,d1[1]
- vmlal.u32 q8,d3,d1[1]
- vmlal.u32 q9,d5,d1[1]
-
- vmlal.u32 q5,d5,d4[1]
- vmlal.u32 q6,d7,d4[1]
- vmlal.u32 q8,d1,d3[1]
- vmlal.u32 q7,d0,d3[1]
- vmlal.u32 q9,d3,d3[1]
-
- vmlal.u32 q5,d3,d6[1]
- vmlal.u32 q8,d0,d5[1]
- vmlal.u32 q6,d5,d6[1]
- vmlal.u32 q7,d7,d6[1]
- vmlal.u32 q9,d1,d5[1]
-
- vmlal.u32 q8,d7,d8[1]
- vmlal.u32 q5,d1,d8[1]
- vmlal.u32 q6,d3,d8[1]
- vmlal.u32 q7,d5,d8[1]
- vmlal.u32 q9,d0,d7[1]
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
- @ and P. Schwabe
- @
- @ H0>>+H1>>+H2>>+H3>>+H4
- @ H3>>+H4>>*5+H0>>+H1
- @
- @ Trivia.
- @
- @ Result of multiplication of n-bit number by m-bit number is
- @ n+m bits wide. However! Even though 2^n is a n+1-bit number,
- @ m-bit number multiplied by 2^n is still n+m bits wide.
- @
- @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2,
- @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit
- @ one is n+1 bits wide.
- @
- @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
- @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
- @ can be 27. However! In cases when their width exceeds 26 bits
- @ they are limited by 2^26+2^6. This in turn means that *sum*
- @ of the products with these values can still be viewed as sum
- @ of 52-bit numbers as long as the amount of addends is not a
- @ power of 2. For example,
- @
- @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
- @
- @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
- @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
- @ 8 * (2^52) or 2^55. However, the value is then multiplied by
- @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
- @ which is less than 32 * (2^52) or 2^57. And when processing
- @ data we are looking at triple as many addends...
- @
- @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and
- @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
- @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while
- @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
- @ instruction accepts 2x32-bit input and writes 2x64-bit result.
- @ This means that result of reduction have to be compressed upon
- @ loop wrap-around. This can be done in the process of reduction
- @ to minimize amount of instructions [as well as amount of
- @ 128-bit instructions, which benefits low-end processors], but
- @ one has to watch for H2 (which is narrower than H0) and 5*H4
- @ not being wider than 58 bits, so that result of right shift
- @ by 26 bits fits in 32 bits. This is also useful on x86,
- @ because it allows to use paddd in place for paddq, which
- @ benefits Atom, where paddq is ridiculously slow.
-
- vshr.u64 q15,q8,#26
- vmovn.i64 d16,q8
- vshr.u64 q4,q5,#26
- vmovn.i64 d10,q5
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vbic.i32 d16,#0xfc000000 @ &=0x03ffffff
- vadd.i64 q6,q6,q4 @ h0 -> h1
- vbic.i32 d10,#0xfc000000
-
- vshrn.u64 d30,q9,#26
- vmovn.i64 d18,q9
- vshr.u64 q4,q6,#26
- vmovn.i64 d12,q6
- vadd.i64 q7,q7,q4 @ h1 -> h2
- vbic.i32 d18,#0xfc000000
- vbic.i32 d12,#0xfc000000
-
- vadd.i32 d10,d10,d30
- vshl.u32 d30,d30,#2
- vshrn.u64 d8,q7,#26
- vmovn.i64 d14,q7
- vadd.i32 d10,d10,d30 @ h4 -> h0
- vadd.i32 d16,d16,d8 @ h2 -> h3
- vbic.i32 d14,#0xfc000000
-
- vshr.u32 d30,d10,#26
- vbic.i32 d10,#0xfc000000
- vshr.u32 d8,d16,#26
- vbic.i32 d16,#0xfc000000
- vadd.i32 d12,d12,d30 @ h0 -> h1
- vadd.i32 d18,d18,d8 @ h3 -> h4
-
- subs r5,r5,#1
- beq .Lsquare_break_neon
-
- add r6,r0,#(48+0*9*4)
- add r7,r0,#(48+1*9*4)
-
- vtrn.32 d0,d10 @ r^2:r^1
- vtrn.32 d3,d14
- vtrn.32 d5,d16
- vtrn.32 d1,d12
- vtrn.32 d7,d18
-
- vshl.u32 d4,d3,#2 @ *5
- vshl.u32 d6,d5,#2
- vshl.u32 d2,d1,#2
- vshl.u32 d8,d7,#2
- vadd.i32 d4,d4,d3
- vadd.i32 d2,d2,d1
- vadd.i32 d6,d6,d5
- vadd.i32 d8,d8,d7
-
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
- vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
- vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vst1.32 {d8[0]},[r6,:32]
- vst1.32 {d8[1]},[r7,:32]
-
- b .Lsquare_neon
-
-.align 4
-.Lsquare_break_neon:
- add r6,r0,#(48+2*4*9)
- add r7,r0,#(48+3*4*9)
-
- vmov d0,d10 @ r^4:r^3
- vshl.u32 d2,d12,#2 @ *5
- vmov d1,d12
- vshl.u32 d4,d14,#2
- vmov d3,d14
- vshl.u32 d6,d16,#2
- vmov d5,d16
- vshl.u32 d8,d18,#2
- vmov d7,d18
- vadd.i32 d2,d2,d12
- vadd.i32 d4,d4,d14
- vadd.i32 d6,d6,d16
- vadd.i32 d8,d8,d18
-
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
- vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
- vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vst1.32 {d8[0]},[r6]
- vst1.32 {d8[1]},[r7]
-
-.Lno_init_neon:
- bx lr @ bx lr
-.size poly1305_init_neon,.-poly1305_init_neon
-
-.type poly1305_blocks_neon,%function
-.align 5
-poly1305_blocks_neon:
-.Lpoly1305_blocks_neon:
- ldr ip,[r0,#36] @ is_base2_26
-
- cmp r2,#64
- blo .Lpoly1305_blocks
-
- stmdb sp!,{r4-r7}
- vstmdb sp!,{d8-d15} @ ABI specification says so
-
- tst ip,ip @ is_base2_26?
- bne .Lbase2_26_neon
-
- stmdb sp!,{r1-r3,lr}
- bl .Lpoly1305_init_neon
-
- ldr r4,[r0,#0] @ load hash value base 2^32
- ldr r5,[r0,#4]
- ldr r6,[r0,#8]
- ldr r7,[r0,#12]
- ldr ip,[r0,#16]
-
- and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
- mov r3,r4,lsr#26
- veor d10,d10,d10
- mov r4,r5,lsr#20
- orr r3,r3,r5,lsl#6
- veor d12,d12,d12
- mov r5,r6,lsr#14
- orr r4,r4,r6,lsl#12
- veor d14,d14,d14
- mov r6,r7,lsr#8
- orr r5,r5,r7,lsl#18
- veor d16,d16,d16
- and r3,r3,#0x03ffffff
- orr r6,r6,ip,lsl#24
- veor d18,d18,d18
- and r4,r4,#0x03ffffff
- mov r1,#1
- and r5,r5,#0x03ffffff
- str r1,[r0,#36] @ set is_base2_26
-
- vmov.32 d10[0],r2
- vmov.32 d12[0],r3
- vmov.32 d14[0],r4
- vmov.32 d16[0],r5
- vmov.32 d18[0],r6
- adr r5,.Lzeros
-
- ldmia sp!,{r1-r3,lr}
- b .Lhash_loaded
-
-.align 4
-.Lbase2_26_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ load hash value
-
- veor d10,d10,d10
- veor d12,d12,d12
- veor d14,d14,d14
- veor d16,d16,d16
- veor d18,d18,d18
- vld4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
- adr r5,.Lzeros
- vld1.32 {d18[0]},[r0]
- sub r0,r0,#16 @ rewind
-
-.Lhash_loaded:
- add r4,r1,#32
- mov r3,r3,lsl#24
- tst r2,#31
- beq .Leven
-
- vld4.32 {d20[0],d22[0],d24[0],d26[0]},[r1]!
- vmov.32 d28[0],r3
- sub r2,r2,#16
- add r4,r1,#32
-
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q13,q13
- vrev32.8 q11,q11
- vrev32.8 q12,q12
-# endif
- vsri.u32 d28,d26,#8 @ base 2^32 -> base 2^26
- vshl.u32 d26,d26,#18
-
- vsri.u32 d26,d24,#14
- vshl.u32 d24,d24,#12
- vadd.i32 d29,d28,d18 @ add hash value and move to #hi
-
- vbic.i32 d26,#0xfc000000
- vsri.u32 d24,d22,#20
- vshl.u32 d22,d22,#6
-
- vbic.i32 d24,#0xfc000000
- vsri.u32 d22,d20,#26
- vadd.i32 d27,d26,d16
-
- vbic.i32 d20,#0xfc000000
- vbic.i32 d22,#0xfc000000
- vadd.i32 d25,d24,d14
-
- vadd.i32 d21,d20,d10
- vadd.i32 d23,d22,d12
-
- mov r7,r5
- add r6,r0,#48
-
- cmp r2,r2
- b .Long_tail
-
-.align 4
-.Leven:
- subs r2,r2,#64
- it lo
- movlo r4,r5
-
- vmov.i32 q14,#1<<24 @ padbit, yes, always
- vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
- add r1,r1,#64
- vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
- add r4,r4,#64
- itt hi
- addhi r7,r0,#(48+1*9*4)
- addhi r6,r0,#(48+3*9*4)
-
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q13,q13
- vrev32.8 q11,q11
- vrev32.8 q12,q12
-# endif
- vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
- vshl.u32 q13,q13,#18
-
- vsri.u32 q13,q12,#14
- vshl.u32 q12,q12,#12
-
- vbic.i32 q13,#0xfc000000
- vsri.u32 q12,q11,#20
- vshl.u32 q11,q11,#6
-
- vbic.i32 q12,#0xfc000000
- vsri.u32 q11,q10,#26
-
- vbic.i32 q10,#0xfc000000
- vbic.i32 q11,#0xfc000000
-
- bls .Lskip_loop
-
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^2
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- b .Loop_neon
-
-.align 5
-.Loop_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
- @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
- @ ___________________/
- @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
- @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
- @ ___________________/ ____________________/
- @
- @ Note that we start with inp[2:3]*r^2. This is because it
- @ doesn't depend on reduction in previous iteration.
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
- @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
- @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
- @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
- @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ inp[2:3]*r^2
-
- vadd.i32 d24,d24,d14 @ accumulate inp[0:1]
- vmull.u32 q7,d25,d0[1]
- vadd.i32 d20,d20,d10
- vmull.u32 q5,d21,d0[1]
- vadd.i32 d26,d26,d16
- vmull.u32 q8,d27,d0[1]
- vmlal.u32 q7,d23,d1[1]
- vadd.i32 d22,d22,d12
- vmull.u32 q6,d23,d0[1]
-
- vadd.i32 d28,d28,d18
- vmull.u32 q9,d29,d0[1]
- subs r2,r2,#64
- vmlal.u32 q5,d29,d2[1]
- it lo
- movlo r4,r5
- vmlal.u32 q8,d25,d1[1]
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q6,d21,d1[1]
- vmlal.u32 q9,d27,d1[1]
-
- vmlal.u32 q5,d27,d4[1]
- vmlal.u32 q8,d23,d3[1]
- vmlal.u32 q9,d25,d3[1]
- vmlal.u32 q6,d29,d4[1]
- vmlal.u32 q7,d21,d3[1]
-
- vmlal.u32 q8,d21,d5[1]
- vmlal.u32 q5,d25,d6[1]
- vmlal.u32 q9,d23,d5[1]
- vmlal.u32 q6,d27,d6[1]
- vmlal.u32 q7,d29,d6[1]
-
- vmlal.u32 q8,d29,d8[1]
- vmlal.u32 q5,d23,d8[1]
- vmlal.u32 q9,d21,d7[1]
- vmlal.u32 q6,d25,d8[1]
- vmlal.u32 q7,d27,d8[1]
-
- vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
- add r4,r4,#64
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ (hash+inp[0:1])*r^4 and accumulate
-
- vmlal.u32 q8,d26,d0[0]
- vmlal.u32 q5,d20,d0[0]
- vmlal.u32 q9,d28,d0[0]
- vmlal.u32 q6,d22,d0[0]
- vmlal.u32 q7,d24,d0[0]
- vld1.32 d8[0],[r6,:32]
-
- vmlal.u32 q8,d24,d1[0]
- vmlal.u32 q5,d28,d2[0]
- vmlal.u32 q9,d26,d1[0]
- vmlal.u32 q6,d20,d1[0]
- vmlal.u32 q7,d22,d1[0]
-
- vmlal.u32 q8,d22,d3[0]
- vmlal.u32 q5,d26,d4[0]
- vmlal.u32 q9,d24,d3[0]
- vmlal.u32 q6,d28,d4[0]
- vmlal.u32 q7,d20,d3[0]
-
- vmlal.u32 q8,d20,d5[0]
- vmlal.u32 q5,d24,d6[0]
- vmlal.u32 q9,d22,d5[0]
- vmlal.u32 q6,d26,d6[0]
- vmlal.u32 q8,d28,d8[0]
-
- vmlal.u32 q7,d28,d6[0]
- vmlal.u32 q5,d22,d8[0]
- vmlal.u32 q9,d20,d7[0]
- vmov.i32 q14,#1<<24 @ padbit, yes, always
- vmlal.u32 q6,d24,d8[0]
- vmlal.u32 q7,d26,d8[0]
-
- vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
- add r1,r1,#64
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q11,q11
- vrev32.8 q12,q12
- vrev32.8 q13,q13
-# endif
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction interleaved with base 2^32 -> base 2^26 of
- @ inp[0:3] previously loaded to q10-q13 and smashed to q10-q14.
-
- vshr.u64 q15,q8,#26
- vmovn.i64 d16,q8
- vshr.u64 q4,q5,#26
- vmovn.i64 d10,q5
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vbic.i32 d16,#0xfc000000
- vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
- vadd.i64 q6,q6,q4 @ h0 -> h1
- vshl.u32 q13,q13,#18
- vbic.i32 d10,#0xfc000000
-
- vshrn.u64 d30,q9,#26
- vmovn.i64 d18,q9
- vshr.u64 q4,q6,#26
- vmovn.i64 d12,q6
- vadd.i64 q7,q7,q4 @ h1 -> h2
- vsri.u32 q13,q12,#14
- vbic.i32 d18,#0xfc000000
- vshl.u32 q12,q12,#12
- vbic.i32 d12,#0xfc000000
-
- vadd.i32 d10,d10,d30
- vshl.u32 d30,d30,#2
- vbic.i32 q13,#0xfc000000
- vshrn.u64 d8,q7,#26
- vmovn.i64 d14,q7
- vaddl.u32 q5,d10,d30 @ h4 -> h0 [widen for a sec]
- vsri.u32 q12,q11,#20
- vadd.i32 d16,d16,d8 @ h2 -> h3
- vshl.u32 q11,q11,#6
- vbic.i32 d14,#0xfc000000
- vbic.i32 q12,#0xfc000000
-
- vshrn.u64 d30,q5,#26 @ re-narrow
- vmovn.i64 d10,q5
- vsri.u32 q11,q10,#26
- vbic.i32 q10,#0xfc000000
- vshr.u32 d8,d16,#26
- vbic.i32 d16,#0xfc000000
- vbic.i32 d10,#0xfc000000
- vadd.i32 d12,d12,d30 @ h0 -> h1
- vadd.i32 d18,d18,d8 @ h3 -> h4
- vbic.i32 q11,#0xfc000000
-
- bhi .Loop_neon
-
-.Lskip_loop:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
-
- add r7,r0,#(48+0*9*4)
- add r6,r0,#(48+1*9*4)
- adds r2,r2,#32
- it ne
- movne r2,#0
- bne .Long_tail
-
- vadd.i32 d25,d24,d14 @ add hash value and move to #hi
- vadd.i32 d21,d20,d10
- vadd.i32 d27,d26,d16
- vadd.i32 d23,d22,d12
- vadd.i32 d29,d28,d18
-
-.Long_tail:
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^1
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^2
-
- vadd.i32 d24,d24,d14 @ can be redundant
- vmull.u32 q7,d25,d0
- vadd.i32 d20,d20,d10
- vmull.u32 q5,d21,d0
- vadd.i32 d26,d26,d16
- vmull.u32 q8,d27,d0
- vadd.i32 d22,d22,d12
- vmull.u32 q6,d23,d0
- vadd.i32 d28,d28,d18
- vmull.u32 q9,d29,d0
-
- vmlal.u32 q5,d29,d2
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vmlal.u32 q8,d25,d1
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vmlal.u32 q6,d21,d1
- vmlal.u32 q9,d27,d1
- vmlal.u32 q7,d23,d1
-
- vmlal.u32 q8,d23,d3
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q5,d27,d4
- vld1.32 d8[0],[r6,:32]
- vmlal.u32 q9,d25,d3
- vmlal.u32 q6,d29,d4
- vmlal.u32 q7,d21,d3
-
- vmlal.u32 q8,d21,d5
- it ne
- addne r7,r0,#(48+2*9*4)
- vmlal.u32 q5,d25,d6
- it ne
- addne r6,r0,#(48+3*9*4)
- vmlal.u32 q9,d23,d5
- vmlal.u32 q6,d27,d6
- vmlal.u32 q7,d29,d6
-
- vmlal.u32 q8,d29,d8
- vorn q0,q0,q0 @ all-ones, can be redundant
- vmlal.u32 q5,d23,d8
- vshr.u64 q0,q0,#38
- vmlal.u32 q9,d21,d7
- vmlal.u32 q6,d25,d8
- vmlal.u32 q7,d27,d8
-
- beq .Lshort_tail
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ (hash+inp[0:1])*r^4:r^3 and accumulate
-
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^3
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
-
- vmlal.u32 q7,d24,d0
- vmlal.u32 q5,d20,d0
- vmlal.u32 q8,d26,d0
- vmlal.u32 q6,d22,d0
- vmlal.u32 q9,d28,d0
-
- vmlal.u32 q5,d28,d2
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vmlal.u32 q8,d24,d1
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vmlal.u32 q6,d20,d1
- vmlal.u32 q9,d26,d1
- vmlal.u32 q7,d22,d1
-
- vmlal.u32 q8,d22,d3
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q5,d26,d4
- vld1.32 d8[0],[r6,:32]
- vmlal.u32 q9,d24,d3
- vmlal.u32 q6,d28,d4
- vmlal.u32 q7,d20,d3
-
- vmlal.u32 q8,d20,d5
- vmlal.u32 q5,d24,d6
- vmlal.u32 q9,d22,d5
- vmlal.u32 q6,d26,d6
- vmlal.u32 q7,d28,d6
-
- vmlal.u32 q8,d28,d8
- vorn q0,q0,q0 @ all-ones
- vmlal.u32 q5,d22,d8
- vshr.u64 q0,q0,#38
- vmlal.u32 q9,d20,d7
- vmlal.u32 q6,d24,d8
- vmlal.u32 q7,d26,d8
-
-.Lshort_tail:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ horizontal addition
-
- vadd.i64 d16,d16,d17
- vadd.i64 d10,d10,d11
- vadd.i64 d18,d18,d19
- vadd.i64 d12,d12,d13
- vadd.i64 d14,d14,d15
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction, but without narrowing
-
- vshr.u64 q15,q8,#26
- vand.i64 q8,q8,q0
- vshr.u64 q4,q5,#26
- vand.i64 q5,q5,q0
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vadd.i64 q6,q6,q4 @ h0 -> h1
-
- vshr.u64 q15,q9,#26
- vand.i64 q9,q9,q0
- vshr.u64 q4,q6,#26
- vand.i64 q6,q6,q0
- vadd.i64 q7,q7,q4 @ h1 -> h2
-
- vadd.i64 q5,q5,q15
- vshl.u64 q15,q15,#2
- vshr.u64 q4,q7,#26
- vand.i64 q7,q7,q0
- vadd.i64 q5,q5,q15 @ h4 -> h0
- vadd.i64 q8,q8,q4 @ h2 -> h3
-
- vshr.u64 q15,q5,#26
- vand.i64 q5,q5,q0
- vshr.u64 q4,q8,#26
- vand.i64 q8,q8,q0
- vadd.i64 q6,q6,q15 @ h0 -> h1
- vadd.i64 q9,q9,q4 @ h3 -> h4
-
- cmp r2,#0
- bne .Leven
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ store hash value
-
- vst4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
- vst1.32 {d18[0]},[r0]
-
- vldmia sp!,{d8-d15} @ epilogue
- ldmia sp!,{r4-r7}
- bx lr @ bx lr
-.size poly1305_blocks_neon,.-poly1305_blocks_neon
-
-.align 5
-.Lzeros:
-.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-#ifndef __KERNEL__
-.LOPENSSL_armcap:
-# ifdef _WIN32
-.word OPENSSL_armcap_P
-# else
-.word OPENSSL_armcap_P-.Lpoly1305_init
-# endif
-.comm OPENSSL_armcap_P,4,4
-.hidden OPENSSL_armcap_P
-#endif
-#endif
-.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by @dot-asm"
-.align 2
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
deleted file mode 100644
index 6363014a50d7..000000000000
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ /dev/null
@@ -1,2816 +0,0 @@
-@ SPDX-License-Identifier: GPL-2.0
-
-@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
-@ has relicensed it under the GPLv2. Therefore this program is free software;
-@ you can redistribute it and/or modify it under the terms of the GNU General
-@ Public License version 2 as published by the Free Software Foundation.
-@
-@ The original headers, including the original license headers, are
-@ included below for completeness.
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see https://www.openssl.org/~appro/cryptogams/.
-@ ====================================================================
-
-@ SHA256 block procedure for ARMv4. May 2007.
-
-@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
-@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
-@ byte [on single-issue Xscale PXA250 core].
-
-@ July 2010.
-@
-@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
-@ Cortex A8 core and ~20 cycles per processed byte.
-
-@ February 2011.
-@
-@ Profiler-assisted and platform-specific optimization resulted in 16%
-@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
-
-@ September 2013.
-@
-@ Add NEON implementation. On Cortex A8 it was measured to process one
-@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
-@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
-@ code (meaning that latter performs sub-optimally, nothing was done
-@ about it).
-
-@ May 2014.
-@
-@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ 7
-#endif
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type K256,%object
-.align 5
-K256:
-.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
-.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
-.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
-.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
-.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
-.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
-.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
-.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
-.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
-.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
-.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
-.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
-.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
-.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
-.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
-.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
-.size K256,.-K256
-.word 0 @ terminator
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha256_block_data_order
-#endif
-.align 5
-
-.global sha256_block_data_order
-.type sha256_block_data_order,%function
-sha256_block_data_order:
-.Lsha256_block_data_order:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha256_block_data_order
-#else
- adr r3,.Lsha256_block_data_order
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- ldr r12,.LOPENSSL_armcap
- ldr r12,[r3,r12] @ OPENSSL_armcap_P
- tst r12,#ARMV8_SHA256
- bne .LARMv8
- tst r12,#ARMV7_NEON
- bne .LNEON
-#endif
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
- stmdb sp!,{r0,r1,r2,r4-r11,lr}
- ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
- sub r14,r3,#256+32 @ K256
- sub sp,sp,#16*4 @ alloca(X[16])
-.Loop:
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ magic
- eor r12,r12,r12
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 0
-# if 0==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 0
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 0==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#0*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 0==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 0<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#2*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#15*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 1
-# if 1==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 1
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 1==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#1*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 1==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 1<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#3*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#0*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 2
-# if 2==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 2
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 2==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#2*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 2==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 2<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#4*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#1*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 3
-# if 3==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 3
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 3==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#3*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 3==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 3<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#5*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#2*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 4
-# if 4==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 4
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 4==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#4*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 4==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 4<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#6*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#3*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 5
-# if 5==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 5==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#5*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 5==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 5<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#7*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#4*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 6
-# if 6==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 6
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 6==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#6*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 6==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 6<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#8*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#5*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 7
-# if 7==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 7==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#7*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 7==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 7<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#9*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#6*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 8
-# if 8==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 8
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 8==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#8*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 8==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 8<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#10*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#7*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 9
-# if 9==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 9
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 9==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#9*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 9==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 9<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#11*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#8*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 10
-# if 10==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 10
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 10==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#10*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 10==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 10<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#12*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#9*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 11
-# if 11==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 11
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 11==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#11*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 11==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 11<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#13*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#10*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 12
-# if 12==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 12
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 12==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#12*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 12==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 12<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#14*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#11*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 13
-# if 13==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 13
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 13==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#13*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 13==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 13<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#15*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#12*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 14
-# if 14==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 14
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 14==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#14*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 14==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 14<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#0*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#13*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 15
-# if 15==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 15
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 15==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#15*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 15==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 15<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#1*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#14*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-.Lrounds_16_xx:
- @ ldr r2,[sp,#1*4] @ 16
- @ ldr r1,[sp,#14*4]
- mov r0,r2,ror#7
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#0*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#9*4]
-
- add r12,r12,r0
- eor r0,r8,r8,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r8,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#0*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 16==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 16<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#2*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#15*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#2*4] @ 17
- @ ldr r1,[sp,#15*4]
- mov r0,r2,ror#7
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#1*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#10*4]
-
- add r3,r3,r0
- eor r0,r7,r7,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r7,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#1*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 17==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 17<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#3*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#0*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#3*4] @ 18
- @ ldr r1,[sp,#0*4]
- mov r0,r2,ror#7
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#2*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#11*4]
-
- add r12,r12,r0
- eor r0,r6,r6,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r6,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#2*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 18==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 18<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#4*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#1*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#4*4] @ 19
- @ ldr r1,[sp,#1*4]
- mov r0,r2,ror#7
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#3*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#12*4]
-
- add r3,r3,r0
- eor r0,r5,r5,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r5,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#3*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 19==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 19<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#5*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#2*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#5*4] @ 20
- @ ldr r1,[sp,#2*4]
- mov r0,r2,ror#7
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#4*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#13*4]
-
- add r12,r12,r0
- eor r0,r4,r4,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r4,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#4*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 20==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 20<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#6*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#3*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#6*4] @ 21
- @ ldr r1,[sp,#3*4]
- mov r0,r2,ror#7
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#5*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#14*4]
-
- add r3,r3,r0
- eor r0,r11,r11,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r11,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#5*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 21==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 21<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#7*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#4*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#7*4] @ 22
- @ ldr r1,[sp,#4*4]
- mov r0,r2,ror#7
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#6*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#15*4]
-
- add r12,r12,r0
- eor r0,r10,r10,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r10,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#6*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 22==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 22<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#8*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#5*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#8*4] @ 23
- @ ldr r1,[sp,#5*4]
- mov r0,r2,ror#7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#7*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#0*4]
-
- add r3,r3,r0
- eor r0,r9,r9,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r9,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#7*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 23==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 23<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#9*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#6*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#9*4] @ 24
- @ ldr r1,[sp,#6*4]
- mov r0,r2,ror#7
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#8*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#1*4]
-
- add r12,r12,r0
- eor r0,r8,r8,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r8,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#8*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 24==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 24<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#10*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#7*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#10*4] @ 25
- @ ldr r1,[sp,#7*4]
- mov r0,r2,ror#7
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#9*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#2*4]
-
- add r3,r3,r0
- eor r0,r7,r7,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r7,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#9*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 25==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 25<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#11*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#8*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#11*4] @ 26
- @ ldr r1,[sp,#8*4]
- mov r0,r2,ror#7
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#10*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#3*4]
-
- add r12,r12,r0
- eor r0,r6,r6,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r6,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#10*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 26==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 26<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#12*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#9*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#12*4] @ 27
- @ ldr r1,[sp,#9*4]
- mov r0,r2,ror#7
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#11*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#4*4]
-
- add r3,r3,r0
- eor r0,r5,r5,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r5,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#11*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 27==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 27<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#13*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#10*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#13*4] @ 28
- @ ldr r1,[sp,#10*4]
- mov r0,r2,ror#7
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#12*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#5*4]
-
- add r12,r12,r0
- eor r0,r4,r4,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r4,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#12*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 28==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 28<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#14*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#11*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#14*4] @ 29
- @ ldr r1,[sp,#11*4]
- mov r0,r2,ror#7
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#13*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#6*4]
-
- add r3,r3,r0
- eor r0,r11,r11,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r11,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#13*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 29==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 29<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#15*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#12*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#15*4] @ 30
- @ ldr r1,[sp,#12*4]
- mov r0,r2,ror#7
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#14*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#7*4]
-
- add r12,r12,r0
- eor r0,r10,r10,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r10,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#14*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 30==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 30<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#0*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#13*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#0*4] @ 31
- @ ldr r1,[sp,#13*4]
- mov r0,r2,ror#7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#15*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#8*4]
-
- add r3,r3,r0
- eor r0,r9,r9,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r9,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#15*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 31==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 31<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#1*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#14*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- ite eq @ Thumb2 thing, sanity check in ARM
-#endif
- ldreq r3,[sp,#16*4] @ pull ctx
- bne .Lrounds_16_xx
-
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldr r0,[r3,#0]
- ldr r2,[r3,#4]
- ldr r12,[r3,#8]
- add r4,r4,r0
- ldr r0,[r3,#12]
- add r5,r5,r2
- ldr r2,[r3,#16]
- add r6,r6,r12
- ldr r12,[r3,#20]
- add r7,r7,r0
- ldr r0,[r3,#24]
- add r8,r8,r2
- ldr r2,[r3,#28]
- add r9,r9,r12
- ldr r1,[sp,#17*4] @ pull inp
- ldr r12,[sp,#18*4] @ pull inp+len
- add r10,r10,r0
- add r11,r11,r2
- stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
- cmp r1,r12
- sub r14,r14,#256 @ rewind Ktbl
- bne .Loop
-
- add sp,sp,#19*4 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r11,pc}
-#else
- ldmia sp!,{r4-r11,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size sha256_block_data_order,.-sha256_block_data_order
-#if __ARM_MAX_ARCH__>=7
-.arch armv7-a
-.fpu neon
-
-.global sha256_block_data_order_neon
-.type sha256_block_data_order_neon,%function
-.align 4
-sha256_block_data_order_neon:
-.LNEON:
- stmdb sp!,{r4-r12,lr}
-
- sub r11,sp,#16*4+16
- adr r14,.Lsha256_block_data_order
- sub r14,r14,#.Lsha256_block_data_order-K256
- bic r11,r11,#15 @ align for 128-bit stores
- mov r12,sp
- mov sp,r11 @ alloca
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
-
- vld1.8 {q0},[r1]!
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- vld1.32 {q8},[r14,:128]!
- vld1.32 {q9},[r14,:128]!
- vld1.32 {q10},[r14,:128]!
- vld1.32 {q11},[r14,:128]!
- vrev32.8 q0,q0 @ yes, even on
- str r0,[sp,#64]
- vrev32.8 q1,q1 @ big-endian
- str r1,[sp,#68]
- mov r1,sp
- vrev32.8 q2,q2
- str r2,[sp,#72]
- vrev32.8 q3,q3
- str r12,[sp,#76] @ save original sp
- vadd.i32 q8,q8,q0
- vadd.i32 q9,q9,q1
- vst1.32 {q8},[r1,:128]!
- vadd.i32 q10,q10,q2
- vst1.32 {q9},[r1,:128]!
- vadd.i32 q11,q11,q3
- vst1.32 {q10},[r1,:128]!
- vst1.32 {q11},[r1,:128]!
-
- ldmia r0,{r4-r11}
- sub r1,r1,#64
- ldr r2,[sp,#0]
- eor r12,r12,r12
- eor r3,r5,r6
- b .L_00_48
-
-.align 4
-.L_00_48:
- vext.8 q8,q0,q1,#4
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- vext.8 q9,q2,q3,#4
- add r4,r4,r12
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vadd.i32 q0,q0,q9
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- vshr.u32 q9,q8,#3
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#4]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- veor q9,q9,q10
- add r10,r10,r2
- vsli.32 q11,q8,#14
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- vshr.u32 d24,d7,#17
- add r11,r11,r3
- and r2,r2,r7
- veor q9,q9,q11
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- vsli.32 d24,d7,#15
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- vshr.u32 d25,d7,#10
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- vadd.i32 q0,q0,q9
- add r10,r10,r2
- ldr r2,[sp,#8]
- veor d25,d25,d24
- and r12,r12,r3
- add r6,r6,r10
- vshr.u32 d24,d7,#19
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- vsli.32 d24,d7,#13
- add r9,r9,r2
- eor r2,r7,r8
- veor d25,d25,d24
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- vadd.i32 d0,d0,d25
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- vshr.u32 d24,d0,#17
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- vsli.32 d24,d0,#15
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- vshr.u32 d25,d0,#10
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- veor d25,d25,d24
- ldr r2,[sp,#12]
- and r3,r3,r12
- vshr.u32 d24,d0,#19
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- vld1.32 {q8},[r14,:128]!
- add r8,r8,r2
- vsli.32 d24,d0,#13
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- veor d25,d25,d24
- add r9,r9,r3
- and r2,r2,r5
- vadd.i32 d1,d1,d25
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- vadd.i32 q8,q8,q0
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#16]
- and r12,r12,r3
- add r4,r4,r8
- vst1.32 {q8},[r1,:128]!
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vext.8 q8,q1,q2,#4
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- vext.8 q9,q3,q0,#4
- add r8,r8,r12
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vadd.i32 q1,q1,q9
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- vshr.u32 q9,q8,#3
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#20]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- veor q9,q9,q10
- add r6,r6,r2
- vsli.32 q11,q8,#14
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- vshr.u32 d24,d1,#17
- add r7,r7,r3
- and r2,r2,r11
- veor q9,q9,q11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- vsli.32 d24,d1,#15
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- vshr.u32 d25,d1,#10
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- vadd.i32 q1,q1,q9
- add r6,r6,r2
- ldr r2,[sp,#24]
- veor d25,d25,d24
- and r12,r12,r3
- add r10,r10,r6
- vshr.u32 d24,d1,#19
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- vsli.32 d24,d1,#13
- add r5,r5,r2
- eor r2,r11,r4
- veor d25,d25,d24
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- vadd.i32 d2,d2,d25
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- vshr.u32 d24,d2,#17
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- vsli.32 d24,d2,#15
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- vshr.u32 d25,d2,#10
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- veor d25,d25,d24
- ldr r2,[sp,#28]
- and r3,r3,r12
- vshr.u32 d24,d2,#19
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- vld1.32 {q8},[r14,:128]!
- add r4,r4,r2
- vsli.32 d24,d2,#13
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- veor d25,d25,d24
- add r5,r5,r3
- and r2,r2,r9
- vadd.i32 d3,d3,d25
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- vadd.i32 q8,q8,q1
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#32]
- and r12,r12,r3
- add r8,r8,r4
- vst1.32 {q8},[r1,:128]!
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vext.8 q8,q2,q3,#4
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- vext.8 q9,q0,q1,#4
- add r4,r4,r12
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vadd.i32 q2,q2,q9
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- vshr.u32 q9,q8,#3
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#36]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- veor q9,q9,q10
- add r10,r10,r2
- vsli.32 q11,q8,#14
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- vshr.u32 d24,d3,#17
- add r11,r11,r3
- and r2,r2,r7
- veor q9,q9,q11
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- vsli.32 d24,d3,#15
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- vshr.u32 d25,d3,#10
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- vadd.i32 q2,q2,q9
- add r10,r10,r2
- ldr r2,[sp,#40]
- veor d25,d25,d24
- and r12,r12,r3
- add r6,r6,r10
- vshr.u32 d24,d3,#19
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- vsli.32 d24,d3,#13
- add r9,r9,r2
- eor r2,r7,r8
- veor d25,d25,d24
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- vadd.i32 d4,d4,d25
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- vshr.u32 d24,d4,#17
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- vsli.32 d24,d4,#15
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- vshr.u32 d25,d4,#10
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- veor d25,d25,d24
- ldr r2,[sp,#44]
- and r3,r3,r12
- vshr.u32 d24,d4,#19
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- vld1.32 {q8},[r14,:128]!
- add r8,r8,r2
- vsli.32 d24,d4,#13
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- veor d25,d25,d24
- add r9,r9,r3
- and r2,r2,r5
- vadd.i32 d5,d5,d25
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- vadd.i32 q8,q8,q2
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#48]
- and r12,r12,r3
- add r4,r4,r8
- vst1.32 {q8},[r1,:128]!
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vext.8 q8,q3,q0,#4
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- vext.8 q9,q1,q2,#4
- add r8,r8,r12
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vadd.i32 q3,q3,q9
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- vshr.u32 q9,q8,#3
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#52]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- veor q9,q9,q10
- add r6,r6,r2
- vsli.32 q11,q8,#14
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- vshr.u32 d24,d5,#17
- add r7,r7,r3
- and r2,r2,r11
- veor q9,q9,q11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- vsli.32 d24,d5,#15
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- vshr.u32 d25,d5,#10
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- vadd.i32 q3,q3,q9
- add r6,r6,r2
- ldr r2,[sp,#56]
- veor d25,d25,d24
- and r12,r12,r3
- add r10,r10,r6
- vshr.u32 d24,d5,#19
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- vsli.32 d24,d5,#13
- add r5,r5,r2
- eor r2,r11,r4
- veor d25,d25,d24
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- vadd.i32 d6,d6,d25
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- vshr.u32 d24,d6,#17
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- vsli.32 d24,d6,#15
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- vshr.u32 d25,d6,#10
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- veor d25,d25,d24
- ldr r2,[sp,#60]
- and r3,r3,r12
- vshr.u32 d24,d6,#19
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- vld1.32 {q8},[r14,:128]!
- add r4,r4,r2
- vsli.32 d24,d6,#13
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- veor d25,d25,d24
- add r5,r5,r3
- and r2,r2,r9
- vadd.i32 d7,d7,d25
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- vadd.i32 q8,q8,q3
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[r14]
- and r12,r12,r3
- add r8,r8,r4
- vst1.32 {q8},[r1,:128]!
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- teq r2,#0 @ check for K256 terminator
- ldr r2,[sp,#0]
- sub r1,r1,#64
- bne .L_00_48
-
- ldr r1,[sp,#68]
- ldr r0,[sp,#72]
- sub r14,r14,#256 @ rewind r14
- teq r1,r0
- it eq
- subeq r1,r1,#64 @ avoid SEGV
- vld1.8 {q0},[r1]! @ load next input block
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- it ne
- strne r1,[sp,#68]
- mov r1,sp
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- add r4,r4,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vrev32.8 q0,q0
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vadd.i32 q8,q8,q0
- ldr r2,[sp,#4]
- and r3,r3,r12
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- add r10,r10,r2
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- add r11,r11,r3
- and r2,r2,r7
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- add r10,r10,r2
- ldr r2,[sp,#8]
- and r12,r12,r3
- add r6,r6,r10
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- add r9,r9,r2
- eor r2,r7,r8
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- ldr r2,[sp,#12]
- and r3,r3,r12
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- add r8,r8,r2
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- add r9,r9,r3
- and r2,r2,r5
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#16]
- and r12,r12,r3
- add r4,r4,r8
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vst1.32 {q8},[r1,:128]!
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- add r8,r8,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vrev32.8 q1,q1
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vadd.i32 q8,q8,q1
- ldr r2,[sp,#20]
- and r3,r3,r12
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- add r6,r6,r2
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- add r7,r7,r3
- and r2,r2,r11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- add r6,r6,r2
- ldr r2,[sp,#24]
- and r12,r12,r3
- add r10,r10,r6
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- add r5,r5,r2
- eor r2,r11,r4
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- ldr r2,[sp,#28]
- and r3,r3,r12
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- add r4,r4,r2
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- add r5,r5,r3
- and r2,r2,r9
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#32]
- and r12,r12,r3
- add r8,r8,r4
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vst1.32 {q8},[r1,:128]!
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- add r4,r4,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vrev32.8 q2,q2
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vadd.i32 q8,q8,q2
- ldr r2,[sp,#36]
- and r3,r3,r12
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- add r10,r10,r2
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- add r11,r11,r3
- and r2,r2,r7
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- add r10,r10,r2
- ldr r2,[sp,#40]
- and r12,r12,r3
- add r6,r6,r10
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- add r9,r9,r2
- eor r2,r7,r8
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- ldr r2,[sp,#44]
- and r3,r3,r12
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- add r8,r8,r2
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- add r9,r9,r3
- and r2,r2,r5
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#48]
- and r12,r12,r3
- add r4,r4,r8
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vst1.32 {q8},[r1,:128]!
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- add r8,r8,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vrev32.8 q3,q3
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vadd.i32 q8,q8,q3
- ldr r2,[sp,#52]
- and r3,r3,r12
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- add r6,r6,r2
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- add r7,r7,r3
- and r2,r2,r11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- add r6,r6,r2
- ldr r2,[sp,#56]
- and r12,r12,r3
- add r10,r10,r6
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- add r5,r5,r2
- eor r2,r11,r4
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- ldr r2,[sp,#60]
- and r3,r3,r12
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- add r4,r4,r2
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- add r5,r5,r3
- and r2,r2,r9
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#64]
- and r12,r12,r3
- add r8,r8,r4
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vst1.32 {q8},[r1,:128]!
- ldr r0,[r2,#0]
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldr r12,[r2,#4]
- ldr r3,[r2,#8]
- ldr r1,[r2,#12]
- add r4,r4,r0 @ accumulate
- ldr r0,[r2,#16]
- add r5,r5,r12
- ldr r12,[r2,#20]
- add r6,r6,r3
- ldr r3,[r2,#24]
- add r7,r7,r1
- ldr r1,[r2,#28]
- add r8,r8,r0
- str r4,[r2],#4
- add r9,r9,r12
- str r5,[r2],#4
- add r10,r10,r3
- str r6,[r2],#4
- add r11,r11,r1
- str r7,[r2],#4
- stmia r2,{r8-r11}
-
- ittte ne
- movne r1,sp
- ldrne r2,[sp,#0]
- eorne r12,r12,r12
- ldreq sp,[sp,#76] @ restore original sp
- itt ne
- eorne r3,r5,r6
- bne .L_00_48
-
- ldmia sp!,{r4-r12,pc}
-.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-
-# ifdef __thumb2__
-# define INST(a,b,c,d) .byte c,d|0xc,a,b
-# else
-# define INST(a,b,c,d) .byte a,b,c,d
-# endif
-
-.type sha256_block_data_order_armv8,%function
-.align 5
-sha256_block_data_order_armv8:
-.LARMv8:
- vld1.32 {q0,q1},[r0]
-# ifdef __thumb2__
- adr r3,.LARMv8
- sub r3,r3,#.LARMv8-K256
-# else
- adrl r3,K256
-# endif
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
-
-.Loop_v8:
- vld1.8 {q8-q9},[r1]!
- vld1.8 {q10-q11},[r1]!
- vld1.32 {q12},[r3]!
- vrev32.8 q8,q8
- vrev32.8 q9,q9
- vrev32.8 q10,q10
- vrev32.8 q11,q11
- vmov q14,q0 @ offload
- vmov q15,q1
- teq r1,r2
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
-
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
-
- vld1.32 {q13},[r3]
- vadd.i32 q12,q12,q10
- sub r3,r3,#256-16 @ rewind
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
-
- vadd.i32 q13,q13,q11
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
-
- vadd.i32 q0,q0,q14
- vadd.i32 q1,q1,q15
- it ne
- bne .Loop_v8
-
- vst1.32 {q0,q1},[r0]
-
- bx lr @ bx lr
-.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
-#endif
-.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
deleted file mode 100644
index 03014624f2ab..000000000000
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ /dev/null
@@ -1,1869 +0,0 @@
-@ SPDX-License-Identifier: GPL-2.0
-
-@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
-@ has relicensed it under the GPLv2. Therefore this program is free software;
-@ you can redistribute it and/or modify it under the terms of the GNU General
-@ Public License version 2 as published by the Free Software Foundation.
-@
-@ The original headers, including the original license headers, are
-@ included below for completeness.
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see https://www.openssl.org/~appro/cryptogams/.
-@ ====================================================================
-
-@ SHA512 block procedure for ARMv4. September 2007.
-
-@ This code is ~4.5 (four and a half) times faster than code generated
-@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
-@ Xscale PXA250 core].
-@
-@ July 2010.
-@
-@ Rescheduling for dual-issue pipeline resulted in 6% improvement on
-@ Cortex A8 core and ~40 cycles per processed byte.
-
-@ February 2011.
-@
-@ Profiler-assisted and platform-specific optimization resulted in 7%
-@ improvement on Cortex A8 core and ~38 cycles per byte.
-
-@ March 2011.
-@
-@ Add NEON implementation. On Cortex A8 it was measured to process
-@ one byte in 23.3 cycles or ~60% faster than integer-only code.
-
-@ August 2012.
-@
-@ Improve NEON performance by 12% on Snapdragon S4. In absolute
-@ terms it's 22.6 cycles per byte, which is a disappointing result.
-@ Technical writers asserted that the 3-way S4 pipeline can sustain
-@ multiple NEON instructions per cycle, but dual NEON issue could
-@ not be observed; see https://www.openssl.org/~appro/Snapdragon-S4.html
-@ for further details. On a side note, Cortex-A15 processes one byte
-@ in 16 cycles.
-
-@ Byte order [in]dependence. =========================================
-@
-@ Originally the caller was expected to maintain a specific *dword* order
-@ in h[0-7], namely with the most significant dword at the *lower* address,
-@ which was reflected in the two parameters below as 0 and 4. Now the
-@ caller is expected to maintain native byte order for whole 64-bit values.
-#ifndef __KERNEL__
-# include "arm_arch.h"
-# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
-# define VFP_ABI_POP vldmia sp!,{d8-d15}
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ 7
-# define VFP_ABI_PUSH
-# define VFP_ABI_POP
-#endif
-
-#ifdef __ARMEL__
-# define LO 0
-# define HI 4
-# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
-#else
-# define HI 0
-# define LO 4
-# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
-#endif
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type K512,%object
-.align 5
-K512:
-WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
-WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
-WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
-WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
-WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
-WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
-WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
-WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
-WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
-WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
-WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
-WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
-WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
-WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
-WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
-WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
-WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
-WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
-WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
-WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
-WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
-WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
-WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
-WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
-WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
-WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
-WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
-WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
-WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
-WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
-WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
-WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
-WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
-WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
-WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
-WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
-WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
-WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
-WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
-WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
-.size K512,.-K512
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha512_block_data_order
-.skip 32-4
-#else
-.skip 32
-#endif
-
-.global sha512_block_data_order
-.type sha512_block_data_order,%function
-sha512_block_data_order:
-.Lsha512_block_data_order:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha512_block_data_order
-#else
- adr r3,.Lsha512_block_data_order
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- ldr r12,.LOPENSSL_armcap
- ldr r12,[r3,r12] @ OPENSSL_armcap_P
- tst r12,#1
- bne .LNEON
-#endif
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
- stmdb sp!,{r4-r12,lr}
- sub r14,r3,#672 @ K512
- sub sp,sp,#9*8
-
- ldr r7,[r0,#32+LO]
- ldr r8,[r0,#32+HI]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
-.Loop:
- str r9, [sp,#48+0]
- str r10, [sp,#48+4]
- str r11, [sp,#56+0]
- str r12, [sp,#56+4]
- ldr r5,[r0,#0+LO]
- ldr r6,[r0,#0+HI]
- ldr r3,[r0,#8+LO]
- ldr r4,[r0,#8+HI]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- str r3,[sp,#8+0]
- str r4,[sp,#8+4]
- str r9, [sp,#16+0]
- str r10, [sp,#16+4]
- str r11, [sp,#24+0]
- str r12, [sp,#24+4]
- ldr r3,[r0,#40+LO]
- ldr r4,[r0,#40+HI]
- str r3,[sp,#40+0]
- str r4,[sp,#40+4]
-
-.L00_15:
-#if __ARM_ARCH__<7
- ldrb r3,[r1,#7]
- ldrb r9, [r1,#6]
- ldrb r10, [r1,#5]
- ldrb r11, [r1,#4]
- ldrb r4,[r1,#3]
- ldrb r12, [r1,#2]
- orr r3,r3,r9,lsl#8
- ldrb r9, [r1,#1]
- orr r3,r3,r10,lsl#16
- ldrb r10, [r1],#8
- orr r3,r3,r11,lsl#24
- orr r4,r4,r12,lsl#8
- orr r4,r4,r9,lsl#16
- orr r4,r4,r10,lsl#24
-#else
- ldr r3,[r1,#4]
- ldr r4,[r1],#8
-#ifdef __ARMEL__
- rev r3,r3
- rev r4,r4
-#endif
-#endif
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#148
-
- ldr r12,[sp,#16+0] @ c.lo
-#if __ARM_ARCH__>=7
- it eq @ Thumb2 thing, sanity check in ARM
-#endif
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
- tst r14,#1
- beq .L00_15
- ldr r9,[sp,#184+0]
- ldr r10,[sp,#184+4]
- bic r14,r14,#1
-.L16_79:
- @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
- @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
- @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
- mov r3,r9,lsr#1
- ldr r11,[sp,#80+0]
- mov r4,r10,lsr#1
- ldr r12,[sp,#80+4]
- eor r3,r3,r10,lsl#31
- eor r4,r4,r9,lsl#31
- eor r3,r3,r9,lsr#8
- eor r4,r4,r10,lsr#8
- eor r3,r3,r10,lsl#24
- eor r4,r4,r9,lsl#24
- eor r3,r3,r9,lsr#7
- eor r4,r4,r10,lsr#7
- eor r3,r3,r10,lsl#25
-
- @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
- @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
- @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
- mov r9,r11,lsr#19
- mov r10,r12,lsr#19
- eor r9,r9,r12,lsl#13
- eor r10,r10,r11,lsl#13
- eor r9,r9,r12,lsr#29
- eor r10,r10,r11,lsr#29
- eor r9,r9,r11,lsl#3
- eor r10,r10,r12,lsl#3
- eor r9,r9,r11,lsr#6
- eor r10,r10,r12,lsr#6
- ldr r11,[sp,#120+0]
- eor r9,r9,r12,lsl#26
-
- ldr r12,[sp,#120+4]
- adds r3,r3,r9
- ldr r9,[sp,#192+0]
- adc r4,r4,r10
-
- ldr r10,[sp,#192+4]
- adds r3,r3,r11
- adc r4,r4,r12
- adds r3,r3,r9
- adc r4,r4,r10
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#23
-
- ldr r12,[sp,#16+0] @ c.lo
-#if __ARM_ARCH__>=7
- it eq @ Thumb2 thing, sanity check in ARM
-#endif
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
-#if __ARM_ARCH__>=7
- ittt eq @ Thumb2 thing, sanity check in ARM
-#endif
- ldreq r9,[sp,#184+0]
- ldreq r10,[sp,#184+4]
- beq .L16_79
- bic r14,r14,#1
-
- ldr r3,[sp,#8+0]
- ldr r4,[sp,#8+4]
- ldr r9, [r0,#0+LO]
- ldr r10, [r0,#0+HI]
- ldr r11, [r0,#8+LO]
- ldr r12, [r0,#8+HI]
- adds r9,r5,r9
- str r9, [r0,#0+LO]
- adc r10,r6,r10
- str r10, [r0,#0+HI]
- adds r11,r3,r11
- str r11, [r0,#8+LO]
- adc r12,r4,r12
- str r12, [r0,#8+HI]
-
- ldr r5,[sp,#16+0]
- ldr r6,[sp,#16+4]
- ldr r3,[sp,#24+0]
- ldr r4,[sp,#24+4]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- adds r9,r5,r9
- str r9, [r0,#16+LO]
- adc r10,r6,r10
- str r10, [r0,#16+HI]
- adds r11,r3,r11
- str r11, [r0,#24+LO]
- adc r12,r4,r12
- str r12, [r0,#24+HI]
-
- ldr r3,[sp,#40+0]
- ldr r4,[sp,#40+4]
- ldr r9, [r0,#32+LO]
- ldr r10, [r0,#32+HI]
- ldr r11, [r0,#40+LO]
- ldr r12, [r0,#40+HI]
- adds r7,r7,r9
- str r7,[r0,#32+LO]
- adc r8,r8,r10
- str r8,[r0,#32+HI]
- adds r11,r3,r11
- str r11, [r0,#40+LO]
- adc r12,r4,r12
- str r12, [r0,#40+HI]
-
- ldr r5,[sp,#48+0]
- ldr r6,[sp,#48+4]
- ldr r3,[sp,#56+0]
- ldr r4,[sp,#56+4]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
- adds r9,r5,r9
- str r9, [r0,#48+LO]
- adc r10,r6,r10
- str r10, [r0,#48+HI]
- adds r11,r3,r11
- str r11, [r0,#56+LO]
- adc r12,r4,r12
- str r12, [r0,#56+HI]
-
- add sp,sp,#640
- sub r14,r14,#640
-
- teq r1,r2
- bne .Loop
-
- add sp,sp,#8*9 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size sha512_block_data_order,.-sha512_block_data_order
-#if __ARM_MAX_ARCH__>=7
-.arch armv7-a
-.fpu neon
-
-.global sha512_block_data_order_neon
-.type sha512_block_data_order_neon,%function
-.align 4
-sha512_block_data_order_neon:
-.LNEON:
- dmb @ errata #451034 on early Cortex A8
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
- VFP_ABI_PUSH
- adr r3,.Lsha512_block_data_order
- sub r3,r3,.Lsha512_block_data_order-K512
- vldmia r0,{d16-d23} @ load context
-.Loop_neon:
- vshr.u64 d24,d20,#14 @ 0
-#if 0<16
- vld1.64 {d0},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
-#if 0>0
- vadd.i64 d16,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 0<16 && defined(__ARMEL__)
- vrev64.8 d0,d0
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 1
-#if 1<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 1>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 1<16 && defined(__ARMEL__)
- vrev64.8 d1,d1
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 2
-#if 2<16
- vld1.64 {d2},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
-#if 2>0
- vadd.i64 d22,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 2<16 && defined(__ARMEL__)
- vrev64.8 d2,d2
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 3
-#if 3<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 3>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 3<16 && defined(__ARMEL__)
- vrev64.8 d3,d3
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 4
-#if 4<16
- vld1.64 {d4},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
-#if 4>0
- vadd.i64 d20,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 4<16 && defined(__ARMEL__)
- vrev64.8 d4,d4
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 5
-#if 5<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 5>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 5<16 && defined(__ARMEL__)
- vrev64.8 d5,d5
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 6
-#if 6<16
- vld1.64 {d6},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
-#if 6>0
- vadd.i64 d18,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 6<16 && defined(__ARMEL__)
- vrev64.8 d6,d6
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 7
-#if 7<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 7>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 7<16 && defined(__ARMEL__)
- vrev64.8 d7,d7
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- vshr.u64 d24,d20,#14 @ 8
-#if 8<16
- vld1.64 {d8},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
-#if 8>0
- vadd.i64 d16,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 8<16 && defined(__ARMEL__)
- vrev64.8 d8,d8
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 9
-#if 9<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 9>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 9<16 && defined(__ARMEL__)
- vrev64.8 d9,d9
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 10
-#if 10<16
- vld1.64 {d10},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
-#if 10>0
- vadd.i64 d22,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 10<16 && defined(__ARMEL__)
- vrev64.8 d10,d10
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 11
-#if 11<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 11>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 11<16 && defined(__ARMEL__)
- vrev64.8 d11,d11
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 12
-#if 12<16
- vld1.64 {d12},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
-#if 12>0
- vadd.i64 d20,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 12<16 && defined(__ARMEL__)
- vrev64.8 d12,d12
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 13
-#if 13<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 13>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 13<16 && defined(__ARMEL__)
- vrev64.8 d13,d13
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 14
-#if 14<16
- vld1.64 {d14},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
-#if 14>0
- vadd.i64 d18,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 14<16 && defined(__ARMEL__)
- vrev64.8 d14,d14
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 15
-#if 15<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 15>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 15<16 && defined(__ARMEL__)
- vrev64.8 d15,d15
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- mov r12,#4
-.L16_79_neon:
- subs r12,#1
- vshr.u64 q12,q7,#19
- vshr.u64 q13,q7,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
- vshr.u64 q15,q7,#6
- vsli.64 q12,q7,#45
- vext.8 q14,q0,q1,#8 @ X[i+1]
- vsli.64 q13,q7,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q0,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q4,q5,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q0,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q0,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 16<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 17
-#if 17<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 17>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 17<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 q12,q0,#19
- vshr.u64 q13,q0,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
- vshr.u64 q15,q0,#6
- vsli.64 q12,q0,#45
- vext.8 q14,q1,q2,#8 @ X[i+1]
- vsli.64 q13,q0,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q1,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q5,q6,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q1,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q1,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 18<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 19
-#if 19<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 19>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 19<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 q12,q1,#19
- vshr.u64 q13,q1,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
- vshr.u64 q15,q1,#6
- vsli.64 q12,q1,#45
- vext.8 q14,q2,q3,#8 @ X[i+1]
- vsli.64 q13,q1,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q2,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q6,q7,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q2,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q2,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 20<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 21
-#if 21<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 21>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 21<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 q12,q2,#19
- vshr.u64 q13,q2,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
- vshr.u64 q15,q2,#6
- vsli.64 q12,q2,#45
- vext.8 q14,q3,q4,#8 @ X[i+1]
- vsli.64 q13,q2,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q3,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q7,q0,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q3,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q3,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 22<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 23
-#if 23<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 23>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 23<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- vshr.u64 q12,q3,#19
- vshr.u64 q13,q3,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
- vshr.u64 q15,q3,#6
- vsli.64 q12,q3,#45
- vext.8 q14,q4,q5,#8 @ X[i+1]
- vsli.64 q13,q3,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q4,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q0,q1,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q4,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q4,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 24<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 25
-#if 25<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 25>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 25<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 q12,q4,#19
- vshr.u64 q13,q4,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
- vshr.u64 q15,q4,#6
- vsli.64 q12,q4,#45
- vext.8 q14,q5,q6,#8 @ X[i+1]
- vsli.64 q13,q4,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q5,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q1,q2,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q5,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q5,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 26<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 27
-#if 27<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 27>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 27<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 q12,q5,#19
- vshr.u64 q13,q5,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
- vshr.u64 q15,q5,#6
- vsli.64 q12,q5,#45
- vext.8 q14,q6,q7,#8 @ X[i+1]
- vsli.64 q13,q5,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q6,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q2,q3,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q6,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q6,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 28<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 29
-#if 29<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 29>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 29<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 q12,q6,#19
- vshr.u64 q13,q6,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
- vshr.u64 q15,q6,#6
- vsli.64 q12,q6,#45
- vext.8 q14,q7,q0,#8 @ X[i+1]
- vsli.64 q13,q6,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q7,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q3,q4,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q7,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q7,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 30<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 31
-#if 31<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 31>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 31<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- bne .L16_79_neon
-
- vadd.i64 d16,d30 @ h+=Maj from the past
- vldmia r0,{d24-d31} @ load context to temp
- vadd.i64 q8,q12 @ vectorized accumulate
- vadd.i64 q9,q13
- vadd.i64 q10,q14
- vadd.i64 q11,q15
- vstmia r0,{d16-d23} @ save context
- teq r1,r2
- sub r3,#640 @ rewind K512
- bne .Loop_neon
-
- VFP_ABI_POP
- bx lr @ .word 0xe12fff1e
-.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
-#endif
-.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.comm OPENSSL_armcap_P,4,4
-#endif
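For reference, the Sigma/sigma annotations in the deleted scalar and NEON code above are the standard FIPS 180-4 SHA-512 functions. A minimal C sketch of one compression round under those definitions (illustrative only, not kernel source):

/*
 * Reference sketch of what the assembly above computes with its
 * shift/rotate pairs (standard FIPS 180-4 definitions; illustration only).
 */
#include <stdint.h>

static inline uint64_t ror64(uint64_t x, unsigned int n)
{
        return (x >> n) | (x << (64 - n));
}

#define Sigma0(x) (ror64((x), 28) ^ ror64((x), 34) ^ ror64((x), 39))
#define Sigma1(x) (ror64((x), 14) ^ ror64((x), 18) ^ ror64((x), 41))
#define sigma0(x) (ror64((x), 1)  ^ ror64((x), 8)  ^ ((x) >> 7))
#define sigma1(x) (ror64((x), 19) ^ ror64((x), 61) ^ ((x) >> 6))

/*
 * Message schedule, as in the .L16_79 sections above:
 *   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
 */

/* One round: s[0..7] = {a,b,c,d,e,f,g,h}, Kt = K512[t], Wt = schedule word. */
static inline void sha512_round(uint64_t s[8], uint64_t Kt, uint64_t Wt)
{
        uint64_t ch  = (s[4] & s[5]) ^ (~s[4] & s[6]);
        uint64_t maj = (s[0] & s[1]) ^ (s[0] & s[2]) ^ (s[1] & s[2]);
        uint64_t T1  = s[7] + Sigma1(s[4]) + ch + Kt + Wt;
        uint64_t T2  = Sigma0(s[0]) + maj;

        s[7] = s[6]; s[6] = s[5]; s[5] = s[4];
        s[4] = s[3] + T1;
        s[3] = s[2]; s[2] = s[1]; s[1] = s[0];
        s[0] = T1 + T2;
}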
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 455eb19a5ac1..db8512d9a918 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -22,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -34,7 +34,7 @@
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -52,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -73,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result, val; \
@@ -93,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
return result; \
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
@@ -123,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
return oldval;
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -151,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -160,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -170,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -184,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -197,7 +197,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -211,7 +211,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-#define atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */
@@ -223,7 +223,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define arch_atomic_andnot arch_atomic_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -240,7 +240,7 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
@@ -250,7 +250,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -263,7 +263,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -272,7 +272,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
);
}
#else
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -285,7 +285,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
s64 tmp;
@@ -302,7 +302,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
#endif
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -322,7 +322,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -345,7 +345,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
@@ -374,34 +374,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
s64 oldval;
unsigned long res;
@@ -422,9 +422,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
return oldval;
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
s64 result;
unsigned long tmp;
@@ -442,9 +442,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
return result;
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 result;
unsigned long tmp;
@@ -470,9 +470,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 oldval, newval;
unsigned long tmp;
@@ -500,7 +500,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return oldval;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
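The renames above only move ARM's implementations behind the arch_ prefix; generic wrappers then provide the unprefixed atomic_*() API with sanitizer instrumentation. A rough sketch of that wrapper shape (illustrative, not the verbatim generated header):

/* Illustrative wrapper shape only -- the real ones are generated. */
#include <linux/instrumented.h>

static __always_inline int atomic_read(const atomic_t *v)
{
        instrument_atomic_read(v, sizeof(*v));  /* KASAN/KCSAN hook */
        return arch_atomic_read(v);             /* raw ARM implementation above */
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
        instrument_atomic_write(v, sizeof(*v));
        arch_atomic_set(v, i);
}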
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 8b701f8e175c..4dfe538dfc68 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -114,7 +114,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
-#define xchg_relaxed(ptr, x) ({ \
+#define arch_xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -128,20 +128,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
-#define xchg xchg_relaxed
+#define arch_xchg arch_xchg_relaxed
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) ({ \
- (__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
@@ -207,7 +207,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-#define cmpxchg_relaxed(ptr,o,n) ({ \
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -224,7 +224,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
break;
#endif
default:
@@ -234,7 +234,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-#define cmpxchg_local(ptr, o, n) ({ \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -266,13 +266,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval;
}
-#define cmpxchg64_relaxed(ptr, o, n) ({ \
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
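The cmpxchg/xchg renames follow the same pattern: callers keep using cmpxchg() and xchg(), which an instrumented layer forwards to the arch_ variants defined here. A hedged sketch of that forwarding (not the verbatim generated macro):

/* Sketch only; the real macro is generated and may instrument differently. */
#define cmpxchg(ptr, old, new)                                          \
({                                                                      \
        typeof(ptr) __ai_ptr = (ptr);                                   \
        instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr));      \
        arch_cmpxchg(__ai_ptr, (old), (new));                           \
})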
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0d67ed682e07..397be5ed30e7 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -7,9 +7,11 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -42,11 +44,15 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
- __used __section("__cpuidle_method_of_table") \
- = { .method = _method, .ops = _ops }
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
extern int arm_cpuidle_init(int cpu);
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+
#endif
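The new arm_cpuidle_irq_context helpers are deliberately empty on 32-bit ARM; they exist so shared cpuidle code can bracket low-power entry uniformly across arm and arm64. A hypothetical caller (function and variable names are illustrative, not taken from this patch):

/* Hypothetical usage sketch. */
#include <linux/cpuidle.h>
#include <asm/cpuidle.h>

static int enter_idle_state(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
{
        struct arm_cpuidle_irq_context context;
        int ret;

        arm_cpuidle_save_irq_context(&context);    /* no-op on 32-bit ARM */
        ret = arm_cpuidle_suspend(index);
        arm_cpuidle_restore_irq_context(&context); /* no-op on 32-bit ARM */

        return ret;
}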
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index fdee1f04f4f3..a17f01235c29 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -143,7 +143,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
__pmd_populate(pmdp, page_to_phys(ptep), prot);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 39ff217136d1..6f5d627c44a3 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -21,7 +21,7 @@
#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define arch_sync_cmpxchg arch_cmpxchg
#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 24cbfc112dfa..0ccc985b90af 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -253,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* space.
* - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * flush_tlb_range(vma,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
@@ -261,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb;
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
- * flush_tlb_page(vaddr,vma)
+ * flush_tlb_page(vma, uaddr)
*
* Invalidate the specified page in the specified address range.
+ * - vma - vm_area_struct describing address range
* - vaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
- *
- * flush_kern_tlb_page(kaddr)
- *
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
*/
/*
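The corrected kerneldoc above now matches the real call signatures; a minimal usage fragment for orientation:

/* Usage matching the corrected documentation (fragment, not a full function). */
flush_tlb_page(vma, uaddr);           /* one user page within vma's mm     */
flush_tlb_range(vma, start, end);     /* a VA range within vma's mm        */
flush_tlb_mm(mm);                     /* every entry for an address space  */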
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
deleted file mode 100644
index ab905ffcf193..000000000000
--- a/arch/arm/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
-
-/*
- * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
- * but we don't want to use linux/unaligned/access_ok.h since that can lead
- * to traps on unaligned stm/ldm or strd/ldrd.
- */
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_ARM_UNALIGNED_H */
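With the ARM-specific header gone, <asm/unaligned.h> is expected to resolve to the generic byte-wise implementation, so existing callers are unchanged. A small sketch of the accessors they keep using (illustrative):

/* Sketch: the same accessors keep working via the generic header. */
#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_len(const u8 *p)
{
        return get_unaligned_le32(p);   /* byte-wise where the CPU requires it */
}

static void write_tag(u8 *p, u16 tag)
{
        put_unaligned_be16(tag, p);
}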
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 698b6f636156..20ab1e607522 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -63,7 +63,27 @@ int arch_show_interrupts(struct seq_file *p, int prec)
*/
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
- __handle_domain_irq(NULL, irq, false, regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct irq_desc *desc;
+
+ irq_enter();
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (unlikely(!irq || irq >= nr_irqs))
+ desc = NULL;
+ else
+ desc = irq_to_desc(irq);
+
+ if (likely(desc))
+ handle_irq_desc(desc);
+ else
+ ack_bad_irq(irq);
+
+ irq_exit();
+ set_irq_regs(old_regs);
}
/*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2924d7910b10..eb2190477da1 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
} else {
armv7_pmnc_select_counter(idx);
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
}
}
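The (u32) casts above make the truncation to the counter's 32-bit width explicit, instead of relying on how the compiler places a 64-bit operand across a register pair for the single-register mcr. An equivalent, more explicit form (sketch, not the kernel code):

/* Illustrative equivalent of the cast above. */
#include <linux/types.h>

static inline void write_evcntr_lo32(u64 value)
{
        u32 lo = (u32)value;    /* the event counter register is 32 bits wide */

        asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (lo));
}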
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 6324f4db9b02..fc9e8b37eaa8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -288,7 +288,7 @@ unsigned long get_wchan(struct task_struct *p)
struct stackframe frame;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
frame.fp = thread_saved_fp(p);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 08c5676477d7..7693a1f05ecc 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -545,9 +545,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 74679240a9d8..c7bb168b0d97 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
#endif
pr_debug("CPU%u: Booted secondary processor\n", cpu);
- preempt_disable();
trace_hardirqs_off();
/*
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 020e6deb67c8..237e8aa9fe83 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -12,6 +12,7 @@
#include <linux/suspend.h>
#include <linux/io.h>
+#include "common.h"
#include "hardware.h"
static int mx27_suspend_enter(suspend_state_t state)
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 000f672a94c9..007a44412e24 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -233,12 +233,38 @@ static struct platform_device *ixp46x_devices[] __initdata = {
unsigned long ixp4xx_exp_bus_size;
EXPORT_SYMBOL(ixp4xx_exp_bus_size);
+static struct platform_device_info ixp_dev_info __initdata = {
+ .name = "ixp4xx_crypto",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int __init ixp_crypto_register(void)
+{
+ struct platform_device *pdev;
+
+ if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
+ IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
+ printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
+ return -ENODEV;
+ }
+
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return 0;
+}
+
void __init ixp4xx_sys_init(void)
{
ixp4xx_exp_bus_size = SZ_16M;
platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_IXP4XX))
+ ixp_crypto_register();
+
if (cpu_is_ixp46x()) {
int region;
diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
index 658c8efb4ca1..a71cf1d189ae 100644
--- a/arch/arm/mach-npcm/Kconfig
+++ b/arch/arm/mach-npcm/Kconfig
@@ -10,6 +10,7 @@ config ARCH_WPCM450
bool "Support for WPCM450 BMC (Hermon)"
depends on ARCH_MULTI_V5
select CPU_ARM926T
+ select WPCM450_AIC
select NPCM7XX_TIMER
help
General support for WPCM450 BMC (Hermon).
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 2ee527c00284..1026a816dcc0 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -458,20 +458,6 @@ static struct gpiod_lookup_table leds_gpio_table = {
#ifdef CONFIG_LEDS_TRIGGERS
DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
-
-static int ams_delta_camera_power(struct device *dev, int power)
-{
- /*
- * turn on camera LED
- */
- if (power)
- led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
- else
- led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
- return 0;
-}
-#else
-#define ams_delta_camera_power NULL
#endif
static struct platform_device ams_delta_audio_device = {
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index c40cf5ef8607..977b0b744c22 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
{
if (!IS_BUILTIN(CONFIG_TPS65010))
return -ENOSYS;
-
+
tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
@@ -394,6 +394,8 @@ static void __init h2_init(void)
BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
gpio_direction_input(H2_NAND_RB_GPIO_PIN);
+ gpiod_add_lookup_table(&isp1301_gpiod_table);
+
omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 2c1e2b32b9b3..a745d64d4699 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
irq = INT_7XX_WAKE_UP_REQ;
else if (cpu_is_omap16xx())
irq = INT_1610_WAKE_UP_REQ;
- if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
- NULL))
- pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ else
+ irq = -1;
+
+ if (irq >= 0) {
+ if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
+ pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ }
/* Program new power ramp-up time
* (0 for most boards since we don't lower voltage when in deep sleep)
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 418a61ecb827..5e86145db0e2 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
static void n8x0_mmc_callback(void *data, u8 card_mask)
{
+#ifdef CONFIG_MMC_OMAP
int bit, *openp, index;
if (board_is_n800()) {
@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
else
*openp = 0;
-#ifdef CONFIG_MMC_OMAP
omap_mmc_notify_cover_event(mmc_device, index, *openp);
#else
pr_warn("MMC: notify cover event not available\n");
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index ec0d9b094744..bddfc7cd5d40 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -121,8 +121,13 @@ static int cplds_probe(struct platform_device *pdev)
return fpga->irq;
base_irq = platform_get_irq(pdev, 1);
- if (base_irq < 0)
+ if (base_irq < 0) {
base_irq = 0;
+ } else {
+ ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq, CPLDS_NB_IRQ, 0);
+ if (ret < 0)
+ return ret;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fpga->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 43fb941dcd07..a56748d671c4 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -6,7 +6,7 @@ config ARCH_ZYNQ
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
- select ARM_GLOBAL_TIMER if !CPU_FREQ
+ select ARM_GLOBAL_TIMER
select CADENCE_TTC_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9d4744a632c6..6162a070a410 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -125,11 +125,22 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = __pfn_to_phys(pfn);
+ unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;
if (__phys_to_pfn(addr) != pfn)
return 0;
- return memblock_is_map_memory(addr);
+ /*
+ * If the address is less than pageblock_size bytes away from a present
+ * memory chunk, there will still be a memory map entry for it,
+ * because we round the freed memory map to pageblock boundaries.
+ */
+ if (memblock_overlaps_region(&memblock.memory,
+ ALIGN_DOWN(addr, pageblock_size),
+ pageblock_size))
+ return 1;
+
+ return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 5335b9687297..74f4b383afe3 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -24,7 +24,7 @@
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
+ * - vma - vm_area_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 1bb28d7db567..87bf4ab17721 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -23,7 +23,7 @@
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
+ * - vma - vm_area_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index a9653117ca0d..27e0af78e88b 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -348,29 +348,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
reset_current_kprobe();
}
break;
-
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
- return 1;
- break;
-
- default:
- break;
}
return 0;
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index c7679d7db98b..c5df1179fc5d 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -456,7 +456,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index f8f07469d259..a7e54a087b80 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -135,24 +135,18 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
return;
}
-int xen_swiotlb_detect(void)
-{
- if (!xen_domain())
- return 0;
- if (xen_feature(XENFEAT_direct_mapped))
- return 1;
- /* legacy case */
- if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
- return 1;
- return 0;
-}
-
static int __init xen_mm_init(void)
{
struct gnttab_cache_flush cflush;
+ int rc;
+
if (!xen_swiotlb_detect())
return 0;
- xen_swiotlb_init();
+
+ rc = xen_swiotlb_init();
+ /* we can work with the default swiotlb */
+ if (rc < 0 && rc != -EEXIST)
+ return rc;
cflush.op = 0;
cflush.a.dev_bus_addr = 0;
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild
index d6465823b281..7b393cfec071 100644
--- a/arch/arm64/Kbuild
+++ b/arch/arm64/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += kernel/ mm/
-obj-$(CONFIG_NET) += net/
+obj-y += kernel/ mm/ net/
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9f1d8566bbf9..e07e7de9ac49 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -42,6 +42,7 @@ config ARM64
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPTION
@@ -93,6 +94,7 @@ config ARM64
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_NO_INSTR
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
@@ -154,7 +156,6 @@ config ARM64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
- select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
@@ -307,14 +308,6 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA
- bool "Support DMA zone" if EXPERT
- default y
-
-config ZONE_DMA32
- bool "Support DMA32 zone" if EXPERT
- default y
-
config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
def_bool y
@@ -1035,7 +1028,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
default "4"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -1052,9 +1045,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
def_bool y
depends on NUMA
-config HOLES_IN_ZONE
- def_bool y
-
source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
@@ -1481,12 +1471,6 @@ menu "ARMv8.3 architectural features"
config ARM64_PTR_AUTH
bool "Enable support for pointer authentication"
default y
- depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
- # Modern compilers insert a .note.gnu.property section note for PAC
- # which is only understood by binutils starting with version 2.33.1.
- depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
- depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
- depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
@@ -1498,13 +1482,6 @@ config ARM64_PTR_AUTH
for each process at exec() time, with these keys being
context-switched along with the process.
- If the compiler supports the -mbranch-protection or
- -msign-return-address flag (e.g. GCC 7 or later), then this option
- will also cause the kernel itself to be compiled with return address
- protection. In this case, and if the target hardware is known to
- support pointer authentication, then CONFIG_STACKPROTECTOR can be
- disabled with minimal loss of protection.
-
The feature is detected at runtime. If the feature is not present in
hardware it will not be advertised to userspace/KVM guest nor will it
be enabled.
@@ -1515,6 +1492,24 @@ config ARM64_PTR_AUTH
but with the feature disabled. On such a system, this option should
not be selected.
+config ARM64_PTR_AUTH_KERNEL
+ bool "Use pointer authentication for kernel"
+ default y
+ depends on ARM64_PTR_AUTH
+ depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+ # Modern compilers insert a .note.gnu.property section note for PAC
+ # which is only understood by binutils starting with version 2.33.1.
+ depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
+ depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ help
+ If the compiler supports the -mbranch-protection or
+ -msign-return-address flag (e.g. GCC 7 or later), then this option
+ will cause the kernel itself to be compiled with return address
+ protection. In this case, and if the target hardware is known to
+ support pointer authentication, then CONFIG_STACKPROTECTOR can be
+ disabled with minimal loss of protection.
+
This feature works with FUNCTION_GRAPH_TRACER option only if
DYNAMIC_FTRACE_WITH_REGS is enabled.
@@ -1606,7 +1601,7 @@ config ARM64_BTI_KERNEL
bool "Use Branch Target Identification for kernel"
default y
depends on ARM64_BTI
- depends on ARM64_PTR_AUTH
+ depends on ARM64_PTR_AUTH_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6409b47b73e4..7336c1fd0dda 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -165,6 +165,7 @@ config ARCH_MEDIATEK
config ARCH_MESON
bool "Amlogic Platforms"
+ select COMMON_CLK
select MESON_IRQ_GPIO
help
This enables support for the arm64 based Amlogic SoCs
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 7ef44478560d..3b5b1c480449 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -70,7 +70,7 @@ endif
# off, this will be overridden if we are using branch protection.
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
-ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
@@ -175,6 +175,9 @@ vdso_install:
$(if $(CONFIG_COMPAT_VDSO), \
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
+archprepare:
+ $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
index 6c309b97587d..e8d31279b7a3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
@@ -46,7 +46,8 @@
eee-broken-100tx;
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
index df212ed5bb94..e65d1c477e2c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
@@ -31,11 +31,10 @@
reg = <0x4>;
eee-broken-1000t;
eee-broken-100tx;
-
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index eca06a0c3cf8..a30249ebffa8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -197,8 +197,8 @@
ddr: memory-controller@1080000 {
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
- big-endian;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
};
dcfg: syscon@1e00000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
index 631e01c1b9fd..be1e7d6f0ecb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
@@ -88,11 +88,11 @@
pinctrl-0 = <&pinctrl_codec2>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index 4dc8383478ee..a08a568c31d9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -45,8 +45,8 @@
reg_12p0_main: regulator-12p0-main {
compatible = "regulator-fixed";
regulator-name = "12V_MAIN";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
regulator-always-on;
};
@@ -77,15 +77,6 @@
regulator-always-on;
};
- reg_3p3v: regulator-3p3v {
- compatible = "regulator-fixed";
- vin-supply = <&reg_3p3_main>;
- regulator-name = "GEN_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
reg_usdhc2_vmmc: regulator-vsd-3v3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2>;
@@ -415,11 +406,11 @@
pinctrl-0 = <&pinctrl_codec1>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 456dcd4a7793..6ffbb099fcac 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -134,7 +134,7 @@
uart0: serial@12000 {
compatible = "marvell,armada-3700-uart";
- reg = <0x12000 0x200>;
+ reg = <0x12000 0x18>;
clocks = <&xtalclk>;
interrupts =
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
index d64621d1213b..ad07fff40544 100644
--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
@@ -135,9 +135,12 @@
};
};
- reset@611010008 {
- compatible = "microchip,sparx5-chip-reset";
+ reset: reset-controller@611010008 {
+ compatible = "microchip,sparx5-switch-reset";
reg = <0x6 0x11010008 0x4>;
+ reg-names = "gcb";
+ #reset-cells = <1>;
+ cpu-syscon = <&cpu_ctrl>;
};
uart0: serial@600100000 {
@@ -275,6 +278,21 @@
"GPIO_46", "GPIO_47";
function = "emmc";
};
+
+ miim1_pins: miim1-pins {
+ pins = "GPIO_56", "GPIO_57";
+ function = "miim";
+ };
+
+ miim2_pins: miim2-pins {
+ pins = "GPIO_58", "GPIO_59";
+ function = "miim";
+ };
+
+ miim3_pins: miim3-pins {
+ pins = "GPIO_52", "GPIO_53";
+ function = "miim";
+ };
};
sgpio0: gpio@61101036c {
@@ -285,6 +303,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio0_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x1101036c 0x100>;
sgpio_in0: gpio@0 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -292,6 +312,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out0: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -310,6 +333,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio1_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x11010484 0x100>;
sgpio_in1: gpio@0 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -317,6 +342,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out1: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -335,6 +363,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio2_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x1101059c 0x100>;
sgpio_in2: gpio@0 {
reg = <0>;
@@ -342,6 +372,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out2: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -386,5 +419,62 @@
#thermal-sensor-cells = <0>;
clocks = <&ahb_clk>;
};
+
+ mdio0: mdio@6110102b0 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102b0 0x24>;
+ };
+
+ mdio1: mdio@6110102d4 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim1_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102d4 0x24>;
+ };
+
+ mdio2: mdio@6110102f8 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim2_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102f8 0x24>;
+ };
+
+ mdio3: mdio@61101031c {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim3_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x1101031c 0x24>;
+ };
+
+ serdes: serdes@10808000 {
+ compatible = "microchip,sparx5-serdes";
+ #phy-cells = <1>;
+ clocks = <&sys_clk>;
+ reg = <0x6 0x10808000 0x5d0000>;
+ };
+
+ switch: switch@600000000 {
+ compatible = "microchip,sparx5-switch";
+ reg = <0x6 0 0x401000>,
+ <0x6 0x10004000 0x7fc000>,
+ <0x6 0x11010000 0xaf0000>;
+ reg-names = "cpu", "dev", "gcb";
+ interrupt-names = "xtr";
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&reset 0>;
+ reset-names = "switch";
+ };
};
};
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
index f0c915160990..33faf1f3264f 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
@@ -7,30 +7,6 @@
#include "sparx5_pcb_common.dtsi"
/{
- aliases {
- i2c0 = &i2c0;
- i2c100 = &i2c100;
- i2c101 = &i2c101;
- i2c102 = &i2c102;
- i2c103 = &i2c103;
- i2c104 = &i2c104;
- i2c105 = &i2c105;
- i2c106 = &i2c106;
- i2c107 = &i2c107;
- i2c108 = &i2c108;
- i2c109 = &i2c109;
- i2c110 = &i2c110;
- i2c111 = &i2c111;
- i2c112 = &i2c112;
- i2c113 = &i2c113;
- i2c114 = &i2c114;
- i2c115 = &i2c115;
- i2c116 = &i2c116;
- i2c117 = &i2c117;
- i2c118 = &i2c118;
- i2c119 = &i2c119;
- };
-
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
@@ -298,17 +274,10 @@
&spi0 {
status = "okay";
- spi@0 {
- compatible = "spi-mux";
- mux-controls = <&mux>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>; /* CS0 */
- spi-flash@9 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0x9>; /* SPI */
- };
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <8000000>;
+ reg = <0>;
};
};
@@ -328,6 +297,33 @@
};
};
+&sgpio0 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <8 15>;
+ gpio@0 {
+ ngpios = <64>;
+ };
+ gpio@1 {
+ ngpios = <64>;
+ };
+};
+
+&sgpio1 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <24 31>;
+ gpio@0 {
+ ngpios = <64>;
+ };
+ gpio@1 {
+ ngpios = <64>;
+ };
+};
+
+&sgpio2 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <0 0>, <11 31>;
+};
+
&gpio {
i2cmux_pins_i: i2cmux-pins-i {
pins = "GPIO_16", "GPIO_17", "GPIO_18", "GPIO_19",
@@ -415,9 +411,9 @@
&i2c0_imux {
pinctrl-names =
- "i2c100", "i2c101", "i2c102", "i2c103",
- "i2c104", "i2c105", "i2c106", "i2c107",
- "i2c108", "i2c109", "i2c110", "i2c111", "idle";
+ "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
+ "i2c_sfp5", "i2c_sfp6", "i2c_sfp7", "i2c_sfp8",
+ "i2c_sfp9", "i2c_sfp10", "i2c_sfp11", "i2c_sfp12", "idle";
pinctrl-0 = <&i2cmux_0>;
pinctrl-1 = <&i2cmux_1>;
pinctrl-2 = <&i2cmux_2>;
@@ -431,62 +427,62 @@
pinctrl-10 = <&i2cmux_10>;
pinctrl-11 = <&i2cmux_11>;
pinctrl-12 = <&i2cmux_pins_i>;
- i2c100: i2c_sfp1 {
+ i2c_sfp1: i2c_sfp1 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c101: i2c_sfp2 {
+ i2c_sfp2: i2c_sfp2 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c102: i2c_sfp3 {
+ i2c_sfp3: i2c_sfp3 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c103: i2c_sfp4 {
+ i2c_sfp4: i2c_sfp4 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c104: i2c_sfp5 {
+ i2c_sfp5: i2c_sfp5 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c105: i2c_sfp6 {
+ i2c_sfp6: i2c_sfp6 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c106: i2c_sfp7 {
+ i2c_sfp7: i2c_sfp7 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c107: i2c_sfp8 {
+ i2c_sfp8: i2c_sfp8 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c108: i2c_sfp9 {
+ i2c_sfp9: i2c_sfp9 {
reg = <0x8>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c109: i2c_sfp10 {
+ i2c_sfp10: i2c_sfp10 {
reg = <0x9>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c110: i2c_sfp11 {
+ i2c_sfp11: i2c_sfp11 {
reg = <0xa>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c111: i2c_sfp12 {
+ i2c_sfp12: i2c_sfp12 {
reg = <0xb>;
#address-cells = <1>;
#size-cells = <0>;
@@ -499,44 +495,413 @@
&gpio 61 GPIO_ACTIVE_HIGH
&gpio 54 GPIO_ACTIVE_HIGH>;
idle-state = <0x8>;
- i2c112: i2c_sfp13 {
+ i2c_sfp13: i2c_sfp13 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c113: i2c_sfp14 {
+ i2c_sfp14: i2c_sfp14 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c114: i2c_sfp15 {
+ i2c_sfp15: i2c_sfp15 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c115: i2c_sfp16 {
+ i2c_sfp16: i2c_sfp16 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c116: i2c_sfp17 {
+ i2c_sfp17: i2c_sfp17 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c117: i2c_sfp18 {
+ i2c_sfp18: i2c_sfp18 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c118: i2c_sfp19 {
+ i2c_sfp19: i2c_sfp19 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c119: i2c_sfp20 {
+ i2c_sfp20: i2c_sfp20 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
};
};
+
+&mdio3 {
+ status = "ok";
+ phy64: ethernet-phy@64 {
+ reg = <28>;
+ };
+};
+
+&axi {
+ sfp_eth12: sfp-eth12 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 11 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 11 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 11 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 12 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth13: sfp-eth13 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 12 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 12 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 12 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 13 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth14: sfp-eth14 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 13 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 13 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 13 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 14 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth15: sfp-eth15 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 14 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 14 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 14 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 15 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth48: sfp-eth48 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp5>;
+ tx-disable-gpios = <&sgpio_out2 15 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 15 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 15 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 16 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth49: sfp-eth49 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp6>;
+ tx-disable-gpios = <&sgpio_out2 16 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 16 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 16 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 17 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth50: sfp-eth50 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp7>;
+ tx-disable-gpios = <&sgpio_out2 17 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 17 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 17 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 18 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth51: sfp-eth51 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp8>;
+ tx-disable-gpios = <&sgpio_out2 18 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 18 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 18 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 19 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth52: sfp-eth52 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp9>;
+ tx-disable-gpios = <&sgpio_out2 19 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 19 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 19 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 20 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth53: sfp-eth53 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp10>;
+ tx-disable-gpios = <&sgpio_out2 20 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 20 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 20 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 21 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth54: sfp-eth54 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp11>;
+ tx-disable-gpios = <&sgpio_out2 21 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 21 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 21 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 22 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth55: sfp-eth55 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp12>;
+ tx-disable-gpios = <&sgpio_out2 22 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 22 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 22 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 23 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth56: sfp-eth56 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp13>;
+ tx-disable-gpios = <&sgpio_out2 23 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 23 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 23 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 24 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth57: sfp-eth57 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp14>;
+ tx-disable-gpios = <&sgpio_out2 24 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 24 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 24 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 25 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth58: sfp-eth58 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp15>;
+ tx-disable-gpios = <&sgpio_out2 25 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 25 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 25 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 26 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth59: sfp-eth59 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp16>;
+ tx-disable-gpios = <&sgpio_out2 26 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 26 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 26 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 27 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp17>;
+ tx-disable-gpios = <&sgpio_out2 27 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 27 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 27 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp18>;
+ tx-disable-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp19>;
+ tx-disable-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp20>;
+ tx-disable-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&switch {
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 10G SFPs */
+ port12: port@12 {
+ reg = <12>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 13>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth12>;
+ microchip,sd-sgpio = <301>;
+ managed = "in-band-status";
+ };
+ port13: port@13 {
+ reg = <13>;
+ /* Example: CU SFP, 1G speed */
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 14>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth13>;
+ microchip,sd-sgpio = <305>;
+ managed = "in-band-status";
+ };
+ port14: port@14 {
+ reg = <14>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 15>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth14>;
+ microchip,sd-sgpio = <309>;
+ managed = "in-band-status";
+ };
+ port15: port@15 {
+ reg = <15>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 16>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth15>;
+ microchip,sd-sgpio = <313>;
+ managed = "in-band-status";
+ };
+ port48: port@48 {
+ reg = <48>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 17>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth48>;
+ microchip,sd-sgpio = <317>;
+ managed = "in-band-status";
+ };
+ port49: port@49 {
+ reg = <49>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 18>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth49>;
+ microchip,sd-sgpio = <321>;
+ managed = "in-band-status";
+ };
+ port50: port@50 {
+ reg = <50>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 19>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth50>;
+ microchip,sd-sgpio = <325>;
+ managed = "in-band-status";
+ };
+ port51: port@51 {
+ reg = <51>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 20>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth51>;
+ microchip,sd-sgpio = <329>;
+ managed = "in-band-status";
+ };
+ port52: port@52 {
+ reg = <52>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 21>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth52>;
+ microchip,sd-sgpio = <333>;
+ managed = "in-band-status";
+ };
+ port53: port@53 {
+ reg = <53>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 22>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth53>;
+ microchip,sd-sgpio = <337>;
+ managed = "in-band-status";
+ };
+ port54: port@54 {
+ reg = <54>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 23>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth54>;
+ microchip,sd-sgpio = <341>;
+ managed = "in-band-status";
+ };
+ port55: port@55 {
+ reg = <55>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 24>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth55>;
+ microchip,sd-sgpio = <345>;
+ managed = "in-band-status";
+ };
+ /* 25G SFPs */
+ port56: port@56 {
+ reg = <56>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 25>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth56>;
+ microchip,sd-sgpio = <349>;
+ managed = "in-band-status";
+ };
+ port57: port@57 {
+ reg = <57>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 26>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth57>;
+ microchip,sd-sgpio = <353>;
+ managed = "in-band-status";
+ };
+ port58: port@58 {
+ reg = <58>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 27>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth58>;
+ microchip,sd-sgpio = <357>;
+ managed = "in-band-status";
+ };
+ port59: port@59 {
+ reg = <59>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 28>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth59>;
+ microchip,sd-sgpio = <361>;
+ managed = "in-band-status";
+ };
+ port60: port@60 {
+ reg = <60>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 29>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth60>;
+ microchip,sd-sgpio = <365>;
+ managed = "in-band-status";
+ };
+ port61: port@61 {
+ reg = <61>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 30>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth61>;
+ microchip,sd-sgpio = <369>;
+ managed = "in-band-status";
+ };
+ port62: port@62 {
+ reg = <62>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 31>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth62>;
+ microchip,sd-sgpio = <373>;
+ managed = "in-band-status";
+ };
+ port63: port@63 {
+ reg = <63>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 32>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth63>;
+ microchip,sd-sgpio = <377>;
+ managed = "in-band-status";
+ };
+ /* Finally the Management interface */
+ port64: port@64 {
+ reg = <64>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 0>;
+ phy-handle = <&phy64>;
+ phy-mode = "sgmii";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
index e28c6dd16377..ef96e6d8c6b3 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
@@ -7,14 +7,6 @@
#include "sparx5_pcb_common.dtsi"
/{
- aliases {
- i2c0 = &i2c0;
- i2c152 = &i2c152;
- i2c153 = &i2c153;
- i2c154 = &i2c154;
- i2c155 = &i2c155;
- };
-
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
@@ -97,17 +89,10 @@
&spi0 {
status = "okay";
- spi@0 {
- compatible = "spi-mux";
- mux-controls = <&mux>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>; /* CS0 */
- spi-flash@9 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0x9>; /* SPI */
- };
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <8000000>;
+ reg = <0>;
};
};
@@ -138,6 +123,11 @@
};
};
+&sgpio2 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>;
+};
+
&axi {
i2c0_imux: i2c0-imux@0 {
compatible = "i2c-mux-pinctrl";
@@ -149,31 +139,614 @@
&i2c0_imux {
pinctrl-names =
- "i2c152", "i2c153", "i2c154", "i2c155",
+ "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
"idle";
pinctrl-0 = <&i2cmux_s29>;
pinctrl-1 = <&i2cmux_s30>;
pinctrl-2 = <&i2cmux_s31>;
pinctrl-3 = <&i2cmux_s32>;
pinctrl-4 = <&i2cmux_pins_i>;
- i2c152: i2c_sfp1 {
+ i2c_sfp1: i2c_sfp1 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c153: i2c_sfp2 {
+ i2c_sfp2: i2c_sfp2 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c154: i2c_sfp3 {
+ i2c_sfp3: i2c_sfp3 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c155: i2c_sfp4 {
+ i2c_sfp4: i2c_sfp4 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
};
+
+&axi {
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 28 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 29 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 30 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 31 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 31 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 31 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 2 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&mdio0 {
+ status = "ok";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ };
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ };
+ phy6: ethernet-phy@6 {
+ reg = <6>;
+ };
+ phy7: ethernet-phy@7 {
+ reg = <7>;
+ };
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ };
+ phy9: ethernet-phy@9 {
+ reg = <9>;
+ };
+ phy10: ethernet-phy@10 {
+ reg = <10>;
+ };
+ phy11: ethernet-phy@11 {
+ reg = <11>;
+ };
+ phy12: ethernet-phy@12 {
+ reg = <12>;
+ };
+ phy13: ethernet-phy@13 {
+ reg = <13>;
+ };
+ phy14: ethernet-phy@14 {
+ reg = <14>;
+ };
+ phy15: ethernet-phy@15 {
+ reg = <15>;
+ };
+ phy16: ethernet-phy@16 {
+ reg = <16>;
+ };
+ phy17: ethernet-phy@17 {
+ reg = <17>;
+ };
+ phy18: ethernet-phy@18 {
+ reg = <18>;
+ };
+ phy19: ethernet-phy@19 {
+ reg = <19>;
+ };
+ phy20: ethernet-phy@20 {
+ reg = <20>;
+ };
+ phy21: ethernet-phy@21 {
+ reg = <21>;
+ };
+ phy22: ethernet-phy@22 {
+ reg = <22>;
+ };
+ phy23: ethernet-phy@23 {
+ reg = <23>;
+ };
+};
+
+&mdio1 {
+ status = "ok";
+ phy24: ethernet-phy@24 {
+ reg = <0>;
+ };
+ phy25: ethernet-phy@25 {
+ reg = <1>;
+ };
+ phy26: ethernet-phy@26 {
+ reg = <2>;
+ };
+ phy27: ethernet-phy@27 {
+ reg = <3>;
+ };
+ phy28: ethernet-phy@28 {
+ reg = <4>;
+ };
+ phy29: ethernet-phy@29 {
+ reg = <5>;
+ };
+ phy30: ethernet-phy@30 {
+ reg = <6>;
+ };
+ phy31: ethernet-phy@31 {
+ reg = <7>;
+ };
+ phy32: ethernet-phy@32 {
+ reg = <8>;
+ };
+ phy33: ethernet-phy@33 {
+ reg = <9>;
+ };
+ phy34: ethernet-phy@34 {
+ reg = <10>;
+ };
+ phy35: ethernet-phy@35 {
+ reg = <11>;
+ };
+ phy36: ethernet-phy@36 {
+ reg = <12>;
+ };
+ phy37: ethernet-phy@37 {
+ reg = <13>;
+ };
+ phy38: ethernet-phy@38 {
+ reg = <14>;
+ };
+ phy39: ethernet-phy@39 {
+ reg = <15>;
+ };
+ phy40: ethernet-phy@40 {
+ reg = <16>;
+ };
+ phy41: ethernet-phy@41 {
+ reg = <17>;
+ };
+ phy42: ethernet-phy@42 {
+ reg = <18>;
+ };
+ phy43: ethernet-phy@43 {
+ reg = <19>;
+ };
+ phy44: ethernet-phy@44 {
+ reg = <20>;
+ };
+ phy45: ethernet-phy@45 {
+ reg = <21>;
+ };
+ phy46: ethernet-phy@46 {
+ reg = <22>;
+ };
+ phy47: ethernet-phy@47 {
+ reg = <23>;
+ };
+};
+
+&mdio3 {
+ status = "ok";
+ phy64: ethernet-phy@64 {
+ reg = <28>;
+ };
+};
+
+&switch {
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port0: port@0 {
+ reg = <0>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy0>;
+ phy-mode = "qsgmii";
+ };
+ port1: port@1 {
+ reg = <1>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy1>;
+ phy-mode = "qsgmii";
+ };
+ port2: port@2 {
+ reg = <2>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy2>;
+ phy-mode = "qsgmii";
+ };
+ port3: port@3 {
+ reg = <3>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy3>;
+ phy-mode = "qsgmii";
+ };
+ port4: port@4 {
+ reg = <4>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy4>;
+ phy-mode = "qsgmii";
+ };
+ port5: port@5 {
+ reg = <5>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy5>;
+ phy-mode = "qsgmii";
+ };
+ port6: port@6 {
+ reg = <6>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy6>;
+ phy-mode = "qsgmii";
+ };
+ port7: port@7 {
+ reg = <7>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy7>;
+ phy-mode = "qsgmii";
+ };
+ port8: port@8 {
+ reg = <8>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy8>;
+ phy-mode = "qsgmii";
+ };
+ port9: port@9 {
+ reg = <9>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy9>;
+ phy-mode = "qsgmii";
+ };
+ port10: port@10 {
+ reg = <10>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy10>;
+ phy-mode = "qsgmii";
+ };
+ port11: port@11 {
+ reg = <11>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy11>;
+ phy-mode = "qsgmii";
+ };
+ port12: port@12 {
+ reg = <12>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy12>;
+ phy-mode = "qsgmii";
+ };
+ port13: port@13 {
+ reg = <13>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy13>;
+ phy-mode = "qsgmii";
+ };
+ port14: port@14 {
+ reg = <14>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy14>;
+ phy-mode = "qsgmii";
+ };
+ port15: port@15 {
+ reg = <15>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy15>;
+ phy-mode = "qsgmii";
+ };
+ port16: port@16 {
+ reg = <16>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy16>;
+ phy-mode = "qsgmii";
+ };
+ port17: port@17 {
+ reg = <17>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy17>;
+ phy-mode = "qsgmii";
+ };
+ port18: port@18 {
+ reg = <18>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy18>;
+ phy-mode = "qsgmii";
+ };
+ port19: port@19 {
+ reg = <19>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy19>;
+ phy-mode = "qsgmii";
+ };
+ port20: port@20 {
+ reg = <20>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy20>;
+ phy-mode = "qsgmii";
+ };
+ port21: port@21 {
+ reg = <21>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy21>;
+ phy-mode = "qsgmii";
+ };
+ port22: port@22 {
+ reg = <22>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy22>;
+ phy-mode = "qsgmii";
+ };
+ port23: port@23 {
+ reg = <23>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy23>;
+ phy-mode = "qsgmii";
+ };
+ port24: port@24 {
+ reg = <24>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy24>;
+ phy-mode = "qsgmii";
+ };
+ port25: port@25 {
+ reg = <25>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy25>;
+ phy-mode = "qsgmii";
+ };
+ port26: port@26 {
+ reg = <26>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy26>;
+ phy-mode = "qsgmii";
+ };
+ port27: port@27 {
+ reg = <27>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy27>;
+ phy-mode = "qsgmii";
+ };
+ port28: port@28 {
+ reg = <28>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy28>;
+ phy-mode = "qsgmii";
+ };
+ port29: port@29 {
+ reg = <29>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy29>;
+ phy-mode = "qsgmii";
+ };
+ port30: port@30 {
+ reg = <30>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy30>;
+ phy-mode = "qsgmii";
+ };
+ port31: port@31 {
+ reg = <31>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy31>;
+ phy-mode = "qsgmii";
+ };
+ port32: port@32 {
+ reg = <32>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy32>;
+ phy-mode = "qsgmii";
+ };
+ port33: port@33 {
+ reg = <33>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy33>;
+ phy-mode = "qsgmii";
+ };
+ port34: port@34 {
+ reg = <34>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy34>;
+ phy-mode = "qsgmii";
+ };
+ port35: port@35 {
+ reg = <35>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy35>;
+ phy-mode = "qsgmii";
+ };
+ port36: port@36 {
+ reg = <36>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy36>;
+ phy-mode = "qsgmii";
+ };
+ port37: port@37 {
+ reg = <37>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy37>;
+ phy-mode = "qsgmii";
+ };
+ port38: port@38 {
+ reg = <38>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy38>;
+ phy-mode = "qsgmii";
+ };
+ port39: port@39 {
+ reg = <39>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy39>;
+ phy-mode = "qsgmii";
+ };
+ port40: port@40 {
+ reg = <40>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy40>;
+ phy-mode = "qsgmii";
+ };
+ port41: port@41 {
+ reg = <41>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy41>;
+ phy-mode = "qsgmii";
+ };
+ port42: port@42 {
+ reg = <42>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy42>;
+ phy-mode = "qsgmii";
+ };
+ port43: port@43 {
+ reg = <43>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy43>;
+ phy-mode = "qsgmii";
+ };
+ port44: port@44 {
+ reg = <44>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy44>;
+ phy-mode = "qsgmii";
+ };
+ port45: port@45 {
+ reg = <45>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy45>;
+ phy-mode = "qsgmii";
+ };
+ port46: port@46 {
+ reg = <46>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy46>;
+ phy-mode = "qsgmii";
+ };
+ port47: port@47 {
+ reg = <47>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy47>;
+ phy-mode = "qsgmii";
+ };
+ /* Then the 25G interfaces */
+ port60: port@60 {
+ reg = <60>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 29>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth60>;
+ managed = "in-band-status";
+ };
+ port61: port@61 {
+ reg = <61>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 30>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth61>;
+ managed = "in-band-status";
+ };
+ port62: port@62 {
+ reg = <62>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 31>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth62>;
+ managed = "in-band-status";
+ };
+ port63: port@63 {
+ reg = <63>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 32>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth63>;
+ managed = "in-band-status";
+ };
+ /* Finally the Management interface */
+ port64: port@64 {
+ reg = <64>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 0>;
+ phy-handle = <&phy64>;
+ phy-mode = "sgmii";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index ce430ba9c118..5d06216c5c1c 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1136,7 +1136,7 @@
};
adreno_smmu: iommu@b40000 {
- compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+ compatible = "qcom,msm8996-smmu-v2", "qcom,adreno-smmu", "qcom,smmu-v2";
reg = <0x00b40000 0x10000>;
#global-interrupts = <1>;
diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
index c62ddb9b2ba5..3771144a2ce4 100644
--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
@@ -14,7 +14,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
@@ -29,7 +28,6 @@
ports {
port@0 {
- reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index d64fb8b1b86c..46f8dbf68904 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -2573,6 +2573,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2628,6 +2632,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 5b05474dc272..d16a4be5ef77 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -2419,6 +2419,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2474,6 +2478,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
index e7b4a929bb17..2e3d1981cac4 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
@@ -33,7 +33,7 @@
status = "okay";
ports {
- port {
+ port@0 {
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 20fa3caa050e..1aef34447abd 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1823,6 +1823,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 8eb006cbd9af..1f51237ab0a6 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -2709,6 +2709,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2764,6 +2768,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
index 25b87da32eeb..b643d3079db1 100644
--- a/arch/arm64/boot/dts/renesas/r8a77950.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
@@ -192,6 +192,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 5c39152e4570..85d66d15465a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -3097,6 +3097,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3152,6 +3156,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3191,6 +3199,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 25d947a81b29..12476e354d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -2761,6 +2761,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2816,6 +2820,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index ab081f14af9a..d9804768425a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -2499,6 +2499,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2554,6 +2558,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 657b20d3533b..dcb9df861d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -2575,6 +2575,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2630,6 +2634,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 5a5d5649332a..e8f6352c3665 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -1106,6 +1106,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 1ffa4a995a7a..7b51d464de0e 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1439,6 +1439,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -1478,6 +1482,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index 295d34f1d216..4715e4a4abe0 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -298,8 +298,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 5010f23fafcc..0eaea58f4210 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -1970,6 +1970,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index e18747df219f..453ffcef24fa 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -349,7 +349,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1>;
@@ -364,8 +363,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2 3 4>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 0c5fa9801e6f..b815ce73e5c6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -637,6 +637,28 @@
status = "disabled";
};
+ gmac: ethernet@ff4e0000 {
+ compatible = "rockchip,rk3308-gmac";
+ reg = <0x0 0xff4e0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX_TX>,
+ <&cru SCLK_MAC_RX_TX>, <&cru SCLK_MAC_REF>,
+ <&cru SCLK_MAC>, <&cru ACLK_MAC>,
+ <&cru PCLK_MAC>, <&cru SCLK_MAC_RMII>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "clk_mac_refout", "aclk_mac",
+ "pclk_mac", "clk_mac_speed";
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rmii_pins &mac_refclk_12ma>;
+ resets = <&cru SRST_MAC_A>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
cru: clock-controller@ff500000 {
compatible = "rockchip,rk3308-cru";
reg = <0x0 0xff500000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index b2bcbf23eefd..ca59d1f711f8 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -42,12 +42,12 @@
};
};
- dmss: dmss {
+ dmss: bus@48000000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
dma-ranges;
- ranges;
+ ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
ti,sci-dev-id = <25>;
@@ -134,7 +134,7 @@
};
};
- dmsc: dmsc@44043000 {
+ dmsc: system-controller@44043000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
mbox-names = "rx", "tx";
@@ -148,7 +148,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -373,8 +373,9 @@
clocks = <&k3_clks 145 0>;
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
index 99e94dee1bd4..deb19ae5e168 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
@@ -74,8 +74,9 @@
clocks = <&k3_clks 148 0>;
};
- mcu_gpio_intr: interrupt-controller1 {
+ mcu_gpio_intr: interrupt-controller@4210000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x04210000 0x00 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index cb340d1b401f..6cd3131eb9ff 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -433,8 +433,9 @@
#phy-cells = <0>;
};
- intr_main_gpio: interrupt-controller0 {
+ intr_main_gpio: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x00a00000 0x0 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -444,18 +445,19 @@
ti,interrupt-ranges = <0 392 32>;
};
- main-navss {
+ main_navss: bus@30800000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0xbc00000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <118>;
- intr_main_navss: interrupt-controller1 {
+ intr_main_navss: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x2000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 0388c02c2203..f5b8ef2f5f77 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -116,11 +116,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index ed42f13e7663..7cb864b4d74a 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -6,24 +6,24 @@
*/
&cbass_wakeup {
- dmsc: dmsc {
+ dmsc: system-controller@44083000 {
compatible = "ti,am654-sci";
ti,host-id = <12>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
mbox-names = "rx", "tx";
mboxes= <&secure_proxy_main 11>,
<&secure_proxy_main 13>;
+ reg-names = "debug_messages";
+ reg = <0x44083000 0x1000>;
+
k3_pds: power-controller {
compatible = "ti,sci-pm-domain";
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -69,8 +69,9 @@
power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
};
- intr_wkup_gpio: interrupt-controller2 {
+ intr_wkup_gpio: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x42200000 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 9e87fb313a54..eddb2ffb93ca 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -85,12 +85,6 @@
gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
};
};
-
- clk_ov5640_fixed: clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- };
};
&wkup_pmx0 {
@@ -287,23 +281,6 @@
pinctrl-names = "default";
pinctrl-0 = <&main_i2c1_pins_default>;
clock-frequency = <400000>;
-
- ov5640: camera@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
-
- clocks = <&clk_ov5640_fixed>;
- clock-names = "xclk";
-
- port {
- csi2_cam0: endpoint {
- remote-endpoint = <&csi2_phy0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
- };
- };
-
};
&main_i2c2 {
@@ -496,14 +473,6 @@
};
};
-&csi2_0 {
- csi2_phy0: endpoint {
- remote-endpoint = <&csi2_cam0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
-};
-
&mcu_cpsw {
pinctrl-names = "default";
pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index f86c493a44f1..19fea8adbcff 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -68,8 +68,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -85,9 +86,12 @@
#size-cells = <2>;
ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
ti,sci-dev-id = <199>;
+ dma-coherent;
+ dma-ranges;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x310e0000 0x00 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index 5e74e43822c3..5663fe3ea466 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index c2aa45a3ac79..3bcafe4c1742 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -76,8 +76,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -87,18 +88,19 @@
ti,interrupt-ranges = <8 392 56>;
};
- main-navss {
+ main_navss: bus@30000000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <199>;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index d56e3475aee7..5e825e4d0306 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -249,11 +250,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index d0901e610df3..09a805cc32d7 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -68,19 +68,13 @@ CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
-ifdef REGENERATE_ARM64_CRYPTO
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
-$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv8.pl
+$(obj)/%-core.S: $(src)/%-armv8.pl
$(call cmd,perlasm)
-$(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
+$(obj)/sha256-core.S: $(src)/sha512-armv8.pl
$(call cmd,perlasm)
-$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
- $(call cmd,perlasm)
-
-endif
-
clean-files += poly1305-core.S sha256-core.S sha512-core.S
diff --git a/arch/arm64/crypto/poly1305-core.S_shipped b/arch/arm64/crypto/poly1305-core.S_shipped
deleted file mode 100644
index fb2822abf63a..000000000000
--- a/arch/arm64/crypto/poly1305-core.S_shipped
+++ /dev/null
@@ -1,835 +0,0 @@
-#ifndef __KERNEL__
-# include "arm_arch.h"
-.extern OPENSSL_armcap_P
-#endif
-
-.text
-
-// forward "declarations" are required for Apple
-.globl poly1305_blocks
-.globl poly1305_emit
-
-.globl poly1305_init
-.type poly1305_init,%function
-.align 5
-poly1305_init:
- cmp x1,xzr
- stp xzr,xzr,[x0] // zero hash value
- stp xzr,xzr,[x0,#16] // [along with is_base2_26]
-
- csel x0,xzr,x0,eq
- b.eq .Lno_key
-
-#ifndef __KERNEL__
- adrp x17,OPENSSL_armcap_P
- ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
-#endif
-
- ldp x7,x8,[x1] // load key
- mov x9,#0xfffffffc0fffffff
- movk x9,#0x0fff,lsl#48
-#ifdef __AARCH64EB__
- rev x7,x7 // flip bytes
- rev x8,x8
-#endif
- and x7,x7,x9 // &=0ffffffc0fffffff
- and x9,x9,#-4
- and x8,x8,x9 // &=0ffffffc0ffffffc
- mov w9,#-1
- stp x7,x8,[x0,#32] // save key value
- str w9,[x0,#48] // impossible key power value
-
-#ifndef __KERNEL__
- tst w17,#ARMV7_NEON
-
- adr x12,.Lpoly1305_blocks
- adr x7,.Lpoly1305_blocks_neon
- adr x13,.Lpoly1305_emit
-
- csel x12,x12,x7,eq
-
-# ifdef __ILP32__
- stp w12,w13,[x2]
-# else
- stp x12,x13,[x2]
-# endif
-#endif
- mov x0,#1
-.Lno_key:
- ret
-.size poly1305_init,.-poly1305_init
-
-.type poly1305_blocks,%function
-.align 5
-poly1305_blocks:
-.Lpoly1305_blocks:
- ands x2,x2,#-16
- b.eq .Lno_data
-
- ldp x4,x5,[x0] // load hash value
- ldp x6,x17,[x0,#16] // [along with is_base2_26]
- ldp x7,x8,[x0,#32] // load key value
-
-#ifdef __AARCH64EB__
- lsr x12,x4,#32
- mov w13,w4
- lsr x14,x5,#32
- mov w15,w5
- lsr x16,x6,#32
-#else
- mov w12,w4
- lsr x13,x4,#32
- mov w14,w5
- lsr x15,x5,#32
- mov w16,w6
-#endif
-
- add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
- lsr x13,x14,#12
- adds x12,x12,x14,lsl#52
- add x13,x13,x15,lsl#14
- adc x13,x13,xzr
- lsr x14,x16,#24
- adds x13,x13,x16,lsl#40
- adc x14,x14,xzr
-
- cmp x17,#0 // is_base2_26?
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
- csel x4,x4,x12,eq // choose between radixes
- csel x5,x5,x13,eq
- csel x6,x6,x14,eq
-
-.Loop:
- ldp x10,x11,[x1],#16 // load input
- sub x2,x2,#16
-#ifdef __AARCH64EB__
- rev x10,x10
- rev x11,x11
-#endif
- adds x4,x4,x10 // accumulate input
- adcs x5,x5,x11
-
- mul x12,x4,x7 // h0*r0
- adc x6,x6,x3
- umulh x13,x4,x7
-
- mul x10,x5,x9 // h1*5*r1
- umulh x11,x5,x9
-
- adds x12,x12,x10
- mul x10,x4,x8 // h0*r1
- adc x13,x13,x11
- umulh x14,x4,x8
-
- adds x13,x13,x10
- mul x10,x5,x7 // h1*r0
- adc x14,x14,xzr
- umulh x11,x5,x7
-
- adds x13,x13,x10
- mul x10,x6,x9 // h2*5*r1
- adc x14,x14,x11
- mul x11,x6,x7 // h2*r0
-
- adds x13,x13,x10
- adc x14,x14,x11
-
- and x10,x14,#-4 // final reduction
- and x6,x14,#3
- add x10,x10,x14,lsr#2
- adds x4,x12,x10
- adcs x5,x13,xzr
- adc x6,x6,xzr
-
- cbnz x2,.Loop
-
- stp x4,x5,[x0] // store hash value
- stp x6,xzr,[x0,#16] // [and clear is_base2_26]
-
-.Lno_data:
- ret
-.size poly1305_blocks,.-poly1305_blocks
-
-.type poly1305_emit,%function
-.align 5
-poly1305_emit:
-.Lpoly1305_emit:
- ldp x4,x5,[x0] // load hash base 2^64
- ldp x6,x7,[x0,#16] // [along with is_base2_26]
- ldp x10,x11,[x2] // load nonce
-
-#ifdef __AARCH64EB__
- lsr x12,x4,#32
- mov w13,w4
- lsr x14,x5,#32
- mov w15,w5
- lsr x16,x6,#32
-#else
- mov w12,w4
- lsr x13,x4,#32
- mov w14,w5
- lsr x15,x5,#32
- mov w16,w6
-#endif
-
- add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
- lsr x13,x14,#12
- adds x12,x12,x14,lsl#52
- add x13,x13,x15,lsl#14
- adc x13,x13,xzr
- lsr x14,x16,#24
- adds x13,x13,x16,lsl#40
- adc x14,x14,xzr
-
- cmp x7,#0 // is_base2_26?
- csel x4,x4,x12,eq // choose between radixes
- csel x5,x5,x13,eq
- csel x6,x6,x14,eq
-
- adds x12,x4,#5 // compare to modulus
- adcs x13,x5,xzr
- adc x14,x6,xzr
-
- tst x14,#-4 // see if it's carried/borrowed
-
- csel x4,x4,x12,eq
- csel x5,x5,x13,eq
-
-#ifdef __AARCH64EB__
- ror x10,x10,#32 // flip nonce words
- ror x11,x11,#32
-#endif
- adds x4,x4,x10 // accumulate nonce
- adc x5,x5,x11
-#ifdef __AARCH64EB__
- rev x4,x4 // flip output bytes
- rev x5,x5
-#endif
- stp x4,x5,[x1] // write result
-
- ret
-.size poly1305_emit,.-poly1305_emit
-.type poly1305_mult,%function
-.align 5
-poly1305_mult:
- mul x12,x4,x7 // h0*r0
- umulh x13,x4,x7
-
- mul x10,x5,x9 // h1*5*r1
- umulh x11,x5,x9
-
- adds x12,x12,x10
- mul x10,x4,x8 // h0*r1
- adc x13,x13,x11
- umulh x14,x4,x8
-
- adds x13,x13,x10
- mul x10,x5,x7 // h1*r0
- adc x14,x14,xzr
- umulh x11,x5,x7
-
- adds x13,x13,x10
- mul x10,x6,x9 // h2*5*r1
- adc x14,x14,x11
- mul x11,x6,x7 // h2*r0
-
- adds x13,x13,x10
- adc x14,x14,x11
-
- and x10,x14,#-4 // final reduction
- and x6,x14,#3
- add x10,x10,x14,lsr#2
- adds x4,x12,x10
- adcs x5,x13,xzr
- adc x6,x6,xzr
-
- ret
-.size poly1305_mult,.-poly1305_mult
-
-.type poly1305_splat,%function
-.align 4
-poly1305_splat:
- and x12,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x13,x4,#26,#26
- extr x14,x5,x4,#52
- and x14,x14,#0x03ffffff
- ubfx x15,x5,#14,#26
- extr x16,x6,x5,#40
-
- str w12,[x0,#16*0] // r0
- add w12,w13,w13,lsl#2 // r1*5
- str w13,[x0,#16*1] // r1
- add w13,w14,w14,lsl#2 // r2*5
- str w12,[x0,#16*2] // s1
- str w14,[x0,#16*3] // r2
- add w14,w15,w15,lsl#2 // r3*5
- str w13,[x0,#16*4] // s2
- str w15,[x0,#16*5] // r3
- add w15,w16,w16,lsl#2 // r4*5
- str w14,[x0,#16*6] // s3
- str w16,[x0,#16*7] // r4
- str w15,[x0,#16*8] // s4
-
- ret
-.size poly1305_splat,.-poly1305_splat
-
-#ifdef __KERNEL__
-.globl poly1305_blocks_neon
-#endif
-.type poly1305_blocks_neon,%function
-.align 5
-poly1305_blocks_neon:
-.Lpoly1305_blocks_neon:
- ldr x17,[x0,#24]
- cmp x2,#128
- b.lo .Lpoly1305_blocks
-
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-80]!
- add x29,sp,#0
-
- stp d8,d9,[sp,#16] // meet ABI requirements
- stp d10,d11,[sp,#32]
- stp d12,d13,[sp,#48]
- stp d14,d15,[sp,#64]
-
- cbz x17,.Lbase2_64_neon
-
- ldp w10,w11,[x0] // load hash value base 2^26
- ldp w12,w13,[x0,#8]
- ldr w14,[x0,#16]
-
- tst x2,#31
- b.eq .Leven_neon
-
- ldp x7,x8,[x0,#32] // load key value
-
- add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
- lsr x5,x12,#12
- adds x4,x4,x12,lsl#52
- add x5,x5,x13,lsl#14
- adc x5,x5,xzr
- lsr x6,x14,#24
- adds x5,x5,x14,lsl#40
- adc x14,x6,xzr // can be partially reduced...
-
- ldp x12,x13,[x1],#16 // load input
- sub x2,x2,#16
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
-
-#ifdef __AARCH64EB__
- rev x12,x12
- rev x13,x13
-#endif
- adds x4,x4,x12 // accumulate input
- adcs x5,x5,x13
- adc x6,x6,x3
-
- bl poly1305_mult
-
- and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x11,x4,#26,#26
- extr x12,x5,x4,#52
- and x12,x12,#0x03ffffff
- ubfx x13,x5,#14,#26
- extr x14,x6,x5,#40
-
- b .Leven_neon
-
-.align 4
-.Lbase2_64_neon:
- ldp x7,x8,[x0,#32] // load key value
-
- ldp x4,x5,[x0] // load hash value base 2^64
- ldr x6,[x0,#16]
-
- tst x2,#31
- b.eq .Linit_neon
-
- ldp x12,x13,[x1],#16 // load input
- sub x2,x2,#16
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
-#ifdef __AARCH64EB__
- rev x12,x12
- rev x13,x13
-#endif
- adds x4,x4,x12 // accumulate input
- adcs x5,x5,x13
- adc x6,x6,x3
-
- bl poly1305_mult
-
-.Linit_neon:
- ldr w17,[x0,#48] // first table element
- and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x11,x4,#26,#26
- extr x12,x5,x4,#52
- and x12,x12,#0x03ffffff
- ubfx x13,x5,#14,#26
- extr x14,x6,x5,#40
-
- cmp w17,#-1 // is value impossible?
- b.ne .Leven_neon
-
- fmov d24,x10
- fmov d25,x11
- fmov d26,x12
- fmov d27,x13
- fmov d28,x14
-
- ////////////////////////////////// initialize r^n table
- mov x4,x7 // r^1
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
- mov x5,x8
- mov x6,xzr
- add x0,x0,#48+12
- bl poly1305_splat
-
- bl poly1305_mult // r^2
- sub x0,x0,#4
- bl poly1305_splat
-
- bl poly1305_mult // r^3
- sub x0,x0,#4
- bl poly1305_splat
-
- bl poly1305_mult // r^4
- sub x0,x0,#4
- bl poly1305_splat
- sub x0,x0,#48 // restore original x0
- b .Ldo_neon
-
-.align 4
-.Leven_neon:
- fmov d24,x10
- fmov d25,x11
- fmov d26,x12
- fmov d27,x13
- fmov d28,x14
-
-.Ldo_neon:
- ldp x8,x12,[x1,#32] // inp[2:3]
- subs x2,x2,#64
- ldp x9,x13,[x1,#48]
- add x16,x1,#96
- adr x17,.Lzeros
-
- lsl x3,x3,#24
- add x15,x0,#48
-
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- and x5,x9,#0x03ffffff
- ubfx x6,x8,#26,#26
- ubfx x7,x9,#26,#26
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- extr x8,x12,x8,#52
- extr x9,x13,x9,#52
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- fmov d14,x4
- and x8,x8,#0x03ffffff
- and x9,x9,#0x03ffffff
- ubfx x10,x12,#14,#26
- ubfx x11,x13,#14,#26
- add x12,x3,x12,lsr#40
- add x13,x3,x13,lsr#40
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- fmov d15,x6
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- fmov d16,x8
- fmov d17,x10
- fmov d18,x12
-
- ldp x8,x12,[x1],#16 // inp[0:1]
- ldp x9,x13,[x1],#48
-
- ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64
- ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
- ld1 {v8.4s},[x15]
-
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- and x5,x9,#0x03ffffff
- ubfx x6,x8,#26,#26
- ubfx x7,x9,#26,#26
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- extr x8,x12,x8,#52
- extr x9,x13,x9,#52
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- fmov d9,x4
- and x8,x8,#0x03ffffff
- and x9,x9,#0x03ffffff
- ubfx x10,x12,#14,#26
- ubfx x11,x13,#14,#26
- add x12,x3,x12,lsr#40
- add x13,x3,x13,lsr#40
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- fmov d10,x6
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- movi v31.2d,#-1
- fmov d11,x8
- fmov d12,x10
- fmov d13,x12
- ushr v31.2d,v31.2d,#38
-
- b.ls .Lskip_loop
-
-.align 4
-.Loop_neon:
- ////////////////////////////////////////////////////////////////
- // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
- // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
- // ___________________/
- // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
- // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
- // ___________________/ ____________________/
- //
- // Note that we start with inp[2:3]*r^2. This is because it
- // doesn't depend on reduction in previous iteration.
- ////////////////////////////////////////////////////////////////
- // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
- // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
- // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
- // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
- // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
-
- subs x2,x2,#64
- umull v23.2d,v14.2s,v7.s[2]
- csel x16,x17,x16,lo
- umull v22.2d,v14.2s,v5.s[2]
- umull v21.2d,v14.2s,v3.s[2]
- ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
- umull v20.2d,v14.2s,v1.s[2]
- ldp x9,x13,[x16],#48
- umull v19.2d,v14.2s,v0.s[2]
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
-
- umlal v23.2d,v15.2s,v5.s[2]
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- umlal v22.2d,v15.2s,v3.s[2]
- and x5,x9,#0x03ffffff
- umlal v21.2d,v15.2s,v1.s[2]
- ubfx x6,x8,#26,#26
- umlal v20.2d,v15.2s,v0.s[2]
- ubfx x7,x9,#26,#26
- umlal v19.2d,v15.2s,v8.s[2]
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
-
- umlal v23.2d,v16.2s,v3.s[2]
- extr x8,x12,x8,#52
- umlal v22.2d,v16.2s,v1.s[2]
- extr x9,x13,x9,#52
- umlal v21.2d,v16.2s,v0.s[2]
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- umlal v20.2d,v16.2s,v8.s[2]
- fmov d14,x4
- umlal v19.2d,v16.2s,v6.s[2]
- and x8,x8,#0x03ffffff
-
- umlal v23.2d,v17.2s,v1.s[2]
- and x9,x9,#0x03ffffff
- umlal v22.2d,v17.2s,v0.s[2]
- ubfx x10,x12,#14,#26
- umlal v21.2d,v17.2s,v8.s[2]
- ubfx x11,x13,#14,#26
- umlal v20.2d,v17.2s,v6.s[2]
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- umlal v19.2d,v17.2s,v4.s[2]
- fmov d15,x6
-
- add v11.2s,v11.2s,v26.2s
- add x12,x3,x12,lsr#40
- umlal v23.2d,v18.2s,v0.s[2]
- add x13,x3,x13,lsr#40
- umlal v22.2d,v18.2s,v8.s[2]
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- umlal v21.2d,v18.2s,v6.s[2]
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- umlal v20.2d,v18.2s,v4.s[2]
- fmov d16,x8
- umlal v19.2d,v18.2s,v2.s[2]
- fmov d17,x10
-
- ////////////////////////////////////////////////////////////////
- // (hash+inp[0:1])*r^4 and accumulate
-
- add v9.2s,v9.2s,v24.2s
- fmov d18,x12
- umlal v22.2d,v11.2s,v1.s[0]
- ldp x8,x12,[x1],#16 // inp[0:1]
- umlal v19.2d,v11.2s,v6.s[0]
- ldp x9,x13,[x1],#48
- umlal v23.2d,v11.2s,v3.s[0]
- umlal v20.2d,v11.2s,v8.s[0]
- umlal v21.2d,v11.2s,v0.s[0]
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
-
- add v10.2s,v10.2s,v25.2s
- umlal v22.2d,v9.2s,v5.s[0]
- umlal v23.2d,v9.2s,v7.s[0]
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- umlal v21.2d,v9.2s,v3.s[0]
- and x5,x9,#0x03ffffff
- umlal v19.2d,v9.2s,v0.s[0]
- ubfx x6,x8,#26,#26
- umlal v20.2d,v9.2s,v1.s[0]
- ubfx x7,x9,#26,#26
-
- add v12.2s,v12.2s,v27.2s
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- umlal v22.2d,v10.2s,v3.s[0]
- extr x8,x12,x8,#52
- umlal v23.2d,v10.2s,v5.s[0]
- extr x9,x13,x9,#52
- umlal v19.2d,v10.2s,v8.s[0]
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- umlal v21.2d,v10.2s,v1.s[0]
- fmov d9,x4
- umlal v20.2d,v10.2s,v0.s[0]
- and x8,x8,#0x03ffffff
-
- add v13.2s,v13.2s,v28.2s
- and x9,x9,#0x03ffffff
- umlal v22.2d,v12.2s,v0.s[0]
- ubfx x10,x12,#14,#26
- umlal v19.2d,v12.2s,v4.s[0]
- ubfx x11,x13,#14,#26
- umlal v23.2d,v12.2s,v1.s[0]
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- umlal v20.2d,v12.2s,v6.s[0]
- fmov d10,x6
- umlal v21.2d,v12.2s,v8.s[0]
- add x12,x3,x12,lsr#40
-
- umlal v22.2d,v13.2s,v8.s[0]
- add x13,x3,x13,lsr#40
- umlal v19.2d,v13.2s,v2.s[0]
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- umlal v23.2d,v13.2s,v0.s[0]
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- umlal v20.2d,v13.2s,v4.s[0]
- fmov d11,x8
- umlal v21.2d,v13.2s,v6.s[0]
- fmov d12,x10
- fmov d13,x12
-
- /////////////////////////////////////////////////////////////////
- // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
- // and P. Schwabe
- //
- // [see discussion in poly1305-armv4 module]
-
- ushr v29.2d,v22.2d,#26
- xtn v27.2s,v22.2d
- ushr v30.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
- add v23.2d,v23.2d,v29.2d // h3 -> h4
- bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff
- add v20.2d,v20.2d,v30.2d // h0 -> h1
-
- ushr v29.2d,v23.2d,#26
- xtn v28.2s,v23.2d
- ushr v30.2d,v20.2d,#26
- xtn v25.2s,v20.2d
- bic v28.2s,#0xfc,lsl#24
- add v21.2d,v21.2d,v30.2d // h1 -> h2
-
- add v19.2d,v19.2d,v29.2d
- shl v29.2d,v29.2d,#2
- shrn v30.2s,v21.2d,#26
- xtn v26.2s,v21.2d
- add v19.2d,v19.2d,v29.2d // h4 -> h0
- bic v25.2s,#0xfc,lsl#24
- add v27.2s,v27.2s,v30.2s // h2 -> h3
- bic v26.2s,#0xfc,lsl#24
-
- shrn v29.2s,v19.2d,#26
- xtn v24.2s,v19.2d
- ushr v30.2s,v27.2s,#26
- bic v27.2s,#0xfc,lsl#24
- bic v24.2s,#0xfc,lsl#24
- add v25.2s,v25.2s,v29.2s // h0 -> h1
- add v28.2s,v28.2s,v30.2s // h3 -> h4
-
- b.hi .Loop_neon
-
-.Lskip_loop:
- dup v16.2d,v16.d[0]
- add v11.2s,v11.2s,v26.2s
-
- ////////////////////////////////////////////////////////////////
- // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
-
- adds x2,x2,#32
- b.ne .Long_tail
-
- dup v16.2d,v11.d[0]
- add v14.2s,v9.2s,v24.2s
- add v17.2s,v12.2s,v27.2s
- add v15.2s,v10.2s,v25.2s
- add v18.2s,v13.2s,v28.2s
-
-.Long_tail:
- dup v14.2d,v14.d[0]
- umull2 v19.2d,v16.4s,v6.4s
- umull2 v22.2d,v16.4s,v1.4s
- umull2 v23.2d,v16.4s,v3.4s
- umull2 v21.2d,v16.4s,v0.4s
- umull2 v20.2d,v16.4s,v8.4s
-
- dup v15.2d,v15.d[0]
- umlal2 v19.2d,v14.4s,v0.4s
- umlal2 v21.2d,v14.4s,v3.4s
- umlal2 v22.2d,v14.4s,v5.4s
- umlal2 v23.2d,v14.4s,v7.4s
- umlal2 v20.2d,v14.4s,v1.4s
-
- dup v17.2d,v17.d[0]
- umlal2 v19.2d,v15.4s,v8.4s
- umlal2 v22.2d,v15.4s,v3.4s
- umlal2 v21.2d,v15.4s,v1.4s
- umlal2 v23.2d,v15.4s,v5.4s
- umlal2 v20.2d,v15.4s,v0.4s
-
- dup v18.2d,v18.d[0]
- umlal2 v22.2d,v17.4s,v0.4s
- umlal2 v23.2d,v17.4s,v1.4s
- umlal2 v19.2d,v17.4s,v4.4s
- umlal2 v20.2d,v17.4s,v6.4s
- umlal2 v21.2d,v17.4s,v8.4s
-
- umlal2 v22.2d,v18.4s,v8.4s
- umlal2 v19.2d,v18.4s,v2.4s
- umlal2 v23.2d,v18.4s,v0.4s
- umlal2 v20.2d,v18.4s,v4.4s
- umlal2 v21.2d,v18.4s,v6.4s
-
- b.eq .Lshort_tail
-
- ////////////////////////////////////////////////////////////////
- // (hash+inp[0:1])*r^4:r^3 and accumulate
-
- add v9.2s,v9.2s,v24.2s
- umlal v22.2d,v11.2s,v1.2s
- umlal v19.2d,v11.2s,v6.2s
- umlal v23.2d,v11.2s,v3.2s
- umlal v20.2d,v11.2s,v8.2s
- umlal v21.2d,v11.2s,v0.2s
-
- add v10.2s,v10.2s,v25.2s
- umlal v22.2d,v9.2s,v5.2s
- umlal v19.2d,v9.2s,v0.2s
- umlal v23.2d,v9.2s,v7.2s
- umlal v20.2d,v9.2s,v1.2s
- umlal v21.2d,v9.2s,v3.2s
-
- add v12.2s,v12.2s,v27.2s
- umlal v22.2d,v10.2s,v3.2s
- umlal v19.2d,v10.2s,v8.2s
- umlal v23.2d,v10.2s,v5.2s
- umlal v20.2d,v10.2s,v0.2s
- umlal v21.2d,v10.2s,v1.2s
-
- add v13.2s,v13.2s,v28.2s
- umlal v22.2d,v12.2s,v0.2s
- umlal v19.2d,v12.2s,v4.2s
- umlal v23.2d,v12.2s,v1.2s
- umlal v20.2d,v12.2s,v6.2s
- umlal v21.2d,v12.2s,v8.2s
-
- umlal v22.2d,v13.2s,v8.2s
- umlal v19.2d,v13.2s,v2.2s
- umlal v23.2d,v13.2s,v0.2s
- umlal v20.2d,v13.2s,v4.2s
- umlal v21.2d,v13.2s,v6.2s
-
-.Lshort_tail:
- ////////////////////////////////////////////////////////////////
- // horizontal add
-
- addp v22.2d,v22.2d,v22.2d
- ldp d8,d9,[sp,#16] // meet ABI requirements
- addp v19.2d,v19.2d,v19.2d
- ldp d10,d11,[sp,#32]
- addp v23.2d,v23.2d,v23.2d
- ldp d12,d13,[sp,#48]
- addp v20.2d,v20.2d,v20.2d
- ldp d14,d15,[sp,#64]
- addp v21.2d,v21.2d,v21.2d
- ldr x30,[sp,#8]
-
- ////////////////////////////////////////////////////////////////
- // lazy reduction, but without narrowing
-
- ushr v29.2d,v22.2d,#26
- and v22.16b,v22.16b,v31.16b
- ushr v30.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
-
- add v23.2d,v23.2d,v29.2d // h3 -> h4
- add v20.2d,v20.2d,v30.2d // h0 -> h1
-
- ushr v29.2d,v23.2d,#26
- and v23.16b,v23.16b,v31.16b
- ushr v30.2d,v20.2d,#26
- and v20.16b,v20.16b,v31.16b
- add v21.2d,v21.2d,v30.2d // h1 -> h2
-
- add v19.2d,v19.2d,v29.2d
- shl v29.2d,v29.2d,#2
- ushr v30.2d,v21.2d,#26
- and v21.16b,v21.16b,v31.16b
- add v19.2d,v19.2d,v29.2d // h4 -> h0
- add v22.2d,v22.2d,v30.2d // h2 -> h3
-
- ushr v29.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
- ushr v30.2d,v22.2d,#26
- and v22.16b,v22.16b,v31.16b
- add v20.2d,v20.2d,v29.2d // h0 -> h1
- add v23.2d,v23.2d,v30.2d // h3 -> h4
-
- ////////////////////////////////////////////////////////////////
- // write the result, can be partially reduced
-
- st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16
- mov x4,#1
- st1 {v23.s}[0],[x0]
- str x4,[x0,#8] // set is_base2_26
-
- ldr x29,[sp],#80
- .inst 0xd50323bf // autiasp
- ret
-.size poly1305_blocks_neon,.-poly1305_blocks_neon
-
-.align 5
-.Lzeros:
-.long 0,0,0,0,0,0,0,0
-.asciz "Poly1305 for ARMv8, CRYPTOGAMS by @dot-asm"
-.align 2
-#if !defined(__KERNEL__) && !defined(_WIN64)
-.comm OPENSSL_armcap_P,4,4
-.hidden OPENSSL_armcap_P
-#endif
diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped
deleted file mode 100644
index 7c7ce2e3bad6..000000000000
--- a/arch/arm64/crypto/sha256-core.S_shipped
+++ /dev/null
@@ -1,2069 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// This code is taken from the OpenSSL project but the author (Andy Polyakov)
-// has relicensed it under the GPLv2. Therefore this program is free software;
-// you can redistribute it and/or modify it under the terms of the GNU General
-// Public License version 2 as published by the Free Software Foundation.
-//
-// The original headers, including the original license headers, are
-// included below for completeness.
-
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-// SHA256-hw SHA256(*) SHA512
-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***) Super-impressive coefficients over gcc-generated code are
-// indication of some compiler "pathology", most notably code
-// generated with -mgeneral-regs-only is significanty faster
-// and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it makes no sense to implement NEON
-// version of SHA256 for 64-bit processors. This is because performance
-// improvement on most wide-spread Cortex-A5x processors was observed
-// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
-// observed that 32-bit NEON SHA256 performs significantly better than
-// 64-bit scalar version on *some* of the more recent processors. As
-// result 64-bit NEON version of SHA256 was added to provide best
-// all-round performance. For example it executes ~30% faster on X-Gene
-// and Mongoose. [For reference, NEON version of SHA512 is bound to
-// deliver much less improvement, likely *negative* on Cortex-A5x.
-// Which is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha256_block_data_order
-.type sha256_block_data_order,%function
-.align 6
-sha256_block_data_order:
-#ifndef __KERNEL__
-# ifdef __ILP32__
- ldrsw x16,.LOPENSSL_armcap_P
-# else
- ldr x16,.LOPENSSL_armcap_P
-# endif
- adr x17,.LOPENSSL_armcap_P
- add x16,x16,x17
- ldr w16,[x16]
- tst w16,#ARMV8_SHA256
- b.ne .Lv8_entry
- tst w16,#ARMV7_NEON
- b.ne .Lneon_entry
-#endif
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*4
-
- ldp w20,w21,[x0] // load context
- ldp w22,w23,[x0,#2*4]
- ldp w24,w25,[x0,#4*4]
- add x2,x1,x2,lsl#6 // end of input
- ldp w26,w27,[x0,#6*4]
- adr x30,.LK256
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp w3,w4,[x1],#2*4
- ldr w19,[x30],#4 // *K++
- eor w28,w21,w22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev w3,w3 // 0
-#endif
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w6,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w3 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w4,w4 // 1
-#endif
- ldp w5,w6,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w7,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w4 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w5,w5 // 2
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w8,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w5 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w6,w6 // 3
-#endif
- ldp w7,w8,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w9,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w6 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w7,w7 // 4
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w10,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w7 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w10,ror#11 // Sigma1(e)
- ror w10,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w10,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w8,w8 // 5
-#endif
- ldp w9,w10,[x1],#2*4
- add w23,w23,w17 // h+=Sigma0(a)
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w11,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w8 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w11,ror#11 // Sigma1(e)
- ror w11,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w11,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w9,w9 // 6
-#endif
- add w22,w22,w17 // h+=Sigma0(a)
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w12,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w9 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w12,ror#11 // Sigma1(e)
- ror w12,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w12,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w10,w10 // 7
-#endif
- ldp w11,w12,[x1],#2*4
- add w21,w21,w17 // h+=Sigma0(a)
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- eor w13,w25,w25,ror#14
- and w17,w26,w25
- bic w28,w27,w25
- add w20,w20,w10 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w13,ror#11 // Sigma1(e)
- ror w13,w21,#2
- add w20,w20,w17 // h+=Ch(e,f,g)
- eor w17,w21,w21,ror#9
- add w20,w20,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w24,w24,w20 // d+=h
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w13,w17,ror#13 // Sigma0(a)
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w20,w20,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w11,w11 // 8
-#endif
- add w20,w20,w17 // h+=Sigma0(a)
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w14,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w11 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w14,ror#11 // Sigma1(e)
- ror w14,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w14,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w12,w12 // 9
-#endif
- ldp w13,w14,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w15,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w12 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w15,ror#11 // Sigma1(e)
- ror w15,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w15,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w13,w13 // 10
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w0,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w13 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w0,ror#11 // Sigma1(e)
- ror w0,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w0,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w14,w14 // 11
-#endif
- ldp w15,w0,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w6,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w14 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w15,w15 // 12
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w7,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w15 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w0,w0 // 13
-#endif
- ldp w1,w2,[x1]
- add w23,w23,w17 // h+=Sigma0(a)
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w8,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w0 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w1,w1 // 14
-#endif
- ldr w6,[sp,#12]
- add w22,w22,w17 // h+=Sigma0(a)
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w9,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w1 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w2,w2 // 15
-#endif
- ldr w7,[sp,#0]
- add w21,w21,w17 // h+=Sigma0(a)
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
-.Loop_16_xx:
- ldr w8,[sp,#4]
- str w11,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w10,w5,#7
- and w17,w25,w24
- ror w9,w2,#17
- bic w19,w26,w24
- ror w11,w20,#2
- add w27,w27,w3 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w10,w10,w5,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w11,w11,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w9,w9,w2,ror#19
- eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w11,w20,ror#22 // Sigma0(a)
- eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
- add w4,w4,w13
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w4,w4,w10
- add w27,w27,w17 // h+=Sigma0(a)
- add w4,w4,w9
- ldr w9,[sp,#8]
- str w12,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w11,w6,#7
- and w17,w24,w23
- ror w10,w3,#17
- bic w28,w25,w23
- ror w12,w27,#2
- add w26,w26,w4 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w11,w11,w6,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w12,w12,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w10,w10,w3,ror#19
- eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w12,w27,ror#22 // Sigma0(a)
- eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
- add w5,w5,w14
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w5,w5,w11
- add w26,w26,w17 // h+=Sigma0(a)
- add w5,w5,w10
- ldr w10,[sp,#12]
- str w13,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w12,w7,#7
- and w17,w23,w22
- ror w11,w4,#17
- bic w19,w24,w22
- ror w13,w26,#2
- add w25,w25,w5 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w12,w12,w7,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w13,w13,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w11,w11,w4,ror#19
- eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w13,w26,ror#22 // Sigma0(a)
- eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
- add w6,w6,w15
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w6,w6,w12
- add w25,w25,w17 // h+=Sigma0(a)
- add w6,w6,w11
- ldr w11,[sp,#0]
- str w14,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w13,w8,#7
- and w17,w22,w21
- ror w12,w5,#17
- bic w28,w23,w21
- ror w14,w25,#2
- add w24,w24,w6 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w13,w13,w8,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w14,w14,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w12,w12,w5,ror#19
- eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w14,w25,ror#22 // Sigma0(a)
- eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
- add w7,w7,w0
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w7,w7,w13
- add w24,w24,w17 // h+=Sigma0(a)
- add w7,w7,w12
- ldr w12,[sp,#4]
- str w15,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w14,w9,#7
- and w17,w21,w20
- ror w13,w6,#17
- bic w19,w22,w20
- ror w15,w24,#2
- add w23,w23,w7 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w14,w14,w9,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w15,w15,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w13,w13,w6,ror#19
- eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w15,w24,ror#22 // Sigma0(a)
- eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
- add w8,w8,w1
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w8,w8,w14
- add w23,w23,w17 // h+=Sigma0(a)
- add w8,w8,w13
- ldr w13,[sp,#8]
- str w0,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w15,w10,#7
- and w17,w20,w27
- ror w14,w7,#17
- bic w28,w21,w27
- ror w0,w23,#2
- add w22,w22,w8 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w15,w15,w10,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w0,w0,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w14,w14,w7,ror#19
- eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w0,w23,ror#22 // Sigma0(a)
- eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
- add w9,w9,w2
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w9,w9,w15
- add w22,w22,w17 // h+=Sigma0(a)
- add w9,w9,w14
- ldr w14,[sp,#12]
- str w1,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w0,w11,#7
- and w17,w27,w26
- ror w15,w8,#17
- bic w19,w20,w26
- ror w1,w22,#2
- add w21,w21,w9 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w0,w0,w11,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w1,w1,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w15,w15,w8,ror#19
- eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w1,w22,ror#22 // Sigma0(a)
- eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
- add w10,w10,w3
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w10,w10,w0
- add w21,w21,w17 // h+=Sigma0(a)
- add w10,w10,w15
- ldr w15,[sp,#0]
- str w2,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w1,w12,#7
- and w17,w26,w25
- ror w0,w9,#17
- bic w28,w27,w25
- ror w2,w21,#2
- add w20,w20,w10 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w1,w1,w12,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w2,w2,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w0,w0,w9,ror#19
- eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w2,w21,ror#22 // Sigma0(a)
- eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
- add w11,w11,w4
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w11,w11,w1
- add w20,w20,w17 // h+=Sigma0(a)
- add w11,w11,w0
- ldr w0,[sp,#4]
- str w3,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w2,w13,#7
- and w17,w25,w24
- ror w1,w10,#17
- bic w19,w26,w24
- ror w3,w20,#2
- add w27,w27,w11 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w2,w2,w13,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w3,w3,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w1,w1,w10,ror#19
- eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w3,w20,ror#22 // Sigma0(a)
- eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
- add w12,w12,w5
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w12,w12,w2
- add w27,w27,w17 // h+=Sigma0(a)
- add w12,w12,w1
- ldr w1,[sp,#8]
- str w4,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w3,w14,#7
- and w17,w24,w23
- ror w2,w11,#17
- bic w28,w25,w23
- ror w4,w27,#2
- add w26,w26,w12 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w3,w3,w14,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w4,w4,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w2,w2,w11,ror#19
- eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w4,w27,ror#22 // Sigma0(a)
- eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
- add w13,w13,w6
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w13,w13,w3
- add w26,w26,w17 // h+=Sigma0(a)
- add w13,w13,w2
- ldr w2,[sp,#12]
- str w5,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w4,w15,#7
- and w17,w23,w22
- ror w3,w12,#17
- bic w19,w24,w22
- ror w5,w26,#2
- add w25,w25,w13 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w4,w4,w15,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w5,w5,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w3,w3,w12,ror#19
- eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w5,w26,ror#22 // Sigma0(a)
- eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
- add w14,w14,w7
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w14,w14,w4
- add w25,w25,w17 // h+=Sigma0(a)
- add w14,w14,w3
- ldr w3,[sp,#0]
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w5,w0,#7
- and w17,w22,w21
- ror w4,w13,#17
- bic w28,w23,w21
- ror w6,w25,#2
- add w24,w24,w14 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w5,w5,w0,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w6,w6,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w4,w4,w13,ror#19
- eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w25,ror#22 // Sigma0(a)
- eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
- add w15,w15,w8
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w15,w15,w5
- add w24,w24,w17 // h+=Sigma0(a)
- add w15,w15,w4
- ldr w4,[sp,#4]
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w6,w1,#7
- and w17,w21,w20
- ror w5,w14,#17
- bic w19,w22,w20
- ror w7,w24,#2
- add w23,w23,w15 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w6,w6,w1,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w7,w7,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w5,w5,w14,ror#19
- eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w24,ror#22 // Sigma0(a)
- eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
- add w0,w0,w9
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w0,w0,w6
- add w23,w23,w17 // h+=Sigma0(a)
- add w0,w0,w5
- ldr w5,[sp,#8]
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w7,w2,#7
- and w17,w20,w27
- ror w6,w15,#17
- bic w28,w21,w27
- ror w8,w23,#2
- add w22,w22,w0 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w7,w7,w2,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w8,w8,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w6,w6,w15,ror#19
- eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w23,ror#22 // Sigma0(a)
- eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
- add w1,w1,w10
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w1,w1,w7
- add w22,w22,w17 // h+=Sigma0(a)
- add w1,w1,w6
- ldr w6,[sp,#12]
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w8,w3,#7
- and w17,w27,w26
- ror w7,w0,#17
- bic w19,w20,w26
- ror w9,w22,#2
- add w21,w21,w1 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w8,w8,w3,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w9,w9,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w7,w7,w0,ror#19
- eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w22,ror#22 // Sigma0(a)
- eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
- add w2,w2,w11
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w2,w2,w8
- add w21,w21,w17 // h+=Sigma0(a)
- add w2,w2,w7
- ldr w7,[sp,#0]
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
- cbnz w19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#260 // rewind
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#2*4]
- add x1,x1,#14*4 // advance input pointer
- ldp w7,w8,[x0,#4*4]
- add w20,w20,w3
- ldp w9,w10,[x0,#6*4]
- add w21,w21,w4
- add w22,w22,w5
- add w23,w23,w6
- stp w20,w21,[x0]
- add w24,w24,w7
- add w25,w25,w8
- stp w22,w23,[x0,#2*4]
- add w26,w26,w9
- add w27,w27,w10
- cmp x1,x2
- stp w24,w25,[x0,#4*4]
- stp w26,w27,[x0,#6*4]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*4
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha256_block_data_order,.-sha256_block_data_order
-
-.align 6
-.type .LK256,%object
-.LK256:
- .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
- .long 0 //terminator
-.size .LK256,.-.LK256
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.type sha256_block_armv8,%function
-.align 6
-sha256_block_armv8:
-.Lv8_entry:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-
- ld1 {v0.4s,v1.4s},[x0]
- adr x3,.LK256
-
-.Loop_hw:
- ld1 {v4.16b-v7.16b},[x1],#64
- sub x2,x2,#1
- ld1 {v16.4s},[x3],#16
- rev32 v4.16b,v4.16b
- rev32 v5.16b,v5.16b
- rev32 v6.16b,v6.16b
- rev32 v7.16b,v7.16b
- orr v18.16b,v0.16b,v0.16b // offload
- orr v19.16b,v1.16b,v1.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- ld1 {v17.4s},[x3]
- add v16.4s,v16.4s,v6.4s
- sub x3,x3,#64*4-16 // rewind
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- add v17.4s,v17.4s,v7.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- add v0.4s,v0.4s,v18.4s
- add v1.4s,v1.4s,v19.4s
-
- cbnz x2,.Loop_hw
-
- st1 {v0.4s,v1.4s},[x0]
-
- ldr x29,[sp],#16
- ret
-.size sha256_block_armv8,.-sha256_block_armv8
-#endif
-#ifdef __KERNEL__
-.globl sha256_block_neon
-#endif
-.type sha256_block_neon,%function
-.align 4
-sha256_block_neon:
-.Lneon_entry:
- stp x29, x30, [sp, #-16]!
- mov x29, sp
- sub sp,sp,#16*4
-
- adr x16,.LK256
- add x2,x1,x2,lsl#6 // len to point at the end of inp
-
- ld1 {v0.16b},[x1], #16
- ld1 {v1.16b},[x1], #16
- ld1 {v2.16b},[x1], #16
- ld1 {v3.16b},[x1], #16
- ld1 {v4.4s},[x16], #16
- ld1 {v5.4s},[x16], #16
- ld1 {v6.4s},[x16], #16
- ld1 {v7.4s},[x16], #16
- rev32 v0.16b,v0.16b // yes, even on
- rev32 v1.16b,v1.16b // big-endian
- rev32 v2.16b,v2.16b
- rev32 v3.16b,v3.16b
- mov x17,sp
- add v4.4s,v4.4s,v0.4s
- add v5.4s,v5.4s,v1.4s
- add v6.4s,v6.4s,v2.4s
- st1 {v4.4s-v5.4s},[x17], #32
- add v7.4s,v7.4s,v3.4s
- st1 {v6.4s-v7.4s},[x17]
- sub x17,x17,#32
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#8]
- ldp w7,w8,[x0,#16]
- ldp w9,w10,[x0,#24]
- ldr w12,[sp,#0]
- mov w13,wzr
- eor w14,w4,w5
- mov w15,wzr
- b .L_00_48
-
-.align 4
-.L_00_48:
- ext v4.16b,v0.16b,v1.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v2.16b,v3.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v3.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v0.4s,v0.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v0.4s,v0.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v0.4s,v0.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v0.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v0.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v0.4s,#15
- add w8,w8,w12
- ushr v17.4s,v0.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v0.4s,#13
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v0.4s,v0.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v0.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v1.16b,v2.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v3.16b,v0.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v0.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v1.4s,v1.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v1.4s,v1.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v1.4s,v1.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v1.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v1.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v1.4s,#15
- add w4,w4,w12
- ushr v17.4s,v1.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v1.4s,#13
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v1.4s,v1.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v1.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- ext v4.16b,v2.16b,v3.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v0.16b,v1.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v1.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v2.4s,v2.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v2.4s,v2.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v2.4s,v2.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v2.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v2.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v2.4s,#15
- add w8,w8,w12
- ushr v17.4s,v2.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v2.4s,#13
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v2.4s,v2.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v2.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v3.16b,v0.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v1.16b,v2.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v2.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v3.4s,v3.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v3.4s,v3.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v3.4s,v3.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v3.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v3.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v3.4s,#15
- add w4,w4,w12
- ushr v17.4s,v3.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v3.4s,#13
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v3.4s,v3.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v3.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[x16]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- cmp w12,#0 // check for K256 terminator
- ldr w12,[sp,#0]
- sub x17,x17,#64
- bne .L_00_48
-
- sub x16,x16,#256 // rewind x16
- cmp x1,x2
- mov x17, #64
- csel x17, x17, xzr, eq
- sub x1,x1,x17 // avoid SEGV
- mov x17,sp
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v0.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v0.16b,v0.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v0.4s
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v1.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v1.16b,v1.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v1.4s
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v2.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v2.16b,v2.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v2.4s
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v3.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v3.16b,v3.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v3.4s
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w3,w3,w15 // h+=Sigma0(a) from the past
- ldp w11,w12,[x0,#0]
- add w3,w3,w13 // h+=Maj(a,b,c) from the past
- ldp w13,w14,[x0,#8]
- add w3,w3,w11 // accumulate
- add w4,w4,w12
- ldp w11,w12,[x0,#16]
- add w5,w5,w13
- add w6,w6,w14
- ldp w13,w14,[x0,#24]
- add w7,w7,w11
- add w8,w8,w12
- ldr w12,[sp,#0]
- stp w3,w4,[x0,#0]
- add w9,w9,w13
- mov w13,wzr
- stp w5,w6,[x0,#8]
- add w10,w10,w14
- stp w7,w8,[x0,#16]
- eor w14,w4,w5
- stp w9,w10,[x0,#24]
- mov w15,wzr
- mov x17,sp
- b.ne .L_00_48
-
- ldr x29,[x29]
- add sp,sp,#16*4+16
- ret
-.size sha256_block_neon,.-sha256_block_neon
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
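
For readers mapping the inline comments in the deleted assembly above (h+=K[i], h+=X[i], Ch(e,f,g), Sigma1(e), Maj(a,b,c), Sigma0(a), sigma0/sigma1) back to the underlying math, a minimal scalar C sketch of one SHA-256 round and the message-schedule step, as defined by FIPS 180-4, is given here for reference. It is a sketch only, not part of this patch or of the kernel sources, and the identifiers (ror32, sha256_round, sha256_schedule) are illustrative.

#include <stdint.h>

/* Rotate right by n bits (0 < n < 32). */
static inline uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* The FIPS 180-4 SHA-256 building blocks named in the comments above. */
static inline uint32_t Ch(uint32_t e, uint32_t f, uint32_t g)  { return (e & f) ^ (~e & g); }
static inline uint32_t Maj(uint32_t a, uint32_t b, uint32_t c) { return (a & b) ^ (a & c) ^ (b & c); }
static inline uint32_t Sigma0(uint32_t a) { return ror32(a, 2)  ^ ror32(a, 13) ^ ror32(a, 22); }
static inline uint32_t Sigma1(uint32_t e) { return ror32(e, 6)  ^ ror32(e, 11) ^ ror32(e, 25); }
static inline uint32_t sigma0(uint32_t x) { return ror32(x, 7)  ^ ror32(x, 18) ^ (x >> 3);  }
static inline uint32_t sigma1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

/* Message-schedule extension: W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]. */
static inline uint32_t sha256_schedule(const uint32_t W[], int i)
{
	return sigma1(W[i - 2]) + W[i - 7] + sigma0(W[i - 15]) + W[i - 16];
}

/*
 * One round of the compression function: s[0..7] holds a..h,
 * Ki is the round constant (the K256 table above), Wi the schedule word.
 */
static void sha256_round(uint32_t s[8], uint32_t Ki, uint32_t Wi)
{
	uint32_t t1 = s[7] + Sigma1(s[4]) + Ch(s[4], s[5], s[6]) + Ki + Wi;
	uint32_t t2 = Sigma0(s[0]) + Maj(s[0], s[1], s[2]);

	s[7] = s[6]; s[6] = s[5]; s[5] = s[4];
	s[4] = s[3] + t1;		/* the "d+=h" step in the assembly comments */
	s[3] = s[2]; s[2] = s[1]; s[1] = s[0];
	s[0] = t1 + t2;			/* new a = T1 + T2 */
}
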
diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped
deleted file mode 100644
index e063a6106720..000000000000
--- a/arch/arm64/crypto/sha512-core.S_shipped
+++ /dev/null
@@ -1,1093 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// This code is taken from the OpenSSL project but the author (Andy Polyakov)
-// has relicensed it under the GPLv2. Therefore this program is free software;
-// you can redistribute it and/or modify it under the terms of the GNU General
-// Public License version 2 as published by the Free Software Foundation.
-//
-// The original headers, including the original license headers, are
-// included below for completeness.
-
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-// SHA256-hw SHA256(*) SHA512
-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***)	Super-impressive coefficients over gcc-generated code are an
-//	indication of some compiler "pathology"; most notably, code
-//	generated with -mgeneral-regs-only is significantly faster
-//	and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it makes no sense to implement a NEON
-// version of SHA256 for 64-bit processors. This is because the performance
-// improvement on the most widespread Cortex-A5x processors was observed
-// to be marginal: the same on Cortex-A53 and ~10% on A57. But then it was
-// observed that 32-bit NEON SHA256 performs significantly better than
-// the 64-bit scalar version on *some* of the more recent processors. As
-// a result, a 64-bit NEON version of SHA256 was added to provide the best
-// all-round performance. For example, it executes ~30% faster on X-Gene
-// and Mongoose. [For reference, a NEON version of SHA512 is bound to
-// deliver much less improvement, likely *negative* on Cortex-A5x,
-// which is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha512_block_data_order
-.type sha512_block_data_order,%function
-.align 6
-sha512_block_data_order:
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*8
-
- ldp x20,x21,[x0] // load context
- ldp x22,x23,[x0,#2*8]
- ldp x24,x25,[x0,#4*8]
- add x2,x1,x2,lsl#7 // end of input
- ldp x26,x27,[x0,#6*8]
- adr x30,.LK512
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp x3,x4,[x1],#2*8
- ldr x19,[x30],#8 // *K++
- eor x28,x21,x22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev x3,x3 // 0
-#endif
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x6,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x3 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x4,x4 // 1
-#endif
- ldp x5,x6,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x7,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x4 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x5,x5 // 2
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x8,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x5 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x6,x6 // 3
-#endif
- ldp x7,x8,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x9,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x6 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x7,x7 // 4
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x10,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x7 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x10,ror#18 // Sigma1(e)
- ror x10,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x10,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x8,x8 // 5
-#endif
- ldp x9,x10,[x1],#2*8
- add x23,x23,x17 // h+=Sigma0(a)
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x11,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x8 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x11,ror#18 // Sigma1(e)
- ror x11,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x11,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x9,x9 // 6
-#endif
- add x22,x22,x17 // h+=Sigma0(a)
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x12,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x9 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x12,ror#18 // Sigma1(e)
- ror x12,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x12,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x10,x10 // 7
-#endif
- ldp x11,x12,[x1],#2*8
- add x21,x21,x17 // h+=Sigma0(a)
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- eor x13,x25,x25,ror#23
- and x17,x26,x25
- bic x28,x27,x25
- add x20,x20,x10 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x13,ror#18 // Sigma1(e)
- ror x13,x21,#28
- add x20,x20,x17 // h+=Ch(e,f,g)
- eor x17,x21,x21,ror#5
- add x20,x20,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x24,x24,x20 // d+=h
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x13,x17,ror#34 // Sigma0(a)
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x20,x20,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x11,x11 // 8
-#endif
- add x20,x20,x17 // h+=Sigma0(a)
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x14,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x11 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x14,ror#18 // Sigma1(e)
- ror x14,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x14,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x12,x12 // 9
-#endif
- ldp x13,x14,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x15,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x12 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x15,ror#18 // Sigma1(e)
- ror x15,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x15,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x13,x13 // 10
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x0,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x13 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x0,ror#18 // Sigma1(e)
- ror x0,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x0,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x14,x14 // 11
-#endif
- ldp x15,x0,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x6,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x14 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x15,x15 // 12
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x7,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x15 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x0,x0 // 13
-#endif
- ldp x1,x2,[x1]
- add x23,x23,x17 // h+=Sigma0(a)
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x8,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x0 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x1,x1 // 14
-#endif
- ldr x6,[sp,#24]
- add x22,x22,x17 // h+=Sigma0(a)
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x9,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x1 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x2,x2 // 15
-#endif
- ldr x7,[sp,#0]
- add x21,x21,x17 // h+=Sigma0(a)
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
-.Loop_16_xx:
- ldr x8,[sp,#8]
- str x11,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x10,x5,#1
- and x17,x25,x24
- ror x9,x2,#19
- bic x19,x26,x24
- ror x11,x20,#28
- add x27,x27,x3 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x10,x10,x5,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x11,x11,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x9,x9,x2,ror#61
- eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x11,x20,ror#39 // Sigma0(a)
- eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
- add x4,x4,x13
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x4,x4,x10
- add x27,x27,x17 // h+=Sigma0(a)
- add x4,x4,x9
- ldr x9,[sp,#16]
- str x12,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x11,x6,#1
- and x17,x24,x23
- ror x10,x3,#19
- bic x28,x25,x23
- ror x12,x27,#28
- add x26,x26,x4 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x11,x11,x6,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x12,x12,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x10,x10,x3,ror#61
- eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x12,x27,ror#39 // Sigma0(a)
- eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
- add x5,x5,x14
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x5,x5,x11
- add x26,x26,x17 // h+=Sigma0(a)
- add x5,x5,x10
- ldr x10,[sp,#24]
- str x13,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x12,x7,#1
- and x17,x23,x22
- ror x11,x4,#19
- bic x19,x24,x22
- ror x13,x26,#28
- add x25,x25,x5 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x12,x12,x7,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x13,x13,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x11,x11,x4,ror#61
- eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x13,x26,ror#39 // Sigma0(a)
- eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
- add x6,x6,x15
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x6,x6,x12
- add x25,x25,x17 // h+=Sigma0(a)
- add x6,x6,x11
- ldr x11,[sp,#0]
- str x14,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x13,x8,#1
- and x17,x22,x21
- ror x12,x5,#19
- bic x28,x23,x21
- ror x14,x25,#28
- add x24,x24,x6 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x13,x13,x8,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x14,x14,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x12,x12,x5,ror#61
- eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x14,x25,ror#39 // Sigma0(a)
- eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
- add x7,x7,x0
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x7,x7,x13
- add x24,x24,x17 // h+=Sigma0(a)
- add x7,x7,x12
- ldr x12,[sp,#8]
- str x15,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x14,x9,#1
- and x17,x21,x20
- ror x13,x6,#19
- bic x19,x22,x20
- ror x15,x24,#28
- add x23,x23,x7 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x14,x14,x9,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x15,x15,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x13,x13,x6,ror#61
- eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x15,x24,ror#39 // Sigma0(a)
- eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
- add x8,x8,x1
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x8,x8,x14
- add x23,x23,x17 // h+=Sigma0(a)
- add x8,x8,x13
- ldr x13,[sp,#16]
- str x0,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x15,x10,#1
- and x17,x20,x27
- ror x14,x7,#19
- bic x28,x21,x27
- ror x0,x23,#28
- add x22,x22,x8 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x15,x15,x10,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x0,x0,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x14,x14,x7,ror#61
- eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x0,x23,ror#39 // Sigma0(a)
- eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
- add x9,x9,x2
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x9,x9,x15
- add x22,x22,x17 // h+=Sigma0(a)
- add x9,x9,x14
- ldr x14,[sp,#24]
- str x1,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x0,x11,#1
- and x17,x27,x26
- ror x15,x8,#19
- bic x19,x20,x26
- ror x1,x22,#28
- add x21,x21,x9 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x0,x0,x11,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x1,x1,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x15,x15,x8,ror#61
- eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x1,x22,ror#39 // Sigma0(a)
- eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
- add x10,x10,x3
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x10,x10,x0
- add x21,x21,x17 // h+=Sigma0(a)
- add x10,x10,x15
- ldr x15,[sp,#0]
- str x2,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x1,x12,#1
- and x17,x26,x25
- ror x0,x9,#19
- bic x28,x27,x25
- ror x2,x21,#28
- add x20,x20,x10 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x1,x1,x12,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x2,x2,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x0,x0,x9,ror#61
- eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x2,x21,ror#39 // Sigma0(a)
- eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
- add x11,x11,x4
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x11,x11,x1
- add x20,x20,x17 // h+=Sigma0(a)
- add x11,x11,x0
- ldr x0,[sp,#8]
- str x3,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x2,x13,#1
- and x17,x25,x24
- ror x1,x10,#19
- bic x19,x26,x24
- ror x3,x20,#28
- add x27,x27,x11 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x2,x2,x13,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x3,x3,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x1,x1,x10,ror#61
- eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x3,x20,ror#39 // Sigma0(a)
- eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
- add x12,x12,x5
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x12,x12,x2
- add x27,x27,x17 // h+=Sigma0(a)
- add x12,x12,x1
- ldr x1,[sp,#16]
- str x4,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x3,x14,#1
- and x17,x24,x23
- ror x2,x11,#19
- bic x28,x25,x23
- ror x4,x27,#28
- add x26,x26,x12 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x3,x3,x14,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x4,x4,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x2,x2,x11,ror#61
- eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x4,x27,ror#39 // Sigma0(a)
- eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
- add x13,x13,x6
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x13,x13,x3
- add x26,x26,x17 // h+=Sigma0(a)
- add x13,x13,x2
- ldr x2,[sp,#24]
- str x5,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x4,x15,#1
- and x17,x23,x22
- ror x3,x12,#19
- bic x19,x24,x22
- ror x5,x26,#28
- add x25,x25,x13 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x4,x4,x15,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x5,x5,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x3,x3,x12,ror#61
- eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x5,x26,ror#39 // Sigma0(a)
- eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
- add x14,x14,x7
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x14,x14,x4
- add x25,x25,x17 // h+=Sigma0(a)
- add x14,x14,x3
- ldr x3,[sp,#0]
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x5,x0,#1
- and x17,x22,x21
- ror x4,x13,#19
- bic x28,x23,x21
- ror x6,x25,#28
- add x24,x24,x14 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x5,x5,x0,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x6,x6,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x4,x4,x13,ror#61
- eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x25,ror#39 // Sigma0(a)
- eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
- add x15,x15,x8
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x15,x15,x5
- add x24,x24,x17 // h+=Sigma0(a)
- add x15,x15,x4
- ldr x4,[sp,#8]
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x6,x1,#1
- and x17,x21,x20
- ror x5,x14,#19
- bic x19,x22,x20
- ror x7,x24,#28
- add x23,x23,x15 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x6,x6,x1,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x7,x7,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x5,x5,x14,ror#61
- eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x24,ror#39 // Sigma0(a)
- eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
- add x0,x0,x9
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x0,x0,x6
- add x23,x23,x17 // h+=Sigma0(a)
- add x0,x0,x5
- ldr x5,[sp,#16]
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x7,x2,#1
- and x17,x20,x27
- ror x6,x15,#19
- bic x28,x21,x27
- ror x8,x23,#28
- add x22,x22,x0 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x7,x7,x2,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x8,x8,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x6,x6,x15,ror#61
- eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x23,ror#39 // Sigma0(a)
- eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
- add x1,x1,x10
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x1,x1,x7
- add x22,x22,x17 // h+=Sigma0(a)
- add x1,x1,x6
- ldr x6,[sp,#24]
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x8,x3,#1
- and x17,x27,x26
- ror x7,x0,#19
- bic x19,x20,x26
- ror x9,x22,#28
- add x21,x21,x1 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x8,x8,x3,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x9,x9,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x7,x7,x0,ror#61
- eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x22,ror#39 // Sigma0(a)
- eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
- add x2,x2,x11
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x2,x2,x8
- add x21,x21,x17 // h+=Sigma0(a)
- add x2,x2,x7
- ldr x7,[sp,#0]
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
- cbnz x19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#648 // rewind
-
- ldp x3,x4,[x0]
- ldp x5,x6,[x0,#2*8]
- add x1,x1,#14*8 // advance input pointer
- ldp x7,x8,[x0,#4*8]
- add x20,x20,x3
- ldp x9,x10,[x0,#6*8]
- add x21,x21,x4
- add x22,x22,x5
- add x23,x23,x6
- stp x20,x21,[x0]
- add x24,x24,x7
- add x25,x25,x8
- stp x22,x23,[x0,#2*8]
- add x26,x26,x9
- add x27,x27,x10
- cmp x1,x2
- stp x24,x25,[x0,#4*8]
- stp x26,x27,[x0,#6*8]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*8
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha512_block_data_order,.-sha512_block_data_order
-
-.align 6
-.type .LK512,%object
-.LK512:
- .quad 0x428a2f98d728ae22,0x7137449123ef65cd
- .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
- .quad 0x3956c25bf348b538,0x59f111f1b605d019
- .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
- .quad 0xd807aa98a3030242,0x12835b0145706fbe
- .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
- .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
- .quad 0x9bdc06a725c71235,0xc19bf174cf692694
- .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
- .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
- .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
- .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
- .quad 0x983e5152ee66dfab,0xa831c66d2db43210
- .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
- .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
- .quad 0x06ca6351e003826f,0x142929670a0e6e70
- .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
- .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
- .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
- .quad 0x81c2c92e47edaee6,0x92722c851482353b
- .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
- .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
- .quad 0xd192e819d6ef5218,0xd69906245565a910
- .quad 0xf40e35855771202a,0x106aa07032bbd1b8
- .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
- .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
- .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
- .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
- .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
- .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
- .quad 0x90befffa23631e28,0xa4506cebde82bde9
- .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
- .quad 0xca273eceea26619c,0xd186b8c721c0c207
- .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
- .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
- .quad 0x113f9804bef90dae,0x1b710b35131c471b
- .quad 0x28db77f523047d84,0x32caab7b40c72493
- .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
- .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
- .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
- .quad 0 // terminator
-.size .LK512,.-.LK512
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 07ac208edc89..26889dbfe904 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -5,3 +5,5 @@ generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += set_memory.h
generic-y += user.h
+
+generated-y += cpucaps.h
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 8a078fc662ac..7e157ab6cd50 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -3,12 +3,10 @@
#define __ASM_ALTERNATIVE_MACROS_H
#include <asm/cpucaps.h>
+#include <asm/insn-def.h>
#define ARM64_CB_PATCH ARM64_NCAPS
-/* A64 instructions are always 32 bits. */
-#define AARCH64_INSN_SIZE 4
-
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
@@ -197,11 +195,6 @@ alternative_endif
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
-.macro user_alt, label, oldinstr, newinstr, cond
-9999: alternative_insn "\oldinstr", "\newinstr", \cond
- _asm_extable 9999b, \label
-.endm
-
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 934b9be582d2..4ad22c3135db 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
#define gic_read_lpir(c) readq_relaxed(c)
#define gic_write_lpir(v, c) writeq_relaxed(v, c)
-#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l) \
+ dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
#define gits_read_baser(c) readq_relaxed(c)
#define gits_write_baser(v, c) writeq_relaxed(v, c)
diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h
index 1c9a3a0c5fa5..ec1d9655f885 100644
--- a/arch/arm64/include/asm/asm-prototypes.h
+++ b/arch/arm64/include/asm/asm-prototypes.h
@@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b);
long long __ashrti3(long long a, int b);
long long __lshrti3(long long a, int b);
+/*
+ * This function uses a custom calling convention and cannot be called from C so
+ * this prototype is not entirely accurate.
+ */
+void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info);
+
#endif /* __ASM_PROTOTYPES_H */
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index 8ca2dc0661ee..f1bba5fc61c4 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -7,19 +7,7 @@
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
-#ifdef CONFIG_ARM64_PTR_AUTH
-/*
- * thread.keys_user.ap* as offset exceeds the #imm offset range
- * so use the base value of ldp as thread.keys_user and offset as
- * thread.keys_user.ap*.
- */
- .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
- mov \tmp1, #THREAD_KEYS_USER
- add \tmp1, \tsk, \tmp1
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
- msr_s SYS_APIAKEYLO_EL1, \tmp2
- msr_s SYS_APIAKEYHI_EL1, \tmp3
- .endm
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
mov \tmp1, #THREAD_KEYS_KERNEL
@@ -42,6 +30,33 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
alternative_else_nop_endif
.endm
+#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+ .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+ .endm
+
+ .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+ .endm
+
+ .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
+ .endm
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * thread.keys_user.ap* as offset exceeds the #imm offset range
+ * so use the base value of ldp as thread.keys_user and offset as
+ * thread.keys_user.ap*.
+ */
+ .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+ mov \tmp1, #THREAD_KEYS_USER
+ add \tmp1, \tsk, \tmp1
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
+ msr_s SYS_APIAKEYLO_EL1, \tmp2
+ msr_s SYS_APIAKEYHI_EL1, \tmp3
+ .endm
+
.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
mrs \tmp1, id_aa64isar1_el1
ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
@@ -64,17 +79,11 @@ alternative_else_nop_endif
.Lno_addr_auth\@:
.endm
-#else /* CONFIG_ARM64_PTR_AUTH */
+#else /* !CONFIG_ARM64_PTR_AUTH */
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
.endm
- .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
- .endm
-
- .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
- .endm
-
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASM_ASM_POINTER_AUTH_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8418c1bd8f04..89faca0e740d 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -130,15 +130,27 @@ alternative_endif
.endm
/*
- * Emit an entry into the exception table
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
*/
- .macro _asm_extable, from, to
+ .macro _asm_extable, insn, fixup
.pushsection __ex_table, "a"
.align 3
- .long (\from - .), (\to - .)
+ .long (\insn - .), (\fixup - .)
.popsection
.endm
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+ .macro _cond_extable, insn, fixup
+ .ifnc \fixup,
+ _asm_extable \insn, \fixup
+ .endif
+ .endm
+
+
#define USER(l, x...) \
9999: x; \
_asm_extable 9999b, l
@@ -232,17 +244,25 @@ lr .req x30 // link register
* @dst: destination register
*/
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
mrs \dst, tpidr_el2
.endm
#else
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \dst, tpidr_el1
alternative_else
mrs \dst, tpidr_el2
alternative_endif
.endm
+
+ .macro set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ msr tpidr_el1, \src
+alternative_else
+ msr tpidr_el2, \src
+alternative_endif
+ .endm
#endif
/*
@@ -253,7 +273,7 @@ alternative_endif
.macro adr_this_cpu, dst, sym, tmp
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
add \dst, \dst, \tmp
.endm
@@ -264,7 +284,7 @@ alternative_endif
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
ldr \dst, [\dst, \tmp]
.endm
@@ -375,51 +395,53 @@ alternative_cb_end
bfi \tcr, \tmp0, \pos, #3
.endm
+ .macro __dcache_op_workaround_clean_cache, op, addr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc \op, \addr
+alternative_else
+ dc civac, \addr
+alternative_endif
+ .endm
+
/*
* Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [start, end)
*
* op: operation passed to dc instruction
 * domain: domain used in dsb instruction
- * kaddr: starting virtual address of the region
- * size: size of the region
- * Corrupts: kaddr, size, tmp1, tmp2
+ * start: starting virtual address of the region
+ * end: end virtual address of the region
+ * fixup: optional label to branch to on user fault
+ * Corrupts: start, end, tmp1, tmp2
*/
- .macro __dcache_op_workaround_clean_cache, op, kaddr
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
- dc \op, \kaddr
-alternative_else
- dc civac, \kaddr
-alternative_endif
- .endm
-
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
dcache_line_size \tmp1, \tmp2
- add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
- bic \kaddr, \kaddr, \tmp2
-9998:
+ bic \start, \start, \tmp2
+.Ldcache_op\@:
.ifc \op, cvau
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvac
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvap
- sys 3, c7, c12, 1, \kaddr // dc cvap
+ sys 3, c7, c12, 1, \start // dc cvap
.else
.ifc \op, cvadp
- sys 3, c7, c13, 1, \kaddr // dc cvadp
+ sys 3, c7, c13, 1, \start // dc cvadp
.else
- dc \op, \kaddr
+ dc \op, \start
.endif
.endif
.endif
.endif
- add \kaddr, \kaddr, \tmp1
- cmp \kaddr, \size
- b.lo 9998b
+ add \start, \start, \tmp1
+ cmp \start, \end
+ b.lo .Ldcache_op\@
dsb \domain
+
+ _cond_extable .Ldcache_op\@, \fixup
.endm
/*
@@ -427,20 +449,22 @@ alternative_endif
* [start, end)
*
* start, end: virtual addresses describing the region
- * label: A label to branch to on user fault.
+ * fixup: optional label to branch to on user fault
* Corrupts: tmp1, tmp2
*/
- .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+ .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
icache_line_size \tmp1, \tmp2
sub \tmp2, \tmp1, #1
bic \tmp2, \start, \tmp2
-9997:
-USER(\label, ic ivau, \tmp2) // invalidate I line PoU
+.Licache_op\@:
+ ic ivau, \tmp2 // invalidate I line PoU
add \tmp2, \tmp2, \tmp1
cmp \tmp2, \end
- b.lo 9997b
+ b.lo .Licache_op\@
dsb ish
isb
+
+ _cond_extable .Licache_op\@, \fixup
.endm
/*
@@ -745,7 +769,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
cbz \tmp, \lbl
#endif
adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
- this_cpu_offset \tmp2
+ get_this_cpu_offset \tmp2
ldr w\tmp, [\tmp, \tmp2]
cbnz w\tmp, \lbl // yield on pending softirq in task context
.Lnoyield_\@:
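
The reworked dcache_by_line_op above now takes an exclusive end address and an optional fixup label rather than a base/size pair. A minimal C-level sketch of the loop the macro implements, assuming line_size stands in for the CTR_EL0-derived D-cache line size and dc_op() for the inline-asm dc instruction:

    /*
     * Align the start address down to the cache line size, then issue one
     * maintenance operation per line until the exclusive end address is
     * reached; the real macro finishes with a dsb for the given domain.
     */
    static void dcache_by_line(unsigned long start, unsigned long end,
                               unsigned long line_size,
                               void (*dc_op)(unsigned long addr))
    {
            unsigned long addr;

            for (addr = start & ~(line_size - 1); addr < end; addr += line_size)
                    dc_op(addr);
    }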
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index b56a4b2bc248..c9979273d389 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -223,6 +223,4 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define ARCH_ATOMIC
-
#endif /* __ASM_ATOMIC_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2175ec0004ed..451e11e5fd23 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -74,7 +74,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
* This insanity brought to you by speculative system register reads,
* out-of-order memory accesses, sequence locks and Thomas Gleixner.
*
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
*/
#define arch_counter_enforce_ordering(val) do { \
u64 tmp, _val = (val); \
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a074459f8f2f..a9c0716e7440 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -47,7 +47,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN (128)
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 52e5c1623224..543c997eb3b7 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -30,45 +30,58 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT I-cache.
*
- * flush_icache_range(start, end)
+ * All functions below apply to the interval [start, end)
+ * - start - virtual start address (inclusive)
+ * - end - virtual end address (exclusive)
*
- * Ensure coherency between the I-cache and the D-cache in the
- * region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * caches_clean_inval_pou(start, end)
*
- * invalidate_icache_range(start, end)
+ * Ensure coherency between the I-cache and the D-cache region to
+ * the Point of Unification.
*
- * Invalidate the I-cache in the region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * caches_clean_inval_user_pou(start, end)
*
- * __flush_cache_user_range(start, end)
+ * Ensure coherency between the I-cache and the D-cache region to
+ * the Point of Unification.
+ * Use only if the region might access user memory.
*
- * Ensure coherency between the I-cache and the D-cache in the
- * region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * icache_inval_pou(start, end)
*
- * __flush_dcache_area(kaddr, size)
+ * Invalidate I-cache region to the Point of Unification.
*
- * Ensure that the data held in page is written back.
- * - kaddr - page address
- * - size - region size
+ * dcache_clean_inval_poc(start, end)
+ *
+ * Clean and invalidate D-cache region to the Point of Coherency.
+ *
+ * dcache_inval_poc(start, end)
+ *
+ * Invalidate D-cache region to the Point of Coherency.
+ *
+ * dcache_clean_poc(start, end)
+ *
+ * Clean D-cache region to the Point of Coherency.
+ *
+ * dcache_clean_pop(start, end)
+ *
+ * Clean D-cache region to the Point of Persistence.
+ *
+ * dcache_clean_pou(start, end)
+ *
+ * Clean D-cache region to the Point of Unification.
*/
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
-extern void __inval_dcache_area(void *addr, size_t len);
-extern void __clean_dcache_area_poc(void *addr, size_t len);
-extern void __clean_dcache_area_pop(void *addr, size_t len);
-extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
-extern void sync_icache_aliases(void *kaddr, unsigned long len);
+extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
+extern void icache_inval_pou(unsigned long start, unsigned long end);
+extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_pop(unsigned long start, unsigned long end);
+extern void dcache_clean_pou(unsigned long start, unsigned long end);
+extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(unsigned long start, unsigned long end);
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
- __flush_icache_range(start, end);
+ caches_clean_inval_pou(start, end);
/*
* IPI all online CPUs so that they undergo a context synchronization
@@ -122,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-static __always_inline void __flush_icache_all(void)
+static __always_inline void icache_inval_all_pou(void)
{
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
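
The cacheflush helpers above now take an explicit [start, end) interval instead of a pointer and length, matching the conversions made elsewhere in this series. A minimal sketch of updating a caller, with buf and len purely illustrative:

    #include <linux/types.h>
    #include <asm/cacheflush.h>

    static void flush_buffer_for_device(void *buf, size_t len)
    {
            unsigned long start = (unsigned long)buf;

            /* old interface: __flush_dcache_area(buf, len); */
            dcache_clean_inval_poc(start, start + len);
    }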
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 6fb2e6bcc392..dc3ea4080e2e 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -23,4 +23,20 @@
#define __builtin_return_address(val) \
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+#ifdef CONFIG_CFI_CLANG
+/*
+ * With CONFIG_CFI_CLANG, the compiler replaces function address
+ * references with the address of the function's CFI jump table
+ * entry. The function_nocfi macro always returns the address of the
+ * actual function instead.
+ */
+#define function_nocfi(x) ({ \
+ void *addr; \
+ asm("adrp %0, " __stringify(x) "\n\t" \
+ "add %0, %0, :lo12:" __stringify(x) \
+ : "=r" (addr)); \
+ addr; \
+})
+#endif
+
#endif /* __ASM_COMPILER_H */
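
A hedged usage sketch for function_nocfi() in its new home: cpu_resume_entry and install_fw_entry() are hypothetical names, the point being that firmware must be handed the function's real entry address rather than its CFI jump-table entry:

    extern void cpu_resume_entry(void);     /* hypothetical assembly entry point */

    static int example_boot_cpu(int (*install_fw_entry)(phys_addr_t pa))
    {
            return install_fw_entry(__pa_symbol(function_nocfi(cpu_resume_entry)));
    }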
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 7faae6ff3ab4..0f6d16faa540 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -12,26 +12,7 @@
/*
* Records attributes of an individual CPU.
*/
-struct cpuinfo_arm64 {
- struct cpu cpu;
- struct kobject kobj;
- u32 reg_ctr;
- u32 reg_cntfrq;
- u32 reg_dczid;
- u32 reg_midr;
- u32 reg_revidr;
-
- u64 reg_id_aa64dfr0;
- u64 reg_id_aa64dfr1;
- u64 reg_id_aa64isar0;
- u64 reg_id_aa64isar1;
- u64 reg_id_aa64mmfr0;
- u64 reg_id_aa64mmfr1;
- u64 reg_id_aa64mmfr2;
- u64 reg_id_aa64pfr0;
- u64 reg_id_aa64pfr1;
- u64 reg_id_aa64zfr0;
-
+struct cpuinfo_32bit {
u32 reg_id_dfr0;
u32 reg_id_dfr1;
u32 reg_id_isar0;
@@ -54,6 +35,30 @@ struct cpuinfo_arm64 {
u32 reg_mvfr0;
u32 reg_mvfr1;
u32 reg_mvfr2;
+};
+
+struct cpuinfo_arm64 {
+ struct cpu cpu;
+ struct kobject kobj;
+ u64 reg_ctr;
+ u64 reg_cntfrq;
+ u64 reg_dczid;
+ u64 reg_midr;
+ u64 reg_revidr;
+ u64 reg_gmid;
+
+ u64 reg_id_aa64dfr0;
+ u64 reg_id_aa64dfr1;
+ u64 reg_id_aa64isar0;
+ u64 reg_id_aa64isar1;
+ u64 reg_id_aa64mmfr0;
+ u64 reg_id_aa64mmfr1;
+ u64 reg_id_aa64mmfr2;
+ u64 reg_id_aa64pfr0;
+ u64 reg_id_aa64pfr1;
+ u64 reg_id_aa64zfr0;
+
+ struct cpuinfo_32bit aarch32;
/* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
u64 reg_zcr;
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
deleted file mode 100644
index b0c5eda0498f..000000000000
--- a/arch/arm64/include/asm/cpucaps.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm64/include/asm/cpucaps.h
- *
- * Copyright (C) 2016 ARM Ltd.
- */
-#ifndef __ASM_CPUCAPS_H
-#define __ASM_CPUCAPS_H
-
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_SPECTRE_V3A 14
-#define ARM64_HAS_CNP 15
-#define ARM64_HAS_NO_FPSIMD 16
-#define ARM64_WORKAROUND_REPEAT_TLBI 17
-#define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18
-#define ARM64_WORKAROUND_858921 19
-#define ARM64_WORKAROUND_CAVIUM_30115 20
-#define ARM64_HAS_DCPOP 21
-#define ARM64_SVE 22
-#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_SPECTRE_V2 24
-#define ARM64_HAS_RAS_EXTN 25
-#define ARM64_WORKAROUND_843419 26
-#define ARM64_HAS_CACHE_IDC 27
-#define ARM64_HAS_CACHE_DIC 28
-#define ARM64_HW_DBM 29
-#define ARM64_SPECTRE_V4 30
-#define ARM64_MISMATCHED_CACHE_TYPE 31
-#define ARM64_HAS_STAGE2_FWB 32
-#define ARM64_HAS_CRC32 33
-#define ARM64_SSBS 34
-#define ARM64_WORKAROUND_1418040 35
-#define ARM64_HAS_SB 36
-#define ARM64_WORKAROUND_SPECULATIVE_AT 37
-#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
-#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
-#define ARM64_HAS_GENERIC_AUTH_ARCH 40
-#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
-#define ARM64_HAS_IRQ_PRIO_MASKING 42
-#define ARM64_HAS_DCPODP 43
-#define ARM64_WORKAROUND_1463225 44
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
-#define ARM64_WORKAROUND_1542419 47
-#define ARM64_HAS_E0PD 48
-#define ARM64_HAS_RNG 49
-#define ARM64_HAS_AMU_EXTN 50
-#define ARM64_HAS_ADDRESS_AUTH 51
-#define ARM64_HAS_GENERIC_AUTH 52
-#define ARM64_HAS_32BIT_EL1 53
-#define ARM64_BTI 54
-#define ARM64_HAS_ARMv8_4_TTL 55
-#define ARM64_HAS_TLB_RANGE 56
-#define ARM64_MTE 57
-#define ARM64_WORKAROUND_1508412 58
-#define ARM64_HAS_LDAPR 59
-#define ARM64_KVM_PROTECTED_MODE 60
-#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP 61
-#define ARM64_HAS_EPAN 62
-
-#define ARM64_NCAPS 63
-
-#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 338840c00e8e..9bb9d11750d7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -619,6 +619,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
return val > 0;
}
+static inline bool id_aa64pfr1_mte(u64 pfr1)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+
+ return val >= ID_AA64PFR1_MTE;
+}
+
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
@@ -630,9 +637,15 @@ static inline bool cpu_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
+const struct cpumask *system_32bit_el0_cpumask(void);
+DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
static inline bool system_supports_32bit_el0(void)
{
- return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+ id_aa64pfr0_32bit_el0(pfr0);
}
static inline bool system_supports_4kb_granule(void)
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 3c5ddb429ea2..14a19d1141bd 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -18,4 +18,39 @@ static inline int arm_cpuidle_suspend(int index)
return -EOPNOTSUPP;
}
#endif
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#include <asm/arch_gicv3.h>
+
+struct arm_cpuidle_irq_context {
+ unsigned long pmr;
+ unsigned long daif_bits;
+};
+
+#define arm_cpuidle_save_irq_context(__c) \
+ do { \
+ struct arm_cpuidle_irq_context *c = __c; \
+ if (system_uses_irq_prio_masking()) { \
+ c->daif_bits = read_sysreg(daif); \
+ write_sysreg(c->daif_bits | PSR_I_BIT | PSR_F_BIT, \
+ daif); \
+ c->pmr = gic_read_pmr(); \
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); \
+ } \
+ } while (0)
+
+#define arm_cpuidle_restore_irq_context(__c) \
+ do { \
+ struct arm_cpuidle_irq_context *c = __c; \
+ if (system_uses_irq_prio_masking()) { \
+ gic_write_pmr(c->pmr); \
+ write_sysreg(c->daif_bits, daif); \
+ } \
+ } while (0)
+#else
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+#endif
#endif
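
A sketch of how an idle-entry path might bracket its low-power call with the new save/restore macros; enter_low_power_state() is a placeholder for the real PSCI or WFI call:

    extern int enter_low_power_state(void);  /* placeholder */

    static int example_idle_enter(void)
    {
            struct arm_cpuidle_irq_context context;
            int ret;

            arm_cpuidle_save_irq_context(&context);
            ret = enter_low_power_state();
            arm_cpuidle_restore_irq_context(&context);

            return ret;
    }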
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 3578aba9c608..1bed37eb013a 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
- __flush_dcache_area(addr, size);
+ dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 6546158d2f2d..4afbc45b8bb0 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -31,20 +31,35 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
-asmlinkage void el1_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
+asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
+
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+ void (*func)(struct pt_regs *));
asmlinkage void enter_from_user_mode(void);
asmlinkage void exit_to_user_mode(void);
-void arm64_enter_nmi(struct pt_regs *regs);
-void arm64_exit_nmi(struct pt_regs *regs);
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
@@ -57,4 +72,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
+void do_serror(struct pt_regs *regs, unsigned int esr);
+
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 2599504674b5..c072161d5c65 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -69,7 +69,7 @@ static inline void *sve_pffr(struct thread_struct *thread)
extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1);
-extern void sve_flush_live(void);
+extern void sve_flush_live(unsigned long vq_minus_1);
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index a2563992d2dc..059204477ce6 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -213,8 +213,10 @@
mov v\nz\().16b, v\nz\().16b
.endm
-.macro sve_flush
+.macro sve_flush_z
_for n, 0, 31, _sve_flush_z \n
+.endm
+.macro sve_flush_p_ffr
_for n, 0, 15, _sve_pfalse \n
_sve_wrffr 0
.endm
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 5abf91e3494c..1242f71937f8 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -23,8 +23,7 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/arm64/include/asm/insn-def.h b/arch/arm64/include/asm/insn-def.h
new file mode 100644
index 000000000000..2c075f615c6a
--- /dev/null
+++ b/arch/arm64/include/asm/insn-def.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+#endif /* __ASM_INSN_DEF_H */
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 4ebb9c054ccc..6b776c8667b2 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -10,7 +10,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
-#include <asm/alternative.h>
+#include <asm/insn-def.h>
#ifndef __ASSEMBLY__
/*
@@ -30,6 +30,7 @@
*/
enum aarch64_insn_encoding_class {
AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_SVE, /* SVE instructions */
AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
@@ -294,6 +295,12 @@ __AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000)
__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
+__AARCH64_INSN_FUNCS(store_imm, 0x3FC00000, 0x39000000)
+__AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000)
+__AARCH64_INSN_FUNCS(store_pre, 0x3FE00C00, 0x38000C00)
+__AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
+__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
+__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
@@ -302,6 +309,8 @@ __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000)
__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
+__AARCH64_INSN_FUNCS(stp, 0x7FC00000, 0x29000000)
+__AARCH64_INSN_FUNCS(ldp, 0x7FC00000, 0x29400000)
__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
@@ -334,6 +343,7 @@ __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00)
__AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000)
__AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000)
__AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(mov_reg, 0x7FE0FFE0, 0x2A0003E0)
__AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000)
__AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000)
__AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000)
@@ -368,6 +378,14 @@ __AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
+__AARCH64_INSN_FUNCS(dmb, 0xFFFFF0FF, 0xD50330BF)
+__AARCH64_INSN_FUNCS(dsb_base, 0xFFFFF0FF, 0xD503309F)
+__AARCH64_INSN_FUNCS(dsb_nxs, 0xFFFFF3FF, 0xD503323F)
+__AARCH64_INSN_FUNCS(isb, 0xFFFFF0FF, 0xD50330DF)
+__AARCH64_INSN_FUNCS(sb, 0xFFFFFFFF, 0xD50330FF)
+__AARCH64_INSN_FUNCS(clrex, 0xFFFFF0FF, 0xD503305F)
+__AARCH64_INSN_FUNCS(ssbb, 0xFFFFFFFF, 0xD503309F)
+__AARCH64_INSN_FUNCS(pssbb, 0xFFFFFFFF, 0xD503349F)
#undef __AARCH64_INSN_FUNCS
@@ -379,8 +397,47 @@ static inline bool aarch64_insn_is_adr_adrp(u32 insn)
return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn);
}
-int aarch64_insn_read(void *addr, u32 *insnp);
-int aarch64_insn_write(void *addr, u32 insn);
+static inline bool aarch64_insn_is_dsb(u32 insn)
+{
+ return aarch64_insn_is_dsb_base(insn) || aarch64_insn_is_dsb_nxs(insn);
+}
+
+static inline bool aarch64_insn_is_barrier(u32 insn)
+{
+ return aarch64_insn_is_dmb(insn) || aarch64_insn_is_dsb(insn) ||
+ aarch64_insn_is_isb(insn) || aarch64_insn_is_sb(insn) ||
+ aarch64_insn_is_clrex(insn) || aarch64_insn_is_ssbb(insn) ||
+ aarch64_insn_is_pssbb(insn);
+}
+
+static inline bool aarch64_insn_is_store_single(u32 insn)
+{
+ return aarch64_insn_is_store_imm(insn) ||
+ aarch64_insn_is_store_pre(insn) ||
+ aarch64_insn_is_store_post(insn);
+}
+
+static inline bool aarch64_insn_is_store_pair(u32 insn)
+{
+ return aarch64_insn_is_stp(insn) ||
+ aarch64_insn_is_stp_pre(insn) ||
+ aarch64_insn_is_stp_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_single(u32 insn)
+{
+ return aarch64_insn_is_load_imm(insn) ||
+ aarch64_insn_is_load_pre(insn) ||
+ aarch64_insn_is_load_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_pair(u32 insn)
+{
+ return aarch64_insn_is_ldp(insn) ||
+ aarch64_insn_is_ldp_pre(insn) ||
+ aarch64_insn_is_ldp_post(insn);
+}
+
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
bool aarch64_insn_uses_literal(u32 insn);
bool aarch64_insn_is_branch(u32 insn);
@@ -487,9 +544,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
-int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
-int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
-
s32 aarch64_insn_adrp_get_offset(u32 insn);
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset);
@@ -506,6 +560,7 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn);
typedef bool (pstate_check_t)(unsigned long);
extern pstate_check_t * const aarch32_opcode_cond_checks[16];
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
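
The new single/pair load and store predicates compose naturally; a small sketch that classifies an A64 opcode word using only helpers introduced in this hunk:

    #include <asm/insn.h>

    /* true if the opcode is any single or pair load/store form */
    static bool insn_accesses_memory(u32 insn)
    {
            return aarch64_insn_is_load_single(insn) ||
                   aarch64_insn_is_load_pair(insn) ||
                   aarch64_insn_is_store_single(insn) ||
                   aarch64_insn_is_store_pair(insn);
    }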
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index d44df9d62fc9..3512184cfec1 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -18,9 +18,9 @@
* 64K (section size = 512M).
*/
#ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_SWAPPER_USES_SECTION_MAPS 1
+#define ARM64_KERNEL_USES_PMD_MAPS 1
#else
-#define ARM64_SWAPPER_USES_SECTION_MAPS 0
+#define ARM64_KERNEL_USES_PMD_MAPS 0
#endif
/*
@@ -33,7 +33,7 @@
* VA range, so pages required to map highest possible PA are reserved in all
* cases.
*/
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
#else
@@ -90,9 +90,9 @@
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
/* Initial memory map size */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
-#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
-#define SWAPPER_BLOCK_SIZE SECTION_SIZE
+#if ARM64_KERNEL_USES_PMD_MAPS
+#define SWAPPER_BLOCK_SHIFT PMD_SHIFT
+#define SWAPPER_BLOCK_SIZE PMD_SIZE
#define SWAPPER_TABLE_SHIFT PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
@@ -100,16 +100,13 @@
#define SWAPPER_TABLE_SHIFT PMD_SHIFT
#endif
-/* The size of the initial kernel direct mapping */
-#define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT)
-
/*
* Initial memory map attributes.
*/
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#else
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
@@ -125,7 +122,7 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5)
+#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT PMD_SHIFT
#endif
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 692c9049befa..d436831dd706 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,7 +12,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
-#define HCR_ATA (UL(1) << 56)
+#define HCR_ATA_SHIFT 56
+#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index cf8df032b9c3..9f0bf2109be7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -8,6 +8,7 @@
#define __ARM_KVM_ASM_H__
#include <asm/hyp_image.h>
+#include <asm/insn.h>
#include <asm/virt.h>
#define ARM_EXIT_WITH_SERROR_BIT 31
@@ -63,6 +64,7 @@
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
#ifndef __ASSEMBLY__
@@ -201,6 +203,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
+
extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f612c090f2e4..fd418955e31e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID2;
+
+ if (kvm_has_mte(vcpu->kvm))
+ vcpu->arch.hcr_el2 |= HCR_ATA;
}
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -463,4 +466,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+ return test_bit(feature, vcpu->arch.features);
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7cd7d5c8c4bc..41911585ae0c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -46,6 +46,7 @@
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
+#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -132,6 +133,9 @@ struct kvm_arch {
u8 pfr0_csv2;
u8 pfr0_csv3;
+
+ /* Memory Tagging Extension enabled for the guest */
+ bool mte_enabled;
};
struct kvm_vcpu_fault_info {
@@ -206,6 +210,12 @@ enum vcpu_sysreg {
CNTP_CVAL_EL0,
CNTP_CTL_EL0,
+ /* Memory Tagging Extension registers */
+ RGSR_EL1, /* Random Allocation Tag Seed Register */
+ GCR_EL1, /* Tag Control Register */
+ TFSR_EL1, /* Tag Fault Status Register (EL1) */
+ TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
+
/* 32bit specific registers. Keep them at the end of the range */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -556,16 +566,11 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_generic generic;
};
struct kvm_vcpu_stat {
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
+ struct kvm_vcpu_stat_generic generic;
u64 hvc_exit_stat;
u64 wfe_exit_stat;
u64 wfi_exit_stat;
@@ -721,6 +726,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
+long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags);
+
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
@@ -769,6 +777,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 25ed956f9af1..b52c5c4b9a3d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -180,17 +180,16 @@ static inline void *__kvm_vector_slot2addr(void *base,
struct kvm;
-#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l) \
+ dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+static inline void __clean_dcache_guest_page(void *va, size_t size)
{
- void *va = page_address(pfn_to_page(pfn));
-
/*
* With FWB, we ensure that the guest always accesses memory using
* cacheable attributes, and we don't have to clean to PoC when
@@ -203,18 +202,14 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
kvm_flush_dcache_to_poc(va, size);
}
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
- unsigned long size)
+static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
if (icache_is_aliasing()) {
/* any kind of VIPT cache */
- __flush_icache_all();
+ icache_inval_all_pou();
} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
- void *va = page_address(pfn_to_page(pfn));
-
- invalidate_icache_range((unsigned long)va,
- (unsigned long)va + size);
+ icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}
}
diff --git a/arch/arm64/include/asm/kvm_mte.h b/arch/arm64/include/asm/kvm_mte.h
new file mode 100644
index 000000000000..de002636eb1f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mte.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 ARM Ltd.
+ */
+#ifndef __ASM_KVM_MTE_H
+#define __ASM_KVM_MTE_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_MTE
+
+.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
+alternative_if_not ARM64_MTE
+ b .L__skip_switch\@
+alternative_else_nop_endif
+ mrs \reg1, hcr_el2
+ tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
+
+ mrs_s \reg1, SYS_RGSR_EL1
+ str \reg1, [\h_ctxt, #CPU_RGSR_EL1]
+ mrs_s \reg1, SYS_GCR_EL1
+ str \reg1, [\h_ctxt, #CPU_GCR_EL1]
+
+ ldr \reg1, [\g_ctxt, #CPU_RGSR_EL1]
+ msr_s SYS_RGSR_EL1, \reg1
+ ldr \reg1, [\g_ctxt, #CPU_GCR_EL1]
+ msr_s SYS_GCR_EL1, \reg1
+
+.L__skip_switch\@:
+.endm
+
+.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
+alternative_if_not ARM64_MTE
+ b .L__skip_switch\@
+alternative_else_nop_endif
+ mrs \reg1, hcr_el2
+ tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
+
+ mrs_s \reg1, SYS_RGSR_EL1
+ str \reg1, [\g_ctxt, #CPU_RGSR_EL1]
+ mrs_s \reg1, SYS_GCR_EL1
+ str \reg1, [\g_ctxt, #CPU_GCR_EL1]
+
+ ldr \reg1, [\h_ctxt, #CPU_RGSR_EL1]
+ msr_s SYS_RGSR_EL1, \reg1
+ ldr \reg1, [\h_ctxt, #CPU_GCR_EL1]
+ msr_s SYS_GCR_EL1, \reg1
+
+ isb
+
+.L__skip_switch\@:
+.endm
+
+#else /* !CONFIG_ARM64_MTE */
+
+.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
+.endm
+
+.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
+.endm
+
+#endif /* CONFIG_ARM64_MTE */
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_KVM_MTE_H */
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index c3674c47d48c..f004c0115d89 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -27,23 +27,29 @@ typedef u64 kvm_pte_t;
/**
* struct kvm_pgtable_mm_ops - Memory management callbacks.
- * @zalloc_page: Allocate a single zeroed memory page. The @arg parameter
- * can be used by the walker to pass a memcache. The
- * initial refcount of the page is 1.
- * @zalloc_pages_exact: Allocate an exact number of zeroed memory pages. The
- * @size parameter is in bytes, and is rounded-up to the
- * next page boundary. The resulting allocation is
- * physically contiguous.
- * @free_pages_exact: Free an exact number of memory pages previously
- * allocated by zalloc_pages_exact.
- * @get_page: Increment the refcount on a page.
- * @put_page: Decrement the refcount on a page. When the refcount
- * reaches 0 the page is automatically freed.
- * @page_count: Return the refcount of a page.
- * @phys_to_virt: Convert a physical address into a virtual address mapped
- * in the current context.
- * @virt_to_phys: Convert a virtual address mapped in the current context
- * into a physical address.
+ * @zalloc_page: Allocate a single zeroed memory page.
+ * The @arg parameter can be used by the walker
+ * to pass a memcache. The initial refcount of
+ * the page is 1.
+ * @zalloc_pages_exact: Allocate an exact number of zeroed memory pages.
+ * The @size parameter is in bytes, and is rounded
+ * up to the next page boundary. The resulting
+ * allocation is physically contiguous.
+ * @free_pages_exact: Free an exact number of memory pages previously
+ * allocated by zalloc_pages_exact.
+ * @get_page: Increment the refcount on a page.
+ * @put_page: Decrement the refcount on a page. When the
+ * refcount reaches 0 the page is automatically
+ * freed.
+ * @page_count: Return the refcount of a page.
+ * @phys_to_virt: Convert a physical address into a virtual
+ * address mapped in the current context.
+ * @virt_to_phys: Convert a virtual address mapped in the current
+ * context into a physical address.
+ * @dcache_clean_inval_poc: Clean and invalidate the data cache to the PoC
+ * for the specified memory address range.
+ * @icache_inval_pou: Invalidate the instruction cache to the PoU
+ * for the specified memory address range.
*/
struct kvm_pgtable_mm_ops {
void* (*zalloc_page)(void *arg);
@@ -54,6 +60,8 @@ struct kvm_pgtable_mm_ops {
int (*page_count)(void *addr);
void* (*phys_to_virt)(phys_addr_t phys);
phys_addr_t (*virt_to_phys)(void *addr);
+ void (*dcache_clean_inval_poc)(void *addr, size_t size);
+ void (*icache_inval_pou)(void *addr, size_t size);
};
/**
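
A minimal sketch of wiring the two new kvm_pgtable_mm_ops cache callbacks to the renamed maintenance helpers; the example_* names are illustrative and only the cache members are shown:

    static void example_dcache_clean_inval_poc(void *addr, size_t size)
    {
            dcache_clean_inval_poc((unsigned long)addr,
                                   (unsigned long)addr + size);
    }

    static void example_icache_inval_pou(void *addr, size_t size)
    {
            icache_inval_pou((unsigned long)addr, (unsigned long)addr + size);
    }

    static struct kvm_pgtable_mm_ops example_mm_ops = {
            .dcache_clean_inval_poc = example_dcache_clean_inval_poc,
            .icache_inval_pou       = example_icache_inval_pou,
            /* allocation and refcount callbacks omitted */
    };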
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index ba89a9af820a..9906541a6861 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -56,8 +56,16 @@
SYM_FUNC_START_ALIAS(__pi_##x); \
SYM_FUNC_START_WEAK(x)
+#define SYM_FUNC_START_WEAK_ALIAS_PI(x) \
+ SYM_FUNC_START_ALIAS(__pi_##x); \
+ SYM_START(x, SYM_L_WEAK, SYM_A_ALIGN)
+
#define SYM_FUNC_END_PI(x) \
SYM_FUNC_END(x); \
SYM_FUNC_END_ALIAS(__pi_##x)
+#define SYM_FUNC_END_ALIAS_PI(x) \
+ SYM_FUNC_END_ALIAS(x); \
+ SYM_FUNC_END_ALIAS(__pi_##x)
+
#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 87b90dc27a43..824a3655dd93 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -135,10 +135,8 @@
#define MT_NORMAL 0
#define MT_NORMAL_TAGGED 1
#define MT_NORMAL_NC 2
-#define MT_NORMAL_WT 3
-#define MT_DEVICE_nGnRnE 4
-#define MT_DEVICE_nGnRE 5
-#define MT_DEVICE_GRE 6
+#define MT_DEVICE_nGnRnE 3
+#define MT_DEVICE_nGnRE 4
/*
* Memory types for Stage-2 translation
@@ -323,22 +321,6 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
-#ifdef CONFIG_CFI_CLANG
-/*
- * With CONFIG_CFI_CLANG, the compiler replaces function address
- * references with the address of the function's CFI jump table
- * entry. The function_nocfi macro always returns the address of the
- * actual function instead.
- */
-#define function_nocfi(x) ({ \
- void *addr; \
- asm("adrp %0, " __stringify(x) "\n\t" \
- "add %0, %0, :lo12:" __stringify(x) \
- : "=r" (addr)); \
- addr; \
-})
-#endif
-
/*
* virt_to_page(x) convert a _valid_ virtual address to struct page *
* virt_addr_valid(x) indicates whether a virtual address is valid
@@ -369,7 +351,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_addr_valid(addr) ({ \
__typeof__(addr) __addr = __tag_reset(addr); \
- __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+ __is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr)); \
})
void dump_mem_limit(void);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index d3cef9133539..eeb210997149 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
return;
if (mm == &init_mm)
- ttbr = __pa_symbol(reserved_pg_dir);
+ ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
else
- ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+ ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index 810045628c66..a11ccadd47d2 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -1,7 +1,20 @@
-#ifdef CONFIG_ARM64_MODULE_PLTS
SECTIONS {
+#ifdef CONFIG_ARM64_MODULE_PLTS
.plt 0 (NOLOAD) : { BYTE(0) }
.init.plt 0 (NOLOAD) : { BYTE(0) }
.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
-}
#endif
+
+#ifdef CONFIG_KASAN_SW_TAGS
+ /*
+ * Outlined checks go into comdat-deduplicated sections named .text.hot.
+ * Because they are in comdats they are not combined by the linker and
+ * we otherwise end up with multiple sections with the same .text.hot
+ * name in the .ko file. The kernel module loader warns if it sees
+ * multiple sections with the same name so we use this sections
+ * directive to force them into a single section and silence the
+ * warning.
+ */
+ .text.hot : { *(.text.hot) }
+#endif
+}
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
index cf241b0f0a42..626d359b396e 100644
--- a/arch/arm64/include/asm/mte-def.h
+++ b/arch/arm64/include/asm/mte-def.h
@@ -7,6 +7,7 @@
#define MTE_GRANULE_SIZE UL(16)
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
+#define MTE_GRANULES_PER_PAGE (PAGE_SIZE / MTE_GRANULE_SIZE)
#define MTE_TAG_SHIFT 56
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index ddd4d17cf9a0..d952352bd008 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -48,43 +48,84 @@ static inline u8 mte_get_random_tag(void)
return mte_get_ptr_tag(addr);
}
+static inline u64 __stg_post(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
+ : "+r"(p)
+ :
+ : "memory");
+ return p;
+}
+
+static inline u64 __stzg_post(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
+ : "+r"(p)
+ :
+ : "memory");
+ return p;
+}
+
+static inline void __dc_gva(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
+}
+
+static inline void __dc_gzva(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
+}
+
/*
* Assign allocation tags for a region of memory based on the pointer tag.
* Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ * size must be MTE_GRANULE_SIZE aligned.
*/
-static inline void mte_set_mem_tag_range(void *addr, size_t size,
- u8 tag, bool init)
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
+ bool init)
{
- u64 curr, end;
+ u64 curr, mask, dczid_bs, end1, end2, end3;
- if (!size)
- return;
+ /* Read DC G(Z)VA block size from the system register. */
+ dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);
curr = (u64)__tag_set(addr, tag);
- end = curr + size;
+ mask = dczid_bs - 1;
+ /* STG/STZG up to the end of the first block. */
+ end1 = curr | mask;
+ end3 = curr + size;
+ /* DC GVA / GZVA in [end1, end2) */
+ end2 = end3 & ~mask;
/*
- * 'asm volatile' is required to prevent the compiler to move
- * the statement outside of the loop.
+ * The following code uses STG on the first DC GVA block even if the
+ * start address is aligned - it appears to be faster than an alignment
+ * check + conditional branch. Also, if the range size is at least 2 DC
+ * GVA blocks, the first two loops can use post-condition to save one
+ * branch each.
*/
- if (init) {
- do {
- asm volatile(__MTE_PREAMBLE "stzg %0, [%0]"
- :
- : "r" (curr)
- : "memory");
- curr += MTE_GRANULE_SIZE;
- } while (curr != end);
- } else {
- do {
- asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
- :
- : "r" (curr)
- : "memory");
- curr += MTE_GRANULE_SIZE;
- } while (curr != end);
- }
+#define SET_MEMTAG_RANGE(stg_post, dc_gva) \
+ do { \
+ if (size >= 2 * dczid_bs) { \
+ do { \
+ curr = stg_post(curr); \
+ } while (curr < end1); \
+ \
+ do { \
+ dc_gva(curr); \
+ curr += dczid_bs; \
+ } while (curr < end2); \
+ } \
+ \
+ while (curr < end3) \
+ curr = stg_post(curr); \
+ } while (0)
+
+ if (init)
+ SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
+ else
+ SET_MEMTAG_RANGE(__stg_post, __dc_gva);
+#undef SET_MEMTAG_RANGE
}
void mte_enable_kernel_sync(void);
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index bc88a1ced0d7..58c7f80f5596 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -37,7 +37,8 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged PG_arch_2
-void mte_sync_tags(pte_t *ptep, pte_t pte);
+void mte_zero_clear_page_tags(void *addr);
+void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
@@ -53,7 +54,10 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged 0
-static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
+static inline void mte_zero_clear_page_tags(void *addr)
+{
+}
+static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 012cffc574e8..993a27ea6f54 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__
#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
+#include <linux/types.h> /* for gfp_t */
#include <asm/pgtable-types.h>
struct page;
@@ -28,16 +29,19 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr);
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+
+void tag_clear_highpage(struct page *to);
+#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
typedef struct page *pgtable_t;
-extern int pfn_valid(unsigned long);
+int pfn_is_map_memory(unsigned long pfn);
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
new file mode 100644
index 000000000000..6bf5adc56295
--- /dev/null
+++ b/arch/arm64/include/asm/patching.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_PATCHING_H
+#define __ASM_PATCHING_H
+
+#include <linux/types.h>
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASM_PATCHING_H */
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 60731f602d3e..4ef6f19331f9 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -239,6 +239,11 @@
/* PMMIR_EL1.SLOTS mask */
#define ARMV8_PMU_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
+#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
+#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 31fbab3d6f99..8433a2058eb1 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -86,6 +86,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
VM_BUG_ON(mm == &init_mm);
__pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index b82575a33f8b..40085e53f573 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -72,13 +72,6 @@
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
/*
- * Section address mask and size definitions.
- */
-#define SECTION_SHIFT PMD_SHIFT
-#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
-
-/*
* Contiguous page definitions.
*/
#define CONT_PTE_SHIFT (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 938092df76cf..7032f04c8ac6 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings;
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0b10204e72fc..508c7ffad515 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -26,8 +26,6 @@
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
-#define FIRST_USER_ADDRESS 0UL
-
#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
@@ -314,9 +312,25 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte);
- if (system_supports_mte() &&
- pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
- mte_sync_tags(ptep, pte);
+ /*
+ * If the PTE would provide user space access to the tags associated
+ * with it then ensure that the MTE tags are synchronised. Although
+ * pte_access_permitted() returns false for exec-only mappings, they
+ * don't expose tags (instruction fetches don't check tags).
+ */
+ if (system_supports_mte() && pte_access_permitted(pte, false) &&
+ !pte_special(pte)) {
+ pte_t old_pte = READ_ONCE(*ptep);
+ /*
+ * We only need to synchronise if the new PTE has tags enabled
+ * or if swapping in (in which case another mapping may have
+ * set tags in the past even if this PTE isn't tagged).
+ * (!pte_none() && !pte_present()) is an open coded version of
+ * is_swap_pte()
+ */
+ if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
+ mte_sync_tags(old_pte, pte);
+ }
__check_racy_pte_update(mm, ptep, pte);
@@ -511,13 +525,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
-
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
#define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
@@ -604,7 +617,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
+#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) pud_sect(pud)
#define pud_valid(pud) pte_valid(pud_pte(pud))
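A minimal sketch of the tag-sync condition described in the set_pte_at() comment above, with the pte helpers reduced to plain booleans (illustrative only; the kernel operates on pte_t values):

#include <stdbool.h>

/* Illustrative stand-in for a pte: only the bits the predicate needs. */
struct pte_state {
	bool present;
	bool none;
	bool tagged;
};

/* Mirrors the comment: synchronise when the new PTE has tags enabled, or
 * when the old entry is a swap entry, i.e. !pte_none() && !pte_present()
 * (an open-coded is_swap_pte()). */
static bool needs_mte_tag_sync(struct pte_state old_pte, struct pte_state pte)
{
	return pte.tagged || (!old_pte.none && !old_pte.present);
}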
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index d50416be99be..28a78b67d9b4 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -31,10 +31,6 @@ struct ptrauth_keys_user {
struct ptrauth_key apga;
};
-struct ptrauth_keys_kernel {
- struct ptrauth_key apia;
-};
-
#define __ptrauth_key_install_nosync(k, v) \
do { \
struct ptrauth_key __pki_v = (v); \
@@ -42,6 +38,29 @@ do { \
write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
} while (0)
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+
+struct ptrauth_keys_kernel {
+ struct ptrauth_key apia;
+};
+
+static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
+{
+ if (system_supports_address_auth())
+ get_random_bytes(&keys->apia, sizeof(keys->apia));
+}
+
+static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
+{
+ if (!system_supports_address_auth())
+ return;
+
+ __ptrauth_key_install_nosync(APIA, keys->apia);
+ isb();
+}
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys)
{
if (system_supports_address_auth()) {
@@ -69,21 +88,6 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
ptrauth_keys_install_user(keys);
}
-static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
-{
- if (system_supports_address_auth())
- get_random_bytes(&keys->apia, sizeof(keys->apia));
-}
-
-static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
-{
- if (!system_supports_address_auth())
- return;
-
- __ptrauth_key_install_nosync(APIA, keys->apia);
- isb();
-}
-
extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
@@ -121,11 +125,6 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_thread_switch_user(tsk) \
ptrauth_keys_install_user(&(tsk)->thread.keys_user)
-#define ptrauth_thread_init_kernel(tsk) \
- ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
-#define ptrauth_thread_switch_kernel(tsk) \
- ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
-
#else /* CONFIG_ARM64_PTR_AUTH */
#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
@@ -134,11 +133,19 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_suspend_exit()
#define ptrauth_thread_init_user()
-#define ptrauth_thread_init_kernel(tsk)
#define ptrauth_thread_switch_user(tsk)
-#define ptrauth_thread_switch_kernel(tsk)
#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_thread_init_kernel(tsk) \
+ ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
+#define ptrauth_thread_switch_kernel(tsk) \
+ ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
+#else
+#define ptrauth_thread_init_kernel(tsk)
+#define ptrauth_thread_switch_kernel(tsk)
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
#define PR_PAC_ENABLED_KEYS_MASK \
(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 80e946b2abee..e83f0982b99c 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 9df3feeee890..b6517fd03d7b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -148,8 +148,10 @@ struct thread_struct {
struct debug_info debug; /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
struct ptrauth_keys_user keys_user;
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
struct ptrauth_keys_kernel keys_kernel;
#endif
+#endif
#ifdef CONFIG_ARM64_MTE
u64 gcr_user_excl;
#endif
@@ -257,8 +259,6 @@ void set_task_sctlr_el1(u64 sctlr);
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
-asmlinkage void arm64_preempt_schedule_irq(void);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
@@ -329,13 +329,13 @@ long get_tagged_addr_ctrl(struct task_struct *task);
* of header definitions for the use of task_stack_page.
*/
-#define current_top_of_stack() \
-({ \
- struct stack_info _info; \
- BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
- _info.high; \
+#define current_top_of_stack() \
+({ \
+ struct stack_info _info; \
+ BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \
+ _info.high; \
})
-#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index eaa2cd92e4c1..8297bccf0784 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -9,18 +9,18 @@
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
#else
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index 63e0b92a5fbb..7bea1d705dd6 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -37,13 +37,17 @@ struct sdei_registered_event;
asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
struct sdei_registered_event *arg);
+unsigned long do_sdei_event(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
struct stack_info;
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp,
+bool _on_sdei_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
@@ -51,7 +55,7 @@ static inline bool on_sdei_stack(unsigned long sp,
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
- return _on_sdei_stack(sp, info);
+ return _on_sdei_stack(sp, size, info);
return false;
}
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 0e357757c0cc..fc55f5a57a06 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
/*
* Initial data for bringing up a secondary CPU.
- * @stack - sp for the secondary CPU
* @status - Result passed back from the secondary CPU to
* indicate failure.
*/
struct secondary_data {
- void *stack;
struct task_struct *task;
long status;
};
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 4b33ca620679..1801399204d7 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -69,14 +69,14 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_stack(unsigned long sp, unsigned long low,
- unsigned long high, enum stack_type type,
- struct stack_info *info)
+static inline bool on_stack(unsigned long sp, unsigned long size,
+ unsigned long low, unsigned long high,
+ enum stack_type type, struct stack_info *info)
{
if (!low)
return false;
- if (sp < low || sp >= high)
+ if (sp < low || sp + size < sp || sp + size > high)
return false;
if (info) {
@@ -87,38 +87,38 @@ static inline bool on_stack(unsigned long sp, unsigned long low,
return true;
}
-static inline bool on_irq_stack(unsigned long sp,
+static inline bool on_irq_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}
static inline bool on_task_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_TASK, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info) { return false; }
#endif
@@ -128,21 +128,21 @@ static inline bool on_overflow_stack(unsigned long sp,
* context.
*/
static inline bool on_accessible_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
- if (on_task_stack(tsk, sp, info))
+ if (on_task_stack(tsk, sp, size, info))
return true;
if (tsk != current || preemptible())
return false;
- if (on_irq_stack(sp, info))
+ if (on_irq_stack(sp, size, info))
return true;
- if (on_overflow_stack(sp, info))
+ if (on_overflow_stack(sp, size, info))
return true;
- if (on_sdei_stack(sp, info))
+ if (on_sdei_stack(sp, size, info))
return true;
return false;
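The reworked on_stack() test above now takes the object size and rejects ranges that would wrap; a standalone sketch of the same bounds check, assuming only that low/high delimit a candidate stack:

#include <stdbool.h>

/* Illustrative only: true when [sp, sp + size) fits inside [low, high) and
 * the addition does not overflow - the same test as the patched on_stack(). */
static bool range_on_stack(unsigned long sp, unsigned long size,
			   unsigned long low, unsigned long high)
{
	if (!low)
		return false;

	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	return true;
}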
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 65d15700a168..7b9c3acba684 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -651,7 +651,8 @@
#define INIT_SCTLR_EL2_MMU_ON \
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \
- SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
+ SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \
+ SCTLR_ELx_ITFSB | SCTLR_EL2_RES1)
#define INIT_SCTLR_EL2_MMU_OFF \
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
@@ -703,9 +704,7 @@
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
-#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
#define MAIR_ATTR_NORMAL_NC UL(0x44)
-#define MAIR_ATTR_NORMAL_WT UL(0xbb)
#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
#define MAIR_ATTR_NORMAL UL(0xff)
#define MAIR_ATTR_MASK UL(0xff)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 61c97d3b58c7..c995d1f4594f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
*/
static inline int tlb_get_level(struct mmu_gather *tlb)
{
+ /* The TTL field is only valid for the leaf entry. */
+ if (tlb->freed_tables)
+ return 0;
+
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
tlb->cleared_puds ||
tlb->cleared_p4ds))
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 7859749d6628..99ffcafc736c 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -893,8 +893,8 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+#define __NR_quotactl_fd 443
+__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 24223adae150..b3edde68bc3e 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -184,6 +184,17 @@ struct kvm_vcpu_events {
__u32 reserved[12];
};
+struct kvm_arm_copy_mte_tags {
+ __u64 guest_ipa;
+ __u64 length;
+ void __user *addr;
+ __u64 flags;
+ __u64 reserved[2];
+};
+
+#define KVM_ARM_TAGS_TO_GUEST 0
+#define KVM_ARM_TAGS_FROM_GUEST 1
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
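For context, a hedged user-space sketch of filling the new kvm_arm_copy_mte_tags structure; the wrapper function and the ioctl that would consume the struct are assumptions for illustration, not part of this patch:

#include <stdint.h>
#include <string.h>

/* Mirrors the UAPI layout added above (void __user * becomes a plain pointer). */
struct kvm_arm_copy_mte_tags {
	uint64_t guest_ipa;
	uint64_t length;
	void *addr;
	uint64_t flags;
	uint64_t reserved[2];
};

#define KVM_ARM_TAGS_TO_GUEST   0
#define KVM_ARM_TAGS_FROM_GUEST 1

/* Illustrative only: prepare a request to read tags back from guest memory. */
static void prepare_tag_copy(struct kvm_arm_copy_mte_tags *req,
			     uint64_t ipa, uint64_t len, void *buf)
{
	memset(req, 0, sizeof(*req));
	req->guest_ipa = ipa;
	req->length = len;
	req->addr = buf;
	req->flags = KVM_ARM_TAGS_FROM_GUEST;
	/* The VM ioctl that consumes this request is defined elsewhere in
	 * the series and is omitted here. */
}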
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 6cc97730790e..cce308586fcc 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -14,15 +14,22 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
CFLAGS_syscall.o += -fno-stack-protector
+# It's not safe to invoke KCOV when portions of the kernel environment aren't
+# available or are out-of-sync with HW state. Since `noinstr` doesn't always
+# inhibit KCOV instrumentation, disable it for the entire compilation unit.
+KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_idle.o := n
+
# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-common.o entry-fpsimd.o process.o ptrace.o \
setup.o signal.o sys.o stacktrace.o time.o traps.o \
- io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \
+ io.o vdso.o hyp-stub.o psci.o cpu_ops.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o idreg-override.o
+ syscall.o proton-pack.o idreg-override.o idle.o \
+ patching.o
targets += efi-entry.o
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index cada0b816c8a..f3851724fe35 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -239,6 +239,18 @@ done:
}
}
+static pgprot_t __acpi_get_writethrough_mem_attribute(void)
+{
+ /*
+ * Although UEFI specifies the use of Normal Write-through for
+ * EFI_MEMORY_WT, it is seldom used in practice and not implemented
+ * by most (all?) CPUs. Rather than allocate a MAIR just for this
+ * purpose, emit a warning and use Normal Non-cacheable instead.
+ */
+ pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
+ return __pgprot(PROT_NORMAL_NC);
+}
+
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
/*
@@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
* types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
* mapped to a corresponding MAIR attribute encoding.
* The EFI memory attribute advises all possible capabilities
- * of a memory region. We use the most efficient capability.
+ * of a memory region.
*/
u64 attr;
@@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
attr = efi_mem_attributes(addr);
if (attr & EFI_MEMORY_WB)
return PAGE_KERNEL;
- if (attr & EFI_MEMORY_WT)
- return __pgprot(PROT_NORMAL_WT);
if (attr & EFI_MEMORY_WC)
return __pgprot(PROT_NORMAL_NC);
+ if (attr & EFI_MEMORY_WT)
+ return __acpi_get_writethrough_mem_attribute();
return __pgprot(PROT_DEVICE_nGnRnE);
}
@@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
default:
if (region->attribute & EFI_MEMORY_WB)
prot = PAGE_KERNEL;
- else if (region->attribute & EFI_MEMORY_WT)
- prot = __pgprot(PROT_NORMAL_WT);
else if (region->attribute & EFI_MEMORY_WC)
prot = __pgprot(PROT_NORMAL_NC);
+ else if (region->attribute & EFI_MEMORY_WT)
+ prot = __acpi_get_writethrough_mem_attribute();
}
}
return __ioremap(phys, size, prot);
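A reduced sketch of the attribute-selection order after this change, with Write-Through demoted to the Normal Non-cacheable fallback; the bit values follow the UEFI memory attribute definitions and the enum names are placeholders, not kernel types:

#include <stdint.h>

#define EFI_MEMORY_WC 0x2ULL	/* write-combining */
#define EFI_MEMORY_WT 0x4ULL	/* write-through */
#define EFI_MEMORY_WB 0x8ULL	/* write-back */

enum mem_prot { PROT_KERNEL_WB, PROT_NC, PROT_DEVICE_STRICT };

/* Illustrative only: mirrors the post-patch ordering - Write-Back first,
 * then Write-Combine, with Write-Through now warned about and mapped to
 * Normal Non-cacheable instead of a dedicated MAIR entry. */
static enum mem_prot pick_prot(uint64_t attr)
{
	if (attr & EFI_MEMORY_WB)
		return PROT_KERNEL_WB;
	if (attr & EFI_MEMORY_WC)
		return PROT_NC;
	if (attr & EFI_MEMORY_WT)
		return PROT_NC;		/* pr_warn_once() in the real code */
	return PROT_DEVICE_STRICT;
}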
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index c906d20c7b52..3fb79b76e9d9 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -181,7 +181,7 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
*/
if (!is_module) {
dsb(ish);
- __flush_icache_all();
+ icache_inval_all_pou();
isb();
/* Ignore ARM64_CB bit from feature mask */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 0cb34ccb6e73..c85670692afa 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -27,6 +27,7 @@
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TSK_CPU, offsetof(struct task_struct, cpu));
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
@@ -46,6 +47,8 @@ int main(void)
DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user));
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
@@ -99,7 +102,6 @@ int main(void)
DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
BLANK();
- DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));
@@ -111,6 +113,8 @@ int main(void)
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
+ DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
+ DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));
DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
@@ -138,6 +142,15 @@ int main(void)
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
+ DEFINE(ARM_SMCCC_1_2_REGS_X0_OFFS, offsetof(struct arm_smccc_1_2_regs, a0));
+ DEFINE(ARM_SMCCC_1_2_REGS_X2_OFFS, offsetof(struct arm_smccc_1_2_regs, a2));
+ DEFINE(ARM_SMCCC_1_2_REGS_X4_OFFS, offsetof(struct arm_smccc_1_2_regs, a4));
+ DEFINE(ARM_SMCCC_1_2_REGS_X6_OFFS, offsetof(struct arm_smccc_1_2_regs, a6));
+ DEFINE(ARM_SMCCC_1_2_REGS_X8_OFFS, offsetof(struct arm_smccc_1_2_regs, a8));
+ DEFINE(ARM_SMCCC_1_2_REGS_X10_OFFS, offsetof(struct arm_smccc_1_2_regs, a10));
+ DEFINE(ARM_SMCCC_1_2_REGS_X12_OFFS, offsetof(struct arm_smccc_1_2_regs, a12));
+ DEFINE(ARM_SMCCC_1_2_REGS_X14_OFFS, offsetof(struct arm_smccc_1_2_regs, a14));
+ DEFINE(ARM_SMCCC_1_2_REGS_X16_OFFS, offsetof(struct arm_smccc_1_2_regs, a16));
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
@@ -153,7 +166,9 @@ int main(void)
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
+#endif
BLANK();
#endif
return 0;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index efed2830d141..125d5c9471ac 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -76,6 +76,7 @@
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
+#include <asm/insn.h>
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
@@ -108,6 +109,24 @@ bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
/*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+ */
+static bool __read_mostly allow_mismatched_32bit_el0;
+
+/*
+ * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
+ * seen at least one CPU capable of 32-bit EL0.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+/*
+ * Mask of CPUs supporting 32-bit EL0.
+ * Only valid if arm64_mismatched_32bit_el0 is enabled.
+ */
+static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
+
+/*
* Flag to indicate if we have computed the system wide
* capabilities based on the boot time active CPUs. This
* will be used to determine if a new booting CPU should
@@ -400,6 +419,11 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_gmid[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_isar0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
@@ -617,6 +641,9 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 1, CRm = 2 */
ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+ /* Op1 = 1, CRn = 0, CRm = 0 */
+ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
+
/* Op1 = 3, CRn = 0, CRm = 0 */
{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
@@ -767,7 +794,7 @@ static void __init sort_ftr_regs(void)
* Any bits that are not covered by an arm64_ftr_bits entry are considered
* RES0 for the system-wide value, and must strictly match.
*/
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
u64 val = 0;
u64 strict_mask = ~0x0ULL;
@@ -863,6 +890,31 @@ static void __init init_cpu_hwcaps_indirect_list(void)
static void __init setup_boot_cpu_capabilities(void);
+static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
+{
+ init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+ init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+ init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+ init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+ init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+ init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+ init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+ init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+ init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+ init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+ init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+ init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+ init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+ init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+ init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+ init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+ init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+ init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+ init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+ init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+ init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
+
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
/* Before we start using the tables, make sure it is sorted */
@@ -882,35 +934,17 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
- if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
- init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
- init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
- init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
- init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
- init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
- init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
- init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
- init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
- init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
- init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
- init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
- init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
- init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
- init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
- init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
- init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
- init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
- init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
- init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
- init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
- init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
- }
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+ init_32bit_cpu_features(&info->aarch32);
if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
sve_init_vq_map();
}
+ if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+ init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
+
/*
* Initialize the indirect array of CPU hwcaps capabilities pointers
* before we handle the boot CPU below.
@@ -975,21 +1009,29 @@ static void relax_cpu_ftr_reg(u32 sys_id, int field)
WARN_ON(!ftrp->width);
}
-static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
- struct cpuinfo_arm64 *boot)
+static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
+ struct cpuinfo_arm64 *boot)
+{
+ static bool boot_cpu_32bit_regs_overridden = false;
+
+ if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
+ return;
+
+ if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
+ return;
+
+ boot->aarch32 = info->aarch32;
+ init_32bit_cpu_features(&boot->aarch32);
+ boot_cpu_32bit_regs_overridden = true;
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
+ struct cpuinfo_32bit *boot)
{
int taint = 0;
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
/*
- * If we don't have AArch32 at all then skip the checks entirely
- * as the register values may be UNKNOWN and we're not going to be
- * using them for anything.
- */
- if (!id_aa64pfr0_32bit_el0(pfr0))
- return taint;
-
- /*
* If we don't have AArch32 at EL1, then relax the strictness of
* EL1-dependent register fields to avoid spurious sanity check fails.
*/
@@ -1135,10 +1177,29 @@ void update_cpu_features(int cpu,
}
/*
+ * The kernel uses the LDGM/STGM instructions and the number of tags
+ * they read/write depends on the GMID_EL1.BS field. Check that the
+ * value is the same on all CPUs.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_MTE) &&
+ id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
+ taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
+ info->reg_gmid, boot->reg_gmid);
+ }
+
+ /*
+ * If we don't have AArch32 at all then skip the checks entirely
+ * as the register values may be UNKNOWN and we're not going to be
+ * using them for anything.
+ *
* This relies on a sanitised view of the AArch64 ID registers
* (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
*/
- taint |= update_32bit_cpu_features(cpu, info, boot);
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+ lazy_init_32bit_cpu_features(info, boot);
+ taint |= update_32bit_cpu_features(cpu, &info->aarch32,
+ &boot->aarch32);
+ }
/*
* Mismatched CPU features are a recipe for disaster. Don't even
@@ -1248,6 +1309,28 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
return feature_matches(val, entry);
}
+const struct cpumask *system_32bit_el0_cpumask(void)
+{
+ if (!system_supports_32bit_el0())
+ return cpu_none_mask;
+
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ return cpu_32bit_el0_mask;
+
+ return cpu_possible_mask;
+}
+
+static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ if (!has_cpuid_feature(entry, scope))
+ return allow_mismatched_32bit_el0;
+
+ if (scope == SCOPE_SYSTEM)
+ pr_info("detected: 32-bit EL0 Support\n");
+
+ return true;
+}
+
static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
bool has_sre;
@@ -1866,10 +1949,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_copy_el2regs,
},
{
- .desc = "32-bit EL0 Support",
- .capability = ARM64_HAS_32BIT_EL0,
+ .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .matches = has_cpuid_feature,
+ .matches = has_32bit_el0,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL0_SHIFT,
@@ -2378,7 +2460,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
{},
};
-static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
@@ -2423,7 +2505,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
return rc;
}
-static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
/* We support emulation of accesses to CPU ID feature registers */
cpu_set_named_feature(CPUID);
@@ -2598,7 +2680,7 @@ static void check_early_cpu_features(void)
}
static void
-verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
@@ -2609,6 +2691,14 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
}
}
+static void verify_local_elf_hwcaps(void)
+{
+ __verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
+ __verify_local_elf_hwcaps(compat_elf_hwcaps);
+}
+
static void verify_sve_features(void)
{
u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
@@ -2673,11 +2763,7 @@ static void verify_local_cpu_capabilities(void)
* on all secondary CPUs.
*/
verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
-
- verify_local_elf_hwcaps(arm64_elf_hwcaps);
-
- if (system_supports_32bit_el0())
- verify_local_elf_hwcaps(compat_elf_hwcaps);
+ verify_local_elf_hwcaps();
if (system_supports_sve())
verify_sve_features();
@@ -2812,6 +2898,34 @@ void __init setup_cpu_features(void)
ARCH_DMA_MINALIGN);
}
+static int enable_mismatched_32bit_el0(unsigned int cpu)
+{
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+ bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
+
+ if (cpu_32bit) {
+ cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
+ static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
+ setup_elf_hwcaps(compat_elf_hwcaps);
+ }
+
+ return 0;
+}
+
+static int __init init_32bit_el0_mask(void)
+{
+ if (!allow_mismatched_32bit_el0)
+ return 0;
+
+ if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "arm64/mismatched_32bit_el0:online",
+ enable_mismatched_32bit_el0, NULL);
+}
+subsys_initcall_sync(init_32bit_el0_mask);
+
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
@@ -2905,8 +3019,8 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
}
static struct undef_hook mrs_hook = {
- .instr_mask = 0xfff00000,
- .instr_val = 0xd5300000,
+ .instr_mask = 0xffff0000,
+ .instr_val = 0xd5380000,
.pstate_mask = PSR_AA32_MODE_MASK,
.pstate_val = PSR_MODE_EL0t,
.fn = emulate_mrs,
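The new system_32bit_el0_cpumask() reduces to a three-way choice; a standalone sketch with the static key and cpumask machinery replaced by booleans and an enum (illustrative only):

#include <stdbool.h>

enum el0_mask { MASK_NONE, MASK_32BIT_CPUS_ONLY, MASK_ALL_CPUS };

/* Illustrative only: which CPUs may run 32-bit EL0 tasks. */
static enum el0_mask pick_32bit_el0_mask(bool supports_32bit_el0,
					 bool mismatched_32bit_el0)
{
	if (!supports_32bit_el0)
		return MASK_NONE;            /* cpu_none_mask */
	if (mismatched_32bit_el0)
		return MASK_32BIT_CPUS_ONLY; /* cpu_32bit_el0_mask */
	return MASK_ALL_CPUS;                /* cpu_possible_mask */
}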
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 51fcf99d5351..87731fea5e41 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -246,7 +246,7 @@ static struct kobj_type cpuregs_kobj_type = {
struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj); \
\
if (info->reg_midr) \
- return sprintf(buf, "0x%016x\n", info->reg_##_field); \
+ return sprintf(buf, "0x%016llx\n", info->reg_##_field); \
else \
return 0; \
} \
@@ -344,6 +344,32 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
+static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info)
+{
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+ info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+ info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+ info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+ info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+ info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+ info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+ info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+}
+
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
@@ -371,31 +397,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
- /* Update the 32bit ID registers only if AArch32 is implemented */
- if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
- info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
- info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
- info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
- info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
- info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
- info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
- info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
- info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
- info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
- info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
- info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
- info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
- info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
- info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
- info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
- info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
- info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
- info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
-
- info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
- info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
- info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
- }
+ if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+ info->reg_gmid = read_cpuid(GMID_EL1);
+
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+ __cpuinfo_store_cpu_32bit(&info->aarch32);
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(info->reg_id_aa64pfr0))
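The cpuinfo format-string fix above matters because the MIDR field is 64-bit; a user-space sketch of the corrected conversion (the register value is made up):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t reg_midr = 0x410fd0c0;	/* made-up example value */

	/* Correct: a 64-bit argument needs a 64-bit conversion specifier,
	 * as in the new "0x%016llx". The old "0x%016x" paired a 32-bit
	 * specifier with a 64-bit argument, which is undefined behaviour. */
	printf("0x%016" PRIx64 "\n", reg_midr);
	return 0;
}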
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 0073b24b5d25..61a87fa1c305 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -28,7 +28,8 @@ SYM_CODE_START(efi_enter_kernel)
* stale icache entries from before relocation.
*/
ldr w1, =kernel_size
- bl __clean_dcache_area_poc
+ add x1, x0, x1
+ bl dcache_clean_poc
ic ialluis
/*
@@ -36,8 +37,8 @@ SYM_CODE_START(efi_enter_kernel)
* so that we can safely disable the MMU and caches.
*/
adr x0, 0f
- ldr w1, 3f
- bl __clean_dcache_area_poc
+ adr x1, 3f
+ bl dcache_clean_poc
0:
/* Turn off Dcache and MMU */
mrs x0, CurrentEL
@@ -64,5 +65,5 @@ SYM_CODE_START(efi_enter_kernel)
mov x2, xzr
mov x3, xzr
br x19
+3:
SYM_CODE_END(efi_enter_kernel)
-3: .long . - 0b
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 340d04e13617..12ce14a98b7c 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -6,7 +6,11 @@
*/
#include <linux/context_tracking.h>
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
@@ -15,7 +19,11 @@
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/sdei.h>
+#include <asm/stacktrace.h>
#include <asm/sysreg.h>
+#include <asm/system_misc.h>
/*
* This is intended to match the logic in irqentry_enter(), handling the kernel
@@ -67,7 +75,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
}
}
-void noinstr arm64_enter_nmi(struct pt_regs *regs)
+static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -80,7 +88,7 @@ void noinstr arm64_enter_nmi(struct pt_regs *regs)
ftrace_nmi_enter();
}
-void noinstr arm64_exit_nmi(struct pt_regs *regs)
+static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
bool restore = regs->lockdep_hardirqs;
@@ -97,7 +105,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
__nmi_exit();
}
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_enter_nmi(regs);
@@ -105,7 +113,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
enter_from_kernel_mode(regs);
}
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_exit_nmi(regs);
@@ -113,6 +121,65 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
+static void __sched arm64_preempt_schedule_irq(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
+ * priority masking is used the GIC irqchip driver will clear DAIF.IF
+ * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
+ * DAIF we must have handled an NMI, so skip preemption.
+ */
+ if (system_uses_irq_prio_masking() && read_sysreg(daif))
+ return;
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (system_capabilities_finalized())
+ preempt_schedule_irq();
+}
+
+static void do_interrupt_handler(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ if (on_thread_stack())
+ call_on_irq_stack(regs, handler);
+ else
+ handler(regs);
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void (*handle_arch_fiq)(struct pt_regs *);
+
+static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
+ unsigned int esr)
+{
+ arm64_enter_nmi(regs);
+
+ console_verbose();
+
+ pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
+ vector, smp_processor_id(), esr,
+ esr_get_class_string(esr));
+
+ __show_regs(regs);
+ panic("Unhandled exception");
+}
+
+#define UNHANDLED(el, regsize, vector) \
+asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \
+{ \
+ const char *desc = #regsize "-bit " #el " " #vector; \
+ __panic_unhandled(regs, desc, read_sysreg(esr_el1)); \
+}
+
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -162,6 +229,11 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+UNHANDLED(el1t, 64, sync)
+UNHANDLED(el1t, 64, irq)
+UNHANDLED(el1t, 64, fiq)
+UNHANDLED(el1t, 64, error)
+
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -193,15 +265,6 @@ static void noinstr el1_undef(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
-static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
-{
- enter_from_kernel_mode(regs);
- local_daif_inherit(regs);
- bad_mode(regs, 0, esr);
- local_daif_mask();
- exit_to_kernel_mode(regs);
-}
-
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -245,7 +308,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
-asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -275,10 +338,50 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
el1_fpac(regs, esr);
break;
default:
- el1_inv(regs, esr);
+ __panic_unhandled(regs, "64-bit el1h sync", esr);
}
}
+static void noinstr el1_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ enter_el1_irq_or_nmi(regs);
+ do_interrupt_handler(regs, handler);
+
+ /*
+ * Note: thread_info::preempt_count includes both thread_info::count
+ * and thread_info::need_resched, and is not equivalent to
+ * preempt_count().
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION) &&
+ READ_ONCE(current_thread_info()->preempt_count) == 0)
+ arm64_preempt_schedule_irq();
+
+ exit_el1_irq_or_nmi(regs);
+}
+
+asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
+{
+ el1_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
+{
+ el1_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+}
+
asmlinkage void noinstr enter_from_user_mode(void)
{
lockdep_hardirqs_off(CALLER_ADDR0);
@@ -398,7 +501,7 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
enter_from_user_mode();
do_debug_exception(far, esr, regs);
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ local_daif_restore(DAIF_PROCCTX);
}
static void noinstr el0_svc(struct pt_regs *regs)
@@ -415,7 +518,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
do_ptrauth_fault(regs, esr);
}
-asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -468,6 +571,56 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
}
}
+static void noinstr el0_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ enter_from_user_mode();
+
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ if (regs->pc & BIT(55))
+ arm64_apply_bp_hardening();
+
+ do_interrupt_handler(regs, handler);
+}
+
+static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
+{
+ el0_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
+{
+ __el0_irq_handler_common(regs);
+}
+
+static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
+{
+ el0_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
+{
+ __el0_fiq_handler_common(regs);
+}
+
+static void __el0_error_handler_common(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ enter_from_user_mode();
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+ local_daif_restore(DAIF_PROCCTX);
+}
+
+asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
+
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
@@ -483,7 +636,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
do_el0_svc_compat(regs);
}
-asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -526,4 +679,71 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
+
+asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
+{
+ __el0_irq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
+{
+ __el0_fiq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
+#else /* CONFIG_COMPAT */
+UNHANDLED(el0t, 32, sync)
+UNHANDLED(el0t, 32, irq)
+UNHANDLED(el0t, 32, fiq)
+UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_VMAP_STACK
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned int esr = read_sysreg(esr_el1);
+ unsigned long far = read_sysreg(far_el1);
+
+ arm64_enter_nmi(regs);
+ panic_bad_stack(regs, esr, far);
+}
+#endif /* CONFIG_VMAP_STACK */
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+asmlinkage noinstr unsigned long
+__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
+{
+ unsigned long ret;
+
+ /*
+ * We didn't take an exception to get here, so the HW hasn't
+ * set/cleared bits in PSTATE that we may rely on.
+ *
+ * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
+ * whether PSTATE bits are inherited unchanged or generated from
+ * scratch, and the TF-A implementation always clears PAN and always
+ * clears UAO. There are no other known implementations.
+ *
+ * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
+ * PSTATE is modified upon architectural exceptions, and so PAN is
+ * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
+ * cleared.
+ *
+ * We must explicitly reset PAN to the expected state, including
+ * clearing it when the host isn't using it, in case a VM had it set.
+ */
+ if (system_uses_hw_pan())
+ set_pstate_pan(1);
+ else if (cpu_has_pan())
+ set_pstate_pan(0);
+
+ arm64_enter_nmi(regs);
+ ret = do_sdei_event(regs, arg);
+ arm64_exit_nmi(regs);
+
+ return ret;
+}
+#endif /* CONFIG_ARM_SDE_INTERFACE */
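The IRQ-return preemption policy spelled out in arm64_preempt_schedule_irq() and el1_interrupt() above comes down to a few cheap checks; a standalone sketch with the sysreg and static-key plumbing reduced to booleans (illustrative only):

#include <stdbool.h>

/* Illustrative only: may an EL1 IRQ return preempt the interrupted context? */
static bool may_preempt_on_irq_return(unsigned long preempt_count,
				      bool prio_masking, bool daif_set,
				      bool cpufeatures_finalized)
{
	/* thread_info::preempt_count folds in need_resched, so non-zero means
	 * preemption is disabled or no reschedule is pending. */
	if (preempt_count != 0)
		return false;

	/* With pseudo-NMI priority masking, any set DAIF bit means this is
	 * the NMI return path - never preempt there. */
	if (prio_masking && daif_set)
		return false;

	/* Hold off until cpufeature enable calls have stopped rewriting
	 * PSTATE, otherwise a preempted task could resume with stale state. */
	return cpufeatures_finalized;
}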
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 3ecec60d3295..0a7a64753878 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -63,16 +63,24 @@ SYM_FUNC_END(sve_set_vq)
* and the rest zeroed. All the other SVE registers will be zeroed.
*/
SYM_FUNC_START(sve_load_from_fpsimd_state)
- sve_load_vq x1, x2, x3
- fpsimd_restore x0, 8
- _for n, 0, 15, _sve_pfalse \n
- _sve_wrffr 0
- ret
+ sve_load_vq x1, x2, x3
+ fpsimd_restore x0, 8
+ sve_flush_p_ffr
+ ret
SYM_FUNC_END(sve_load_from_fpsimd_state)
-/* Zero all SVE registers but the first 128-bits of each vector */
+/*
+ * Zero all SVE registers but the first 128 bits of each vector.
+ *
+ * VQ must already be configured by the caller; any further updates of VQ
+ * will need to ensure that the register state remains valid.
+ *
+ * x0 = VQ - 1
+ */
SYM_FUNC_START(sve_flush_live)
- sve_flush
+ cbz x0, 1f // A VQ-1 of 0 is 128 bits so no extra Z state
+ sve_flush_z
+1: sve_flush_p_ffr
ret
SYM_FUNC_END(sve_flush_live)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 3513984a88bd..863d44f73028 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -33,12 +33,6 @@
* Context tracking and irqflag tracing need to instrument transitions between
* user and kernel mode.
*/
- .macro user_exit_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
- bl enter_from_user_mode
-#endif
- .endm
-
.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl exit_to_user_mode
@@ -51,16 +45,7 @@
.endr
.endm
-/*
- * Bad Abort numbers
- *-----------------
- */
-#define BAD_SYNC 0
-#define BAD_IRQ 1
-#define BAD_FIQ 2
-#define BAD_ERROR 3
-
- .macro kernel_ventry, el, label, regsize = 64
+ .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.if \el == 0
@@ -87,7 +72,7 @@ alternative_else_nop_endif
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
0:
/*
@@ -119,7 +104,7 @@ alternative_else_nop_endif
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
.endm
.macro tramp_alias, dst, sym
@@ -275,7 +260,7 @@ alternative_else_nop_endif
mte_set_kernel_gcr x22, x23
- scs_load tsk, x20
+ scs_load tsk
.else
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
@@ -285,7 +270,7 @@ alternative_else_nop_endif
stp lr, x21, [sp, #S_LR]
/*
- * For exceptions from EL0, create a terminal frame record.
+ * For exceptions from EL0, create a final frame record.
* For exceptions from EL1, create a synthetic frame record so the
* interrupted code shows up in the backtrace.
*/
@@ -375,7 +360,7 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
- scs_save tsk, x0
+ scs_save tsk
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -486,63 +471,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
SYM_CODE_END(__swpan_exit_el0)
#endif
- .macro irq_stack_entry
- mov x19, sp // preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov x24, scs_sp // preserve the original shadow stack
-#endif
-
- /*
- * Compare sp with the base of the task stack.
- * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
- * and should switch to the irq stack.
- */
- ldr x25, [tsk, TSK_STACK]
- eor x25, x25, x19
- and x25, x25, #~(THREAD_SIZE - 1)
- cbnz x25, 9998f
-
- ldr_this_cpu x25, irq_stack_ptr, x26
- mov x26, #IRQ_STACK_SIZE
- add x26, x25, x26
-
- /* switch to the irq stack */
- mov sp, x26
-
-#ifdef CONFIG_SHADOW_CALL_STACK
- /* also switch to the irq shadow stack */
- ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
-#endif
-
-9998:
- .endm
-
- /*
- * The callee-saved regs (x19-x29) should be preserved between
- * irq_stack_entry and irq_stack_exit, but note that kernel_entry
- * uses x20-x23 to store data for later use.
- */
- .macro irq_stack_exit
- mov sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov scs_sp, x24
-#endif
- .endm
-
/* GPRs used by entry code */
tsk .req x28 // current thread_info
/*
* Interrupt handling.
*/
- .macro irq_handler, handler:req
- ldr_l x1, \handler
- mov x0, sp
- irq_stack_entry
- blr x1
- irq_stack_exit
- .endm
-
.macro gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -552,45 +486,6 @@ tsk .req x28 // current thread_info
#endif
.endm
- .macro el1_interrupt_handler, handler:req
- enable_da
-
- mov x0, sp
- bl enter_el1_irq_or_nmi
-
- irq_handler \handler
-
-#ifdef CONFIG_PREEMPTION
- ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- /*
- * DA were cleared at start of handling, and IF are cleared by
- * the GIC irqchip driver using gic_arch_enable_irqs() for
- * normal IRQs. If anything is set, it means we come back from
- * an NMI instead of a normal IRQ, so skip preemption
- */
- mrs x0, daif
- orr x24, x24, x0
-alternative_else_nop_endif
- cbnz x24, 1f // preempt count != 0 || NMI return path
- bl arm64_preempt_schedule_irq // irq en/disable is done inside
-1:
-#endif
-
- mov x0, sp
- bl exit_el1_irq_or_nmi
- .endm
-
- .macro el0_interrupt_handler, handler:req
- user_exit_irqoff
- enable_da
-
- tbz x22, #55, 1f
- bl do_el0_irq_bp_hardening
-1:
- irq_handler \handler
- .endm
-
.text
/*
@@ -600,32 +495,25 @@ alternative_else_nop_endif
.align 11
SYM_CODE_START(vectors)
- kernel_ventry 1, sync_invalid // Synchronous EL1t
- kernel_ventry 1, irq_invalid // IRQ EL1t
- kernel_ventry 1, fiq_invalid // FIQ EL1t
- kernel_ventry 1, error_invalid // Error EL1t
-
- kernel_ventry 1, sync // Synchronous EL1h
- kernel_ventry 1, irq // IRQ EL1h
- kernel_ventry 1, fiq // FIQ EL1h
- kernel_ventry 1, error // Error EL1h
-
- kernel_ventry 0, sync // Synchronous 64-bit EL0
- kernel_ventry 0, irq // IRQ 64-bit EL0
- kernel_ventry 0, fiq // FIQ 64-bit EL0
- kernel_ventry 0, error // Error 64-bit EL0
-
-#ifdef CONFIG_COMPAT
- kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
- kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
- kernel_ventry 0, fiq_compat, 32 // FIQ 32-bit EL0
- kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
-#else
- kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
- kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
- kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
- kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
-#endif
+ kernel_ventry 1, t, 64, sync // Synchronous EL1t
+ kernel_ventry 1, t, 64, irq // IRQ EL1t
+ kernel_ventry 1, t, 64, fiq // FIQ EL1t
+ kernel_ventry 1, t, 64, error // Error EL1t
+
+ kernel_ventry 1, h, 64, sync // Synchronous EL1h
+ kernel_ventry 1, h, 64, irq // IRQ EL1h
+ kernel_ventry 1, h, 64, fiq // FIQ EL1h
+ kernel_ventry 1, h, 64, error // Error EL1h
+
+ kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0
+ kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0
+ kernel_ventry 0, t, 64, error // Error 64-bit EL0
+
+ kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0
+ kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0
+ kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0
+ kernel_ventry 0, t, 32, error // Error 32-bit EL0
SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
@@ -656,147 +544,46 @@ __bad_stack:
ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
-/*
- * Invalid mode handlers
- */
- .macro inv_entry, el, reason, regsize = 64
+
+ .macro entry_handler el:req, ht:req, regsize:req, label:req
+SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
kernel_entry \el, \regsize
mov x0, sp
- mov x1, #\reason
- mrs x2, esr_el1
- bl bad_mode
- ASM_BUG()
+ bl el\el\ht\()_\regsize\()_\label\()_handler
+ .if \el == 0
+ b ret_to_user
+ .else
+ b ret_to_kernel
+ .endif
+SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
.endm
-SYM_CODE_START_LOCAL(el0_sync_invalid)
- inv_entry 0, BAD_SYNC
-SYM_CODE_END(el0_sync_invalid)
-
-SYM_CODE_START_LOCAL(el0_irq_invalid)
- inv_entry 0, BAD_IRQ
-SYM_CODE_END(el0_irq_invalid)
-
-SYM_CODE_START_LOCAL(el0_fiq_invalid)
- inv_entry 0, BAD_FIQ
-SYM_CODE_END(el0_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el0_error_invalid)
- inv_entry 0, BAD_ERROR
-SYM_CODE_END(el0_error_invalid)
-
-SYM_CODE_START_LOCAL(el1_sync_invalid)
- inv_entry 1, BAD_SYNC
-SYM_CODE_END(el1_sync_invalid)
-
-SYM_CODE_START_LOCAL(el1_irq_invalid)
- inv_entry 1, BAD_IRQ
-SYM_CODE_END(el1_irq_invalid)
-
-SYM_CODE_START_LOCAL(el1_fiq_invalid)
- inv_entry 1, BAD_FIQ
-SYM_CODE_END(el1_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el1_error_invalid)
- inv_entry 1, BAD_ERROR
-SYM_CODE_END(el1_error_invalid)
-
/*
- * EL1 mode handlers.
+ * Early exception handlers
*/
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
- kernel_entry 1
- mov x0, sp
- bl el1_sync_handler
- kernel_exit 1
-SYM_CODE_END(el1_sync)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
- kernel_entry 1
- el1_interrupt_handler handle_arch_irq
- kernel_exit 1
-SYM_CODE_END(el1_irq)
-
-SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
- kernel_entry 1
- el1_interrupt_handler handle_arch_fiq
- kernel_exit 1
-SYM_CODE_END(el1_fiq)
-
-/*
- * EL0 mode handlers.
- */
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
- kernel_entry 0
- mov x0, sp
- bl el0_sync_handler
- b ret_to_user
-SYM_CODE_END(el0_sync)
-
-#ifdef CONFIG_COMPAT
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_sync_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_sync_compat)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
- kernel_entry 0, 32
- b el0_irq_naked
-SYM_CODE_END(el0_irq_compat)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
- kernel_entry 0, 32
- b el0_fiq_naked
-SYM_CODE_END(el0_fiq_compat)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
- kernel_entry 0, 32
- b el0_error_naked
-SYM_CODE_END(el0_error_compat)
-#endif
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
- kernel_entry 0
-el0_irq_naked:
- el0_interrupt_handler handle_arch_irq
- b ret_to_user
-SYM_CODE_END(el0_irq)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
- kernel_entry 0
-el0_fiq_naked:
- el0_interrupt_handler handle_arch_fiq
- b ret_to_user
-SYM_CODE_END(el0_fiq)
-
-SYM_CODE_START_LOCAL(el1_error)
- kernel_entry 1
- mrs x1, esr_el1
- enable_dbg
- mov x0, sp
- bl do_serror
+ entry_handler 1, t, 64, sync
+ entry_handler 1, t, 64, irq
+ entry_handler 1, t, 64, fiq
+ entry_handler 1, t, 64, error
+
+ entry_handler 1, h, 64, sync
+ entry_handler 1, h, 64, irq
+ entry_handler 1, h, 64, fiq
+ entry_handler 1, h, 64, error
+
+ entry_handler 0, t, 64, sync
+ entry_handler 0, t, 64, irq
+ entry_handler 0, t, 64, fiq
+ entry_handler 0, t, 64, error
+
+ entry_handler 0, t, 32, sync
+ entry_handler 0, t, 32, irq
+ entry_handler 0, t, 32, fiq
+ entry_handler 0, t, 32, error
+
+SYM_CODE_START_LOCAL(ret_to_kernel)
kernel_exit 1
-SYM_CODE_END(el1_error)
-
-SYM_CODE_START_LOCAL(el0_error)
- kernel_entry 0
-el0_error_naked:
- mrs x25, esr_el1
- user_exit_irqoff
- enable_dbg
- mov x0, sp
- mov x1, x25
- bl do_serror
- enable_da
- b ret_to_user
-SYM_CODE_END(el0_error)
+SYM_CODE_END(ret_to_kernel)
/*
* "slow" syscall return path.
@@ -979,8 +766,8 @@ SYM_FUNC_START(cpu_switch_to)
mov sp, x9
msr sp_el0, x1
ptrauth_keys_install_kernel x1, x8, x9, x10
- scs_save x0, x8
- scs_load x1, x8
+ scs_save x0
+ scs_load x1
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
@@ -998,6 +785,42 @@ SYM_CODE_START(ret_from_fork)
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ * void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using this CPU's irq stack and shadow irq stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+#ifdef CONFIG_SHADOW_CALL_STACK
+ stp scs_sp, xzr, [sp, #-16]!
+ ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
+#endif
+ /* Create a frame record to save our LR and SP (implicit in FP) */
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ ldr_this_cpu x16, irq_stack_ptr, x17
+ mov x15, #IRQ_STACK_SIZE
+ add x16, x16, x15
+
+ /* Move to the new stack and call the function there */
+ mov sp, x16
+ blr x1
+
+ /*
+ * Restore the SP from the FP, and restore the FP and LR from the frame
+ * record.
+ */
+ mov sp, x29
+ ldp x29, x30, [sp], #16
+#ifdef CONFIG_SHADOW_CALL_STACK
+ ldp scs_sp, xzr, [sp], #16
+#endif
+ ret
+SYM_FUNC_END(call_on_irq_stack)
+NOKPROBE(call_on_irq_stack)
+
#ifdef CONFIG_ARM_SDE_INTERFACE
#include <asm/sdei.h>
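
As a hedged sketch of how the new call_on_irq_stack() helper is consumed from C (the shape mirrors the kernel's entry-common code, but treat the wrapper below as illustrative rather than the exact implementation):

static void example_do_interrupt(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	/* Only switch stacks when currently running on the task stack. */
	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);
}
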
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ad3dd34a83cf..e57b23f95284 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -957,8 +957,10 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
* disabling the trap, otherwise update our in-memory copy.
*/
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
- sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1);
- sve_flush_live();
+ unsigned long vq_minus_one =
+ sve_vq_from_vl(current->thread.sve_vl) - 1;
+ sve_set_vq(vq_minus_one);
+ sve_flush_live(vq_minus_one);
fpsimd_bind_task_to_cpu();
} else {
fpsimd_to_sve(current);
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index b5d3ddaf69d9..7f467bd9db7a 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -15,6 +15,7 @@
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
+#include <asm/patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
/*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 96873dfa67fd..c5c994a73a64 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -16,6 +16,7 @@
#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
+#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
@@ -117,8 +118,8 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
dmb sy // needed before dc ivac with
// MMU off
- mov x1, #0x20 // 4 x 8 bytes
- b __inval_dcache_area // tail call
+ add x1, x0, #0x20 // 4 x 8 bytes
+ b dcache_inval_poc // tail call
SYM_CODE_END(preserve_boot_args)
/*
@@ -195,7 +196,7 @@ SYM_CODE_END(preserve_boot_args)
and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
mov \istart, \ptrs
mul \istart, \istart, \count
- add \iend, \iend, \istart // iend += (count - 1) * ptrs
+ add \iend, \iend, \istart // iend += count * ptrs
// our entries span multiple tables
lsr \istart, \vstart, \shift
@@ -268,8 +269,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
*/
adrp x0, init_pg_dir
adrp x1, init_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
/*
* Clear the init page tables.
@@ -354,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
#endif
1:
ldr_l x4, idmap_ptrs_per_pgd
- mov x5, x3 // __pa(__idmap_text_start)
adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
@@ -382,39 +381,57 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
adrp x0, idmap_pg_dir
adrp x1, idmap_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
adrp x0, init_pg_dir
adrp x1, init_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
ret x28
SYM_FUNC_END(__create_page_tables)
+ /*
+ * Initialize CPU registers with task-specific and cpu-specific context.
+ *
+ * Create a final frame record at task_pt_regs(current)->stackframe, so
+ * that the unwinder can identify the final frame record of any task by
+ * its location in the task stack. We reserve the entire pt_regs space
+ * for consistency with user tasks and kthreads.
+ */
+ .macro init_cpu_task tsk, tmp1, tmp2
+ msr sp_el0, \tsk
+
+ ldr \tmp1, [\tsk, #TSK_STACK]
+ add sp, \tmp1, #THREAD_SIZE
+ sub sp, sp, #PT_REGS_SIZE
+
+ stp xzr, xzr, [sp, #S_STACKFRAME]
+ add x29, sp, #S_STACKFRAME
+
+ scs_load \tsk
+
+ adr_l \tmp1, __per_cpu_offset
+ ldr w\tmp2, [\tsk, #TSK_CPU]
+ ldr \tmp1, [\tmp1, \tmp2, lsl #3]
+ set_this_cpu_offset \tmp1
+ .endm
+
/*
* The following fragment of code is executed with the MMU enabled.
*
* x0 = __PHYS_OFFSET
*/
SYM_FUNC_START_LOCAL(__primary_switched)
- adrp x4, init_thread_union
- add sp, x4, #THREAD_SIZE
- adr_l x5, init_task
- msr sp_el0, x5 // Save thread_info
+ adr_l x4, init_task
+ init_cpu_task x4, x5, x6
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
- stp xzr, x30, [sp, #-16]!
+ stp x29, x30, [sp, #-16]!
mov x29, sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
-#endif
-
str_l x21, __fdt_pointer, x5 // Save FDT pointer
ldr_l x4, kimage_vaddr // Save the offset between
@@ -446,10 +463,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
0:
#endif
bl switch_to_vhe // Prefer VHE if possible
- add sp, sp, #16
- mov x29, #0
- mov x30, #0
- b start_kernel
+ ldp x29, x30, [sp], #16
+ bl start_kernel
+ ASM_BUG()
SYM_FUNC_END(__primary_switched)
.pushsection ".rodata", "a"
@@ -551,7 +567,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
-1: str w0, [x1] // This CPU has booted in EL1
+1: str w0, [x1] // Save CPU boot mode
dmb sy
dc ivac, x1 // Invalidate potentially stale cache line
ret
@@ -632,21 +648,17 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
isb
adr_l x0, secondary_data
- ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- cbz x1, __secondary_too_slow
- mov sp, x1
ldr x2, [x0, #CPU_BOOT_TASK]
cbz x2, __secondary_too_slow
- msr sp_el0, x2
- scs_load x2, x3
- mov x29, #0
- mov x30, #0
+
+ init_cpu_task x2, x1, x3
#ifdef CONFIG_ARM64_PTR_AUTH
ptrauth_keys_init_cpu x2, x3, x4, x5
#endif
- b secondary_start_kernel
+ bl secondary_start_kernel
+ ASM_BUG()
SYM_FUNC_END(__secondary_switched)
SYM_FUNC_START_LOCAL(__secondary_too_slow)
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 8ccca660034e..81c0186a5e32 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -45,7 +45,7 @@
* Because this code has to be copied to a 'safe' page, it can't call out to
* other functions by PC-relative address. Also remember that it may be
* mid-way through over-writing other functions. For this reason it contains
- * code from flush_icache_range() and uses the copy_page() macro.
+ * code from caches_clean_inval_pou() and uses the copy_page() macro.
*
* This 'safe' page is mapped via ttbr0, and executed from there. This function
* switches to a copy of the linear map in ttbr1, performs the restore, then
@@ -87,11 +87,12 @@ SYM_CODE_START(swsusp_arch_suspend_exit)
copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
add x1, x10, #PAGE_SIZE
- /* Clean the copied page to PoU - based on flush_icache_range() */
+ /* Clean the copied page to PoU - based on caches_clean_inval_pou() */
raw_dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x10, x3
-2: dc cvau, x4 /* clean D line / unified line */
+2: /* clean D line / unified line */
+alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
add x4, x4, x2
cmp x4, x1
b.lo 2b
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index b1cef371df2b..46a0b4d6e251 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -210,7 +210,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
return -ENOMEM;
memcpy(page, src_start, length);
- __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+ caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
if (rc)
return rc;
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
return 0;
}
-#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
-
#ifdef CONFIG_ARM64_MTE
static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
ret = swsusp_save();
} else {
/* Clean kernel core startup/idle code to PoC*/
- dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
- dcache_clean_range(__idmap_text_start, __idmap_text_end);
+ dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
+ (unsigned long)__mmuoff_data_end);
+ dcache_clean_inval_poc((unsigned long)__idmap_text_start,
+ (unsigned long)__idmap_text_end);
/* Clean kvm setup code to PoC? */
if (el2_reset_needed()) {
- dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
- dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ dcache_clean_inval_poc(
+ (unsigned long)__hyp_idmap_text_start,
+ (unsigned long)__hyp_idmap_text_end);
+ dcache_clean_inval_poc((unsigned long)__hyp_text_start,
+ (unsigned long)__hyp_text_end);
}
swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
* The hibernate exit text contains a set of el2 vectors, that will
* be executed at el2 with the mmu off in order to reload hyp-stub.
*/
- __flush_dcache_area(hibernate_exit, exit_size);
+ dcache_clean_inval_poc((unsigned long)hibernate_exit,
+ (unsigned long)hibernate_exit + exit_size);
/*
* KASLR will cause the el2 vectors to be in a different location in
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
new file mode 100644
index 000000000000..a2cfbacec2bb
--- /dev/null
+++ b/arch/arm64/kernel/idle.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level idle sequences
+ */
+
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+
+#include <asm/barrier.h>
+#include <asm/cpuidle.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (wait for interrupt).
+ *
+ * If the CPU supports priority masking we must do additional work to
+ * ensure that interrupts are not masked at the PMR (because the core will
+ * not wake up if we block the wake up signal in the interrupt controller).
+ */
+void noinstr cpu_do_idle(void)
+{
+ struct arm_cpuidle_irq_context context;
+
+ arm_cpuidle_save_irq_context(&context);
+
+ dsb(sy);
+ wfi();
+
+ arm_cpuidle_restore_irq_context(&context);
+}
+
+/*
+ * This is our default idle handler.
+ */
+void noinstr arch_cpu_idle(void)
+{
+ /*
+ * This should do all the clock switching and wait for interrupt
+ * tricks
+ */
+ cpu_do_idle();
+ raw_local_irq_enable();
+}
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index e628c8ce1ffe..53a381a7f65d 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -237,7 +237,8 @@ asmlinkage void __init init_feature_override(void)
for (i = 0; i < ARRAY_SIZE(regs); i++) {
if (regs[i]->override)
- __flush_dcache_area(regs[i]->override,
+ dcache_clean_inval_poc((unsigned long)regs[i]->override,
+ (unsigned long)regs[i]->override +
sizeof(*regs[i]->override));
}
}
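
The same calling-convention change recurs throughout this series: the removed __flush_dcache_area() took a base pointer and a size, while dcache_clean_inval_poc() takes a [start, end) virtual address range. A hedged one-line illustration (the wrapper name is made up for this note):

static inline void example_clean_to_poc(void *obj, size_t size)
{
	/* old style (removed): __flush_dcache_area(obj, size); */
	dcache_clean_inval_poc((unsigned long)obj, (unsigned long)obj + size);
}
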
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index bcf3c2755370..c96a9a0043bf 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -35,7 +35,7 @@ __efistub_strnlen = __pi_strnlen;
__efistub_strcmp = __pi_strcmp;
__efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
-__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
__efistub___memcpy = __pi_memcpy;
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 9a8a0ae1e75f..fc98037e1220 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/insn.h>
+#include <asm/patching.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 341342b207f6..cfa2cfde3019 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,
@@ -170,8 +172,12 @@ u64 __init kaslr_early_init(void)
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
- __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+ dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
+ dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
+ (unsigned long)&memstart_offset_seed +
+ sizeof(memstart_offset_seed));
return offset;
}
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 1a157ca33262..2aede780fb80 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -17,6 +17,7 @@
#include <asm/debug-monitors.h>
#include <asm/insn.h>
+#include <asm/patching.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 90a335c74442..03ceabe4d912 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -68,10 +68,16 @@ int machine_kexec_post_load(struct kimage *kimage)
kimage->arch.kern_reloc = __pa(reloc_code);
kexec_image_info(kimage);
- /* Flush the reloc_code in preparation for its execution. */
- __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
- flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
- arm64_relocate_new_kernel_size);
+ /*
+ * For execution with the MMU off, reloc_code needs to be cleaned to the
+ * PoC and invalidated from the I-cache.
+ */
+ dcache_clean_inval_poc((unsigned long)reloc_code,
+ (unsigned long)reloc_code +
+ arm64_relocate_new_kernel_size);
+ icache_inval_pou((uintptr_t)reloc_code,
+ (uintptr_t)reloc_code +
+ arm64_relocate_new_kernel_size);
return 0;
}
@@ -102,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage)
for (entry = &kimage->head; ; entry++) {
unsigned int flag;
- void *addr;
+ unsigned long addr;
/* flush the list entries. */
- __flush_dcache_area(entry, sizeof(kimage_entry_t));
+ dcache_clean_inval_poc((unsigned long)entry,
+ (unsigned long)entry +
+ sizeof(kimage_entry_t));
flag = *entry & IND_FLAGS;
if (flag == IND_DONE)
break;
- addr = phys_to_virt(*entry & PAGE_MASK);
+ addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
switch (flag) {
case IND_INDIRECTION:
@@ -120,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
break;
case IND_SOURCE:
/* flush the source pages. */
- __flush_dcache_area(addr, PAGE_SIZE);
+ dcache_clean_inval_poc(addr, addr + PAGE_SIZE);
break;
case IND_DESTINATION:
break;
@@ -147,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage)
kimage->segment[i].memsz,
kimage->segment[i].memsz / PAGE_SIZE);
- __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
- kimage->segment[i].memsz);
+ dcache_clean_inval_poc(
+ (unsigned long)phys_to_virt(kimage->segment[i].mem),
+ (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+ kimage->segment[i].memsz);
}
}
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 125a10e413e9..69b3fde8759e 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -32,10 +32,9 @@ DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
#endif
-static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+ bool check_swap, bool pte_is_tagged)
{
- pte_t old_pte = READ_ONCE(*ptep);
-
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
@@ -43,6 +42,9 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
return;
}
+ if (!pte_is_tagged)
+ return;
+
page_kasan_tag_reset(page);
/*
* We need smp_wmb() in between setting the flags and clearing the
@@ -55,16 +57,22 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
mte_clear_page_tags(page_address(page));
}
-void mte_sync_tags(pte_t *ptep, pte_t pte)
+void mte_sync_tags(pte_t old_pte, pte_t pte)
{
struct page *page = pte_page(pte);
long i, nr_pages = compound_nr(page);
bool check_swap = nr_pages == 1;
+ bool pte_is_tagged = pte_tagged(pte);
+
+ /* Early out if there's nothing to do */
+ if (!check_swap && !pte_is_tagged)
+ return;
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
- mte_sync_page_tags(page, ptep, check_swap);
+ mte_sync_page_tags(page, old_pte, check_swap,
+ pte_is_tagged);
}
}
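
A hedged sketch of the caller side implied by the new mte_sync_tags() signature: the old PTE is now read once by the caller and passed in by value, instead of being re-read through a pointer inside the helper. This is illustrative and deliberately simpler than the kernel's real set_pte_at() checks:

static inline void example_set_pte_mte(pte_t *ptep, pte_t pte)
{
	pte_t old_pte = READ_ONCE(*ptep);

	if (system_supports_mte() && pte_present(pte))
		mte_sync_tags(old_pte, pte);
}
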
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
new file mode 100644
index 000000000000..771f543464e0
--- /dev/null
+++ b/arch/arm64/kernel/patching.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/insn.h>
+#include <asm/kprobes.h>
+#include <asm/patching.h>
+#include <asm/sections.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+ /* discarded with init text/data */
+ return system_state < SYSTEM_RUNNING &&
+ addr >= (unsigned long)__exittext_begin &&
+ addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+ return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+ unsigned long uintaddr = (uintptr_t) addr;
+ bool image = is_image_text(uintaddr);
+ struct page *page;
+
+ if (image)
+ page = phys_to_page(__pa_symbol(addr));
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+ else
+ return addr;
+
+ BUG_ON(!page);
+ return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+ (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ __le32 val;
+
+ ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+ void *waddr = addr;
+ unsigned long flags = 0;
+ int ret;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+ patch_unmap(FIX_TEXT_POKE0);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ caches_clean_inval_pou((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /* Notify other processors with an additional increment. */
+ atomic_inc(&pp->cpu_count);
+ } else {
+ while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
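
A hedged usage sketch for the text-patching API now housed in patching.c: patch one instruction slot with a NOP under stop_machine(). The wrapper name is invented for this note; aarch64_insn_gen_nop() comes from the insn code:

static int example_patch_nop(void *addr)
{
	u32 nop = aarch64_insn_gen_nop();

	/* Patches the slot and performs the required cache maintenance. */
	return aarch64_insn_patch_text(&addr, &nop, 1);
}
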
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 88ff471b0bce..4a72c2727309 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < entry->max_stack &&
- tail && !((unsigned long)tail & 0xf))
+ tail && !((unsigned long)tail & 0x7))
tail = user_backtrace(tail, entry);
} else {
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f594957e29bd..d07788dad388 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -165,10 +165,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
}
#define ARMV8_EVENT_ATTR(name, config) \
- (&((struct perf_pmu_events_attr) { \
- .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \
- .id = config, \
- }).attr.attr)
+ PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
static struct attribute *armv8_pmuv3_event_attrs[] = {
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
@@ -312,13 +309,46 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
- return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
+ return sysfs_emit(page, "0x%08x\n", slots);
}
static DEVICE_ATTR_RO(slots);
+static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
+ & ARMV8_PMU_BUS_SLOTS_MASK;
+
+ return sysfs_emit(page, "0x%08x\n", bus_slots);
+}
+
+static DEVICE_ATTR_RO(bus_slots);
+
+static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
+ & ARMV8_PMU_BUS_WIDTH_MASK;
+ u32 val = 0;
+
+ /* Encoded as Log2(number of bytes), plus one */
+ if (bus_width > 2 && bus_width < 13)
+ val = 1 << (bus_width - 1);
+
+ return sysfs_emit(page, "0x%08x\n", val);
+}
+
+static DEVICE_ATTR_RO(bus_width);
+
static struct attribute *armv8_pmuv3_caps_attrs[] = {
&dev_attr_slots.attr,
+ &dev_attr_bus_slots.attr,
+ &dev_attr_bus_width.attr,
NULL,
};
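
A worked example of the PMMIR.BUS_WIDTH decode introduced above (a sketch, not kernel code): the field encodes log2(bytes) plus one, so a raw value of 4 reports 1 << (4 - 1) = 8 bytes, and values outside 3..12 are reported as 0 (unknown):

static u32 example_decode_bus_width(u32 field)
{
	return (field > 2 && field < 13) ? (1u << (field - 1)) : 0;
}
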
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d607c9912025..6dbcc89f6662 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -7,26 +7,28 @@
* Copyright (C) 2013 Linaro Limited.
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
*/
+#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
-#include <linux/extable.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
#include <linux/stringify.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/traps.h>
-#include <asm/ptrace.h>
+
#include <asm/cacheflush.h>
-#include <asm/debug-monitors.h>
#include <asm/daifflags.h>
-#include <asm/system_misc.h>
+#include <asm/debug-monitors.h>
#include <asm/insn.h>
-#include <linux/uaccess.h>
#include <asm/irq.h>
+#include <asm/patching.h>
+#include <asm/ptrace.h>
#include <asm/sections.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
#include "decode-insn.h"
@@ -277,23 +279,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index 25f67ec59635..22d0b3252476 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -10,6 +10,7 @@
#include <linux/kprobes.h>
#include <asm/ptrace.h>
+#include <asm/traps.h>
#include "simulate-insn.h"
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 2c247634552b..9be668f3f034 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -21,7 +21,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
memcpy(dst, src, len);
/* flush caches (dcache/icache) */
- sync_icache_aliases(dst, len);
+ sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);
kunmap_atomic(xol_page_kaddr);
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 5591725cebcc..c8989b999250 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
-#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
@@ -46,7 +45,6 @@
#include <linux/prctl.h>
#include <asm/alternative.h>
-#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
@@ -72,63 +70,6 @@ EXPORT_SYMBOL(__stack_chk_guard);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
-static void noinstr __cpu_do_idle(void)
-{
- dsb(sy);
- wfi();
-}
-
-static void noinstr __cpu_do_idle_irqprio(void)
-{
- unsigned long pmr;
- unsigned long daif_bits;
-
- daif_bits = read_sysreg(daif);
- write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
-
- /*
- * Unmask PMR before going idle to make sure interrupts can
- * be raised.
- */
- pmr = gic_read_pmr();
- gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
- __cpu_do_idle();
-
- gic_write_pmr(pmr);
- write_sysreg(daif_bits, daif);
-}
-
-/*
- * cpu_do_idle()
- *
- * Idle the processor (wait for interrupt).
- *
- * If the CPU supports priority masking we must do additional work to
- * ensure that interrupts are not masked at the PMR (because the core will
- * not wake up if we block the wake up signal in the interrupt controller).
- */
-void noinstr cpu_do_idle(void)
-{
- if (system_uses_irq_prio_masking())
- __cpu_do_idle_irqprio();
- else
- __cpu_do_idle();
-}
-
-/*
- * This is our default idle handler.
- */
-void noinstr arch_cpu_idle(void)
-{
- /*
- * This should do all the clock switching and wait for interrupt
- * tricks
- */
- cpu_do_idle();
- raw_local_irq_enable();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
@@ -430,6 +371,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
+ /*
+ * For the benefit of the unwinder, set up childregs->stackframe
+ * as the final frame for the new task.
+ */
+ p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
ptrace_hw_copy_thread(p);
@@ -522,6 +468,15 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
write_sysreg(val, cntkctl_el1);
}
+static void compat_thread_switch(struct task_struct *next)
+{
+ if (!is_compat_thread(task_thread_info(next)))
+ return;
+
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
+}
+
static void update_sctlr_el1(u64 sctlr)
{
/*
@@ -563,6 +518,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
ptrauth_thread_switch_user(next);
+ compat_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -593,7 +549,7 @@ unsigned long get_wchan(struct task_struct *p)
struct stackframe frame;
unsigned long stack_page, ret = 0;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)try_get_task_stack(p);
@@ -628,8 +584,15 @@ unsigned long arch_align_stack(unsigned long sp)
*/
void arch_setup_new_exec(void)
{
- current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+ unsigned long mmflags = 0;
+
+ if (is_compat_task()) {
+ mmflags = MMCF_AARCH32;
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ }
+ current->mm->context.flags = mmflags;
ptrauth_thread_init_user();
mte_thread_init_user();
@@ -719,22 +682,6 @@ static int __init tagged_addr_init(void)
core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
-{
- lockdep_assert_irqs_disabled();
-
- /*
- * Preempting a task from an IRQ means we leave copies of PSTATE
- * on the stack. cpufeature's enable calls may modify PSTATE, but
- * resuming one of these preempted tasks would undo those changes.
- *
- * Only allow a task to be preempted once cpufeatures have been
- * enabled.
- */
- if (system_capabilities_finalized())
- preempt_schedule_irq();
-}
-
#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
bool has_interp, bool is_interp)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index eb2f73939b7b..499b6b2f9757 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -122,7 +122,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr, NULL);
+ on_irq_stack(addr, sizeof(unsigned long), NULL);
}
/**
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 2c7ca449dd51..47f77d1234cb 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -162,31 +162,33 @@ static int init_sdei_scs(void)
return err;
}
-static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
}
-static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
- if (on_sdei_critical_stack(sp, info))
+ if (on_sdei_critical_stack(sp, size, info))
return true;
- if (on_sdei_normal_stack(sp, info))
+ if (on_sdei_normal_stack(sp, size, info))
return true;
return false;
@@ -231,13 +233,13 @@ out_err:
}
/*
- * __sdei_handler() returns one of:
+ * do_sdei_event() returns one of:
* SDEI_EV_HANDLED - success, return to the interrupted context.
 * SDEI_EV_FAILED - failure, return this error code to firmware.
* virtual-address - success, return to this address.
*/
-static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
- struct sdei_registered_event *arg)
+unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
{
u32 mode;
int i, err = 0;
@@ -292,45 +294,3 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
return vbar + 0x480;
}
-
-static void __kprobes notrace __sdei_pstate_entry(void)
-{
- /*
- * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
- * whether PSTATE bits are inherited unchanged or generated from
- * scratch, and the TF-A implementation always clears PAN and always
- * clears UAO. There are no other known implementations.
- *
- * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
- * PSTATE is modified upon architectural exceptions, and so PAN is
- * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
- * cleared.
- *
- * We must explicitly reset PAN to the expected state, including
- * clearing it when the host isn't using it, in case a VM had it set.
- */
- if (system_uses_hw_pan())
- set_pstate_pan(1);
- else if (cpu_has_pan())
- set_pstate_pan(0);
-}
-
-asmlinkage noinstr unsigned long
-__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
-{
- unsigned long ret;
-
- /*
- * We didn't take an exception to get here, so the HW hasn't
- * set/cleared bits in PSTATE that we may rely on. Initialize PAN.
- */
- __sdei_pstate_entry();
-
- arm64_enter_nmi(regs);
-
- ret = _sdei_handler(regs, arg);
-
- arm64_exit_nmi(regs);
-
- return ret;
-}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 61845c0821d9..880f40bae60e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
+#include <linux/panic_notifier.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
@@ -87,12 +88,6 @@ void __init smp_setup_processor_id(void)
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
set_cpu_logical_map(0, mpidr);
- /*
- * clear __my_cpu_offset on boot CPU to avoid hang caused by
- * using percpu variable early, for example, lockdep will
- * access percpu variable inside lock_release
- */
- set_my_cpu_offset(0);
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
(unsigned long)mpidr, read_cpuid_id());
}
@@ -381,7 +376,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
- init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
+ init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6237486ff6bb..f8192f4ae0b8 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -911,6 +911,19 @@ static void do_signal(struct pt_regs *regs)
restore_saved_sigmask();
}
+static bool cpu_affinity_invalid(struct pt_regs *regs)
+{
+ if (!compat_user_mode(regs))
+ return false;
+
+ /*
+ * We're preemptible, but a reschedule will cause us to check the
+ * affinity again.
+ */
+ return !cpumask_test_cpu(raw_smp_processor_id(),
+ system_32bit_el0_cpumask());
+}
+
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
@@ -938,6 +951,19 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_flags & _TIF_NOTIFY_RESUME) {
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
+
+ /*
+ * If we reschedule after checking the affinity
+ * then we must ensure that TIF_NOTIFY_RESUME
+ * is set so that we check the affinity again.
+ * Since tracehook_notify_resume() clears the
+ * flag, ensure that the compiler doesn't move
+ * it after the affinity check.
+ */
+ barrier();
+
+ if (cpu_affinity_invalid(regs))
+ force_sig(SIGKILL);
}
if (thread_flags & _TIF_FOREIGN_FPSTATE)
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index d62447964ed9..d3d37f932b97 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -7,8 +7,34 @@
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
+#include <asm/thread_info.h>
+
+/*
+ * If we have SMCCC v1.3 and (as is likely) no SVE state in
+ * the registers then set the SMCCC hint bit to say there's no
+ * need to preserve it. Do this by directly adjusting the SMCCC
+ * function value which is already stored in x0 ready to be called.
+ */
+SYM_FUNC_START(__arm_smccc_sve_check)
+
+ ldr_l x16, smccc_has_sve_hint
+ cbz x16, 2f
+
+ get_current_task x16
+ ldr x16, [x16, #TSK_TI_FLAGS]
+ tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state?
+ tbnz x16, #TIF_SVE, 2f // Does that state include SVE?
+
+1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT
+
+2: ret
+SYM_FUNC_END(__arm_smccc_sve_check)
+EXPORT_SYMBOL(__arm_smccc_sve_check)
.macro SMCCC instr
+alternative_if ARM64_SVE
+ bl __arm_smccc_sve_check
+alternative_else_nop_endif
\instr #0
ldr x4, [sp]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
@@ -43,3 +69,60 @@ SYM_FUNC_START(__arm_smccc_hvc)
SMCCC hvc
SYM_FUNC_END(__arm_smccc_hvc)
EXPORT_SYMBOL(__arm_smccc_hvc)
+
+ .macro SMCCC_1_2 instr
+ /* Save `res` and free a GPR that won't be clobbered */
+ stp x1, x19, [sp, #-16]!
+
+ /* Ensure `args` won't be clobbered while loading regs in next step */
+ mov x19, x0
+
+ /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */
+ ldp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+ ldp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+ ldp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+ ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+ ldp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+ ldp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+ ldp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+ ldp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+ ldp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+ \instr #0
+
+ /* Load the `res` from the stack */
+ ldr x19, [sp]
+
+ /* Store the registers x0 - x17 into the result structure */
+ stp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+ stp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+ stp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+ stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+ stp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+ stp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+ stp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+ stp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+ stp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+ /* Restore original x19 */
+ ldp xzr, x19, [sp], #16
+ ret
+.endm
+
+/*
+ * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ * struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_hvc)
+ SMCCC_1_2 hvc
+SYM_FUNC_END(arm_smccc_1_2_hvc)
+EXPORT_SYMBOL(arm_smccc_1_2_hvc)
+
+/*
+ * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ * struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_smc)
+ SMCCC_1_2 smc
+SYM_FUNC_END(arm_smccc_1_2_smc)
+EXPORT_SYMBOL(arm_smccc_1_2_smc)
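
A hedged usage sketch for the new SMCCC v1.2 helpers; the function ID below is purely hypothetical and only demonstrates the register-structure interface:

void example_smccc_1_2_call(void)
{
	struct arm_smccc_1_2_regs args = {
		.a0 = 0xC4000030,	/* hypothetical fast-call function ID */
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);
	/* res.a0 now holds the firmware's return value */
}
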
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dcd7041b2b07..6f6ff072acbd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* page tables.
*/
secondary_data.task = idle;
- secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
update_cpu_boot_status(CPU_MMU_OFF);
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
/* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
- secondary_data.stack = NULL;
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
@@ -202,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
const struct cpu_operations *ops;
- unsigned int cpu;
-
- cpu = task_cpu(current);
- set_my_cpu_offset(per_cpu_offset(cpu));
+ unsigned int cpu = smp_processor_id();
/*
* All kernel threads share the same mm context; grab a
@@ -224,7 +217,6 @@ asmlinkage notrace void secondary_start_kernel(void)
init_gic_priority_masking();
rcu_cpu_starting(cpu);
- preempt_disable();
trace_hardirqs_off();
/*
@@ -352,7 +344,7 @@ void __cpu_die(unsigned int cpu)
pr_crit("CPU%u: cpu didn't die\n", cpu);
return;
}
- pr_notice("CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
/*
* Now that the dying CPU is beyond the point of no return w.r.t.
@@ -452,6 +444,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
+ /*
+ * The runtime per-cpu areas have been allocated by
+ * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+ * freed shortly, so we must move over to the runtime per-cpu area.
+ */
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index c45a83512805..7e1624ecab3c 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
unsigned long size = sizeof(secondary_holding_pen_release);
secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
+ dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
}
@@ -90,8 +90,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* the boot protocol.
*/
writeq_relaxed(pa_holding_pen, release_addr);
- __flush_dcache_area((__force void *)release_addr,
- sizeof(*release_addr));
+ dcache_clean_inval_poc((__force unsigned long)release_addr,
+ (__force unsigned long)release_addr +
+ sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index de07147a7926..b189de5ca6cb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -68,13 +68,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
unsigned long fp = frame->fp;
struct stack_info info;
- if (fp & 0xf)
- return -EINVAL;
-
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp, &info))
+ /* Final frame; nothing to unwind */
+ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+ return -ENOENT;
+
+ if (fp & 0x7)
+ return -EINVAL;
+
+ if (!on_accessible_stack(tsk, fp, 16, &info))
return -EINVAL;
if (test_bit(info.type, frame->stacks_done))
@@ -128,12 +132,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = ptrauth_strip_insn_pac(frame->pc);
- /*
- * This is a terminal record, so we have finished unwinding.
- */
- if (!frame->fp && !frame->pc)
- return -ENOENT;
-
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
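
A hedged sketch of how a consumer distinguishes the new "final frame" termination (-ENOENT) from a corrupt frame (-EINVAL); this follows the shape of the kernel's walk_stackframe(), but is illustrative only:

static void example_walk(struct task_struct *tsk, struct stackframe *frame,
			 bool (*fn)(void *, unsigned long), void *data)
{
	while (1) {
		if (!fn(data, frame->pc))
			break;
		/* Any non-zero return (-ENOENT or -EINVAL) ends the walk. */
		if (unwind_frame(tsk, frame))
			break;
	}
}
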
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index e3f72df9509d..938ce6fbee8a 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -7,6 +7,7 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
+#include <asm/cpuidle.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
@@ -91,6 +92,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int ret = 0;
unsigned long flags;
struct sleep_stack_data state;
+ struct arm_cpuidle_irq_context context;
/* Report any MTE async fault before going to suspend */
mte_suspend_enter();
@@ -103,12 +105,18 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
flags = local_daif_save();
/*
- * Function graph tracer state gets incosistent when the kernel
+ * Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka suspend finishers) hence
* disable graph tracing during their execution.
*/
pause_graph_tracing();
+ /*
+ * Switch to using DAIF.IF instead of PMR in order to reliably
+ * resume if we're using pseudo-NMIs.
+ */
+ arm_cpuidle_save_irq_context(&context);
+
if (__cpu_suspend_enter(&state)) {
/* Call the suspend finisher */
ret = fn(arg);
@@ -126,6 +134,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
RCU_NONIDLE(__cpu_suspend_exit());
}
+ arm_cpuidle_restore_irq_context(&context);
+
unpause_graph_tracing();
/*
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 265fe3eb1069..db5159a3055f 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -41,7 +41,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
dsb(ish);
}
- ret = __flush_cache_user_range(start, start + chunk);
+ ret = caches_clean_inval_user_pou(start, start + chunk);
if (ret)
return ret;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index a05d34f0e82a..b03e383d944a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
+#include <asm/patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
@@ -45,11 +46,102 @@
#include <asm/system_misc.h>
#include <asm/sysreg.h>
-static const char *handler[] = {
- "Synchronous Abort",
- "IRQ",
- "FIQ",
- "Error"
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+ return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+ __check_eq, __check_ne, __check_cs, __check_cc,
+ __check_mi, __check_pl, __check_vs, __check_vc,
+ __check_hi, __check_ls, __check_ge, __check_lt,
+ __check_gt, __check_le, __check_al, __check_al
};
int show_unhandled_signals = 0;
@@ -751,27 +843,8 @@ const char *esr_get_class_string(u32 esr)
}
/*
- * bad_mode handles the impossible case in the exception vector. This is always
- * fatal.
- */
-asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
-{
- arm64_enter_nmi(regs);
-
- console_verbose();
-
- pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
- handler[reason], smp_processor_id(), esr,
- esr_get_class_string(esr));
-
- __show_regs(regs);
- local_daif_mask();
- panic("bad mode");
-}
-
-/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
- * exceptions taken from EL0. Unlike bad_mode, this returns.
+ * exceptions taken from EL0.
*/
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
@@ -789,15 +862,11 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
-asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
- unsigned int esr = read_sysreg(esr_el1);
- unsigned long far = read_sysreg(far_el1);
-
- arm64_enter_nmi(regs);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n");
@@ -870,15 +939,11 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
}
}
-asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
+void do_serror(struct pt_regs *regs, unsigned int esr)
{
- arm64_enter_nmi(regs);
-
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
-
- arm64_exit_nmi(regs);
}
/* GENERIC_BUG traps */
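For illustration, a minimal sketch (not part of the patch; the helper name is hypothetical) of how the condition-check table introduced above would be consulted when emulating a conditional AArch32 instruction, assuming the A32 condition field sits in bits [31:28]:

        static bool aarch32_insn_cond_passes(u32 insn, unsigned long pstate)
        {
                u32 cond = insn >> 28;  /* A32 condition field, 0b0000..0b1111 */

                /* 0b1111 ("nv") resolves to the same check as 0b1110 ("al") */
                return aarch32_opcode_cond_checks[cond](pstate);
        }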
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 3964acf5451e..a4eba0908bfa 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -20,8 +20,6 @@ if VIRTUALIZATION
menuconfig KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on OF
- # for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET && MULTIUSER
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -38,8 +36,7 @@ menuconfig KVM
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_VCPU_RUN_PID_CHANGE
- select TASKSTATS
- select TASK_DELAY_ACCT
+ select SCHED_INFO
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 589921392cb1..989bb5dad2c8 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM) += hyp/
kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
- $(KVM)/vfio.o $(KVM)/irqchip.o \
+ $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \
arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o \
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 74e0699661e9..3df67c127489 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -9,6 +9,7 @@
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/uaccess.h>
#include <clocksource/arm_arch_timer.h>
@@ -973,36 +974,154 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
return 0;
}
-int kvm_timer_hyp_init(bool has_gic)
+static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
- struct arch_timer_kvm_info *info;
- int err;
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
- info = arch_timer_get_kvm_info();
- timecounter = &info->timecounter;
+ return 0;
+}
- if (!timecounter->cc) {
- kvm_err("kvm_arch_timer: uninitialized timecounter\n");
- return -ENODEV;
+static int timer_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool val)
+{
+ if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
+ return irq_chip_set_parent_state(d, which, val);
+
+ if (val)
+ irq_chip_mask_parent(d);
+ else
+ irq_chip_unmask_parent(d);
+
+ return 0;
+}
+
+static void timer_irq_eoi(struct irq_data *d)
+{
+ if (!irqd_is_forwarded_to_vcpu(d))
+ irq_chip_eoi_parent(d);
+}
+
+static void timer_irq_ack(struct irq_data *d)
+{
+ d = d->parent_data;
+ if (d->chip->irq_ack)
+ d->chip->irq_ack(d);
+}
+
+static struct irq_chip timer_chip = {
+ .name = "KVM",
+ .irq_ack = timer_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = timer_irq_eoi,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_vcpu_affinity = timer_irq_set_vcpu_affinity,
+ .irq_set_irqchip_state = timer_irq_set_irqchip_state,
+};
+
+static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ irq_hw_number_t hwirq = (uintptr_t)arg;
+
+ return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &timer_chip, NULL);
+}
+
+static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+}
+
+static const struct irq_domain_ops timer_domain_ops = {
+ .alloc = timer_irq_domain_alloc,
+ .free = timer_irq_domain_free,
+};
+
+static struct irq_ops arch_timer_irq_ops = {
+ .get_input_level = kvm_arch_timer_get_input_level,
+};
+
+static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
+{
+ *flags = irq_get_trigger_type(virq);
+ if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
+ kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
+ virq);
+ *flags = IRQF_TRIGGER_LOW;
}
+}
- /* First, do the virtual EL1 timer irq */
+static int kvm_irq_init(struct arch_timer_kvm_info *info)
+{
+ struct irq_domain *domain = NULL;
if (info->virtual_irq <= 0) {
kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
info->virtual_irq);
return -ENODEV;
}
+
host_vtimer_irq = info->virtual_irq;
+ kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);
+
+ if (kvm_vgic_global_state.no_hw_deactivation) {
+ struct fwnode_handle *fwnode;
+ struct irq_data *data;
+
+ fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
+ if (!fwnode)
+ return -ENOMEM;
+
+ /* Assume both vtimer and ptimer are in the same parent */
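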
+ data = irq_get_irq_data(host_vtimer_irq);
+ domain = irq_domain_create_hierarchy(data->domain, 0,
+ NR_KVM_TIMERS, fwnode,
+ &timer_domain_ops, NULL);
+ if (!domain) {
+ irq_domain_free_fwnode(fwnode);
+ return -ENOMEM;
+ }
+
+ arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
+ WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
+ (void *)TIMER_VTIMER));
+ }
- host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
- if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
- host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
- kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
- host_vtimer_irq);
- host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+ if (info->physical_irq > 0) {
+ host_ptimer_irq = info->physical_irq;
+ kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);
+
+ if (domain)
+ WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
+ (void *)TIMER_PTIMER));
}
+ return 0;
+}
+
+int kvm_timer_hyp_init(bool has_gic)
+{
+ struct arch_timer_kvm_info *info;
+ int err;
+
+ info = arch_timer_get_kvm_info();
+ timecounter = &info->timecounter;
+
+ if (!timecounter->cc) {
+ kvm_err("kvm_arch_timer: uninitialized timecounter\n");
+ return -ENODEV;
+ }
+
+ err = kvm_irq_init(info);
+ if (err)
+ return err;
+
+ /* First, do the virtual EL1 timer irq */
+
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
"kvm guest vtimer", kvm_get_running_vcpus());
if (err) {
@@ -1027,15 +1146,6 @@ int kvm_timer_hyp_init(bool has_gic)
/* Now let's do the physical EL1 timer irq */
if (info->physical_irq > 0) {
- host_ptimer_irq = info->physical_irq;
- host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
- if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
- host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
- kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
- host_ptimer_irq);
- host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
- }
-
err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
"kvm guest ptimer", kvm_get_running_vcpus());
if (err) {
@@ -1143,7 +1253,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_vtimer->host_timer_irq,
map.direct_vtimer->irq.irq,
- kvm_arch_timer_get_input_level);
+ &arch_timer_irq_ops);
if (ret)
return ret;
@@ -1151,7 +1261,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_ptimer->host_timer_irq,
map.direct_ptimer->irq.irq,
- kvm_arch_timer_get_input_level);
+ &arch_timer_irq_ops);
}
if (ret)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 1cb39c0803a4..e9a2b8f27792 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -93,6 +93,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
kvm->arch.return_nisv_io_abort_to_user = true;
break;
+ case KVM_CAP_ARM_MTE:
+ if (!system_supports_mte() || kvm->created_vcpus)
+ return -EINVAL;
+ r = 0;
+ kvm->arch.mte_enabled = true;
+ break;
default:
r = -EINVAL;
break;
@@ -237,6 +243,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_ARM_MTE:
+ r = system_supports_mte();
+ break;
case KVM_CAP_STEAL_TIME:
r = kvm_arm_pvtime_supported();
break;
@@ -689,9 +698,22 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
vgic_v4_load(vcpu);
preempt_enable();
}
+
+ if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
+ kvm_pmu_handle_pmcr(vcpu,
+ __vcpu_sys_reg(vcpu, PMCR_EL0));
}
}
+static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
+{
+ if (likely(!vcpu_mode_is_32bit(vcpu)))
+ return false;
+
+ return !system_supports_32bit_el0() ||
+ static_branch_unlikely(&arm64_mismatched_32bit_el0);
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -720,11 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
return ret;
}
- if (run->immediate_exit)
- return -EINTR;
-
vcpu_load(vcpu);
+ if (run->immediate_exit) {
+ ret = -EINTR;
+ goto out;
+ }
+
kvm_sigset_activate(vcpu);
ret = 1;
@@ -875,7 +899,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* with the asymmetric AArch32 case), return to userspace with
* a fatal error.
*/
- if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+ if (vcpu_mode_is_bad_32bit(vcpu)) {
/*
* As we have caught the guest red-handed, decide that
* it isn't fit for purpose anymore by making the vcpu
@@ -897,6 +921,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_sigset_deactivate(vcpu);
+out:
+ /*
+ * In the unlikely event that we are returning to userspace
+ * with pending exceptions or PC adjustment, commit these
+ * adjustments in order to give userspace a consistent view of
+ * the vcpu state. Note that this relies on __kvm_adjust_pc()
+ * being preempt-safe on VHE.
+ */
+ if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_INCREMENT_PC)))
+ kvm_call_hyp(__kvm_adjust_pc, vcpu);
+
vcpu_put(vcpu);
return ret;
}
@@ -1064,7 +1100,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
stage2_unmap_vm(vcpu->kvm);
else
- __flush_icache_all();
+ icache_inval_all_pou();
}
vcpu_reset_hcr(vcpu);
@@ -1336,6 +1372,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
return 0;
}
+ case KVM_ARM_MTE_COPY_TAGS: {
+ struct kvm_arm_copy_mte_tags copy_tags;
+
+ if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
+ return -EFAULT;
+ return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
+ }
default:
return -EINVAL;
}
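As a rough userspace sketch (hypothetical file descriptor and error handling, assuming the <linux/kvm.h> definitions): KVM_CAP_ARM_MTE is a per-VM capability enabled through KVM_ENABLE_CAP, and per the kvm->created_vcpus check above it must be enabled before the first vCPU is created.

        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_ARM_MTE,
        };

        /* Issue on the VM fd before any KVM_CREATE_VCPU */
        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
                err(1, "KVM_ENABLE_CAP(KVM_CAP_ARM_MTE)");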
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 5cb4a1cd5603..1dfb83578277 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -28,20 +28,40 @@
#include "trace.h"
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
- VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
- VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
- VCPU_STAT("mmio_exit_user", mmio_exit_user),
- VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
- VCPU_STAT("exits", exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_user),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, exits)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
static bool core_reg_offset_is_vreg(u64 off)
@@ -995,3 +1015,89 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
+
+long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags)
+{
+ gpa_t guest_ipa = copy_tags->guest_ipa;
+ size_t length = copy_tags->length;
+ void __user *tags = copy_tags->addr;
+ gpa_t gfn;
+ bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
+ int ret = 0;
+
+ if (!kvm_has_mte(kvm))
+ return -EINVAL;
+
+ if (copy_tags->reserved[0] || copy_tags->reserved[1])
+ return -EINVAL;
+
+ if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
+ return -EINVAL;
+
+ if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
+ return -EINVAL;
+
+ gfn = gpa_to_gfn(guest_ipa);
+
+ mutex_lock(&kvm->slots_lock);
+
+ while (length > 0) {
+ kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
+ void *maddr;
+ unsigned long num_tags;
+ struct page *page;
+
+ if (is_error_noslot_pfn(pfn)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ page = pfn_to_online_page(pfn);
+ if (!page) {
+ /* Reject ZONE_DEVICE memory */
+ ret = -EFAULT;
+ goto out;
+ }
+ maddr = page_address(page);
+
+ if (!write) {
+ if (test_bit(PG_mte_tagged, &page->flags))
+ num_tags = mte_copy_tags_to_user(tags, maddr,
+ MTE_GRANULES_PER_PAGE);
+ else
+ /* No tags in memory, so write zeros */
+ num_tags = MTE_GRANULES_PER_PAGE -
+ clear_user(tags, MTE_GRANULES_PER_PAGE);
+ kvm_release_pfn_clean(pfn);
+ } else {
+ num_tags = mte_copy_tags_from_user(maddr, tags,
+ MTE_GRANULES_PER_PAGE);
+
+ /*
+ * Set the flag only after checking that the
+ * write completed fully
+ */
+ if (num_tags == MTE_GRANULES_PER_PAGE)
+ set_bit(PG_mte_tagged, &page->flags);
+
+ kvm_release_pfn_dirty(pfn);
+ }
+
+ if (num_tags != MTE_GRANULES_PER_PAGE) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ gfn++;
+ tags += num_tags;
+ length -= PAGE_SIZE;
+ }
+
+out:
+ mutex_unlock(&kvm->slots_lock);
+ /* If some data has been copied, report the number of bytes copied */
+ if (length != copy_tags->length)
+ return copy_tags->length - length;
+ return ret;
+}
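A hedged userspace sketch of the new ioctl (hypothetical variables, assuming the <linux/kvm.h> UAPI above): guest_ipa and length must be page aligned, tags are exchanged at one byte per 16-byte granule, and a partial transfer is reported as the number of bytes copied.

        struct kvm_arm_copy_mte_tags copy = {
                .guest_ipa = ipa,                       /* page-aligned IPA */
                .length    = page_size,                 /* multiple of the page size */
                .addr      = tag_buf,                   /* page_size / 16 tag bytes per page */
                .flags     = KVM_ARM_TAGS_FROM_GUEST,   /* read tags out of the guest */
        };

        long ret = ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy);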
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index e831d3dfd50d..435346ea1504 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -13,6 +13,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>
.text
@@ -51,6 +52,9 @@ alternative_else_nop_endif
add x29, x0, #VCPU_CONTEXT
+ // mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
+ mte_switch_to_guest x29, x1, x2
+
// Macro ptrauth_switch_to_guest format:
// ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
// The below macro to restore guest keys is not implemented in C code
@@ -142,6 +146,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// when this feature is enabled for kernel code.
ptrauth_switch_to_hyp x1, x2, x3, x4, x5
+ // mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
+ mte_switch_to_hyp x1, x2, x3
+
// Restore hyp's sp_el0
restore_sp_el0 x2, x3
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 73629094f903..0418399e0a20 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -112,7 +112,8 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
new |= (old & PSR_C_BIT);
new |= (old & PSR_V_BIT);
- // TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
+ if (kvm_has_mte(vcpu->kvm))
+ new |= PSR_TCO_BIT;
new |= (old & PSR_DIT_BIT);
@@ -296,7 +297,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
*vcpu_pc(vcpu) = vect_offset;
}
-void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
if (vcpu_el1_is_32bit(vcpu)) {
switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +330,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
}
}
}
+
+/*
+ * Adjust the guest PC (and potentially exception state) depending on
+ * flags provided by the emulation code.
+ */
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+ kvm_inject_exception(vcpu);
+ vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_EXCEPT_MASK);
+ } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+ kvm_skip_instr(vcpu);
+ vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+ }
+}
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 5f49df4ffdd8..9aa9b73475c9 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -76,6 +76,7 @@ el1_trap:
b __guest_exit
el1_irq:
+el1_fiq:
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IRQ
b __guest_exit
@@ -131,7 +132,6 @@ SYM_CODE_END(\label)
invalid_vector el2t_error_invalid
invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid
- invalid_vector el1_fiq_invalid
.ltorg
@@ -179,12 +179,12 @@ SYM_CODE_START(__kvm_hyp_vector)
valid_vect el1_sync // Synchronous 64-bit EL1
valid_vect el1_irq // IRQ 64-bit EL1
- invalid_vect el1_fiq_invalid // FIQ 64-bit EL1
+ valid_vect el1_fiq // FIQ 64-bit EL1
valid_vect el1_error // Error 64-bit EL1
valid_vect el1_sync // Synchronous 32-bit EL1
valid_vect el1_irq // IRQ 32-bit EL1
- invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
+ valid_vect el1_fiq // FIQ 32-bit EL1
valid_vect el1_error // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index 61716359035d..4fdfeabefeb4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -13,8 +13,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
-void kvm_inject_exception(struct kvm_vcpu *vcpu);
-
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
@@ -44,22 +42,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
}
/*
- * Adjust the guest PC on entry, depending on flags provided by EL1
- * for the purpose of emulation (MMIO, sysreg) or exception injection.
- */
-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
- kvm_inject_exception(vcpu);
- vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
- KVM_ARM64_EXCEPT_MASK);
- } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
- kvm_skip_instr(vcpu);
- vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
- }
-}
-
-/*
* Skip an instruction while host sysregs are live.
* Assumes host is always 64-bit.
*/
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index cce43bfe158f..de7e14c862e6 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -14,6 +14,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
@@ -26,6 +27,16 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}
+static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
+
+ if (!vcpu)
+ vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+ return kvm_has_mte(kern_hyp_va(vcpu->kvm));
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, CSSELR_EL1) = read_sysreg(csselr_el1);
@@ -46,6 +57,11 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
+ if (ctxt_has_mte(ctxt)) {
+ ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
+ ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
+ }
+
ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
@@ -107,6 +123,11 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);
+ if (ctxt_has_mte(ctxt)) {
+ write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
+ write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
+ }
+
if (!has_vhe() &&
cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
ctxt->__hyp_running_vcpu) {
diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index 18a4494337bd..fb0f523d1492 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -7,7 +7,7 @@
#include <nvhe/memory.h>
#include <nvhe/spinlock.h>
-#define HYP_NO_ORDER UINT_MAX
+#define HYP_NO_ORDER USHRT_MAX
struct hyp_pool {
/*
@@ -19,48 +19,13 @@ struct hyp_pool {
struct list_head free_area[MAX_ORDER];
phys_addr_t range_start;
phys_addr_t range_end;
- unsigned int max_order;
+ unsigned short max_order;
};
-static inline void hyp_page_ref_inc(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
-
- hyp_spin_lock(&pool->lock);
- p->refcount++;
- hyp_spin_unlock(&pool->lock);
-}
-
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
- int ret;
-
- hyp_spin_lock(&pool->lock);
- p->refcount--;
- ret = (p->refcount == 0);
- hyp_spin_unlock(&pool->lock);
-
- return ret;
-}
-
-static inline void hyp_set_page_refcounted(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
-
- hyp_spin_lock(&pool->lock);
- if (p->refcount) {
- hyp_spin_unlock(&pool->lock);
- BUG();
- }
- p->refcount = 1;
- hyp_spin_unlock(&pool->lock);
-}
-
/* Allocation */
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
-void hyp_get_page(void *addr);
-void hyp_put_page(void *addr);
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_get_page(struct hyp_pool *pool, void *addr);
+void hyp_put_page(struct hyp_pool *pool, void *addr);
/* Used pages cannot be freed */
int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 42d81ec739fa..9c227d87c36d 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -23,7 +23,7 @@ extern struct host_kvm host_kvm;
int __pkvm_prot_finalize(void);
int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
+int kvm_host_prepare_stage2(void *pgt_pool_base);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
static __always_inline void __load_host_stage2(void)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index fd78bde939ee..592b7edb3edb 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -7,12 +7,9 @@
#include <linux/types.h>
-struct hyp_pool;
struct hyp_page {
- unsigned int refcount;
- unsigned int order;
- struct hyp_pool *pool;
- struct list_head node;
+ unsigned short refcount;
+ unsigned short order;
};
extern u64 __hyp_vmemmap;
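A side effect worth noting: with the free-list node moved out of struct hyp_page (see the page_alloc.c changes below) and the refcount/order fields narrowed to u16, each hyp vmemmap entry shrinks to four bytes. A minimal sketch of that invariant, not part of the patch:

        /* Two u16 fields and no list_head: 4 bytes per hyp vmemmap entry */
        static_assert(sizeof(struct hyp_page) == 4);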
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 0095f6289742..8ec3a5a7744b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -78,19 +78,20 @@ static inline unsigned long hyp_s1_pgtable_pages(void)
return res;
}
-static inline unsigned long host_s2_mem_pgtable_pages(void)
+static inline unsigned long host_s2_pgtable_pages(void)
{
+ unsigned long res;
+
/*
* Include an extra 16 pages to safely upper-bound the worst case of
* concatenated pgds.
*/
- return __hyp_pgtable_total_pages() + 16;
-}
+ res = __hyp_pgtable_total_pages() + 16;
-static inline unsigned long host_s2_dev_pgtable_pages(void)
-{
/* Allow 1 GiB for MMIO mappings */
- return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+ res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+ return res;
}
#endif /* __KVM_HYP_MM_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S
index 36cef6915428..958734f4d6b0 100644
--- a/arch/arm64/kvm/hyp/nvhe/cache.S
+++ b/arch/arm64/kvm/hyp/nvhe/cache.S
@@ -7,7 +7,7 @@
#include <asm/assembler.h>
#include <asm/alternative.h>
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f36420a80474..1632f001f4ed 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}
+static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+ __kvm_adjust_pc(kern_hyp_va(vcpu));
+}
+
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
__kvm_flush_vm_context();
@@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
+ HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index e342f7f4f4fb..d938ce95d3bd 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -23,8 +23,7 @@
extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;
-struct hyp_pool host_s2_mem;
-struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_pool;
/*
* Copies of the host's CPU features registers holding sanitized values.
@@ -36,7 +35,7 @@ static const u8 pkvm_hyp_id = 1;
static void *host_s2_zalloc_pages_exact(size_t size)
{
- return hyp_alloc_pages(&host_s2_mem, get_order(size));
+ return hyp_alloc_pages(&host_s2_pool, get_order(size));
}
static void *host_s2_zalloc_page(void *pool)
@@ -44,20 +43,24 @@ static void *host_s2_zalloc_page(void *pool)
return hyp_alloc_pages(pool, 0);
}
-static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
+static void host_s2_get_page(void *addr)
+{
+ hyp_get_page(&host_s2_pool, addr);
+}
+
+static void host_s2_put_page(void *addr)
+{
+ hyp_put_page(&host_s2_pool, addr);
+}
+
+static int prepare_s2_pool(void *pgt_pool_base)
{
unsigned long nr_pages, pfn;
int ret;
- pfn = hyp_virt_to_pfn(mem_pgt_pool);
- nr_pages = host_s2_mem_pgtable_pages();
- ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
- if (ret)
- return ret;
-
- pfn = hyp_virt_to_pfn(dev_pgt_pool);
- nr_pages = host_s2_dev_pgtable_pages();
- ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
+ pfn = hyp_virt_to_pfn(pgt_pool_base);
+ nr_pages = host_s2_pgtable_pages();
+ ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
if (ret)
return ret;
@@ -67,8 +70,8 @@ static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.page_count = hyp_page_count,
- .get_page = hyp_get_page,
- .put_page = hyp_put_page,
+ .get_page = host_s2_get_page,
+ .put_page = host_s2_put_page,
};
return 0;
@@ -86,7 +89,7 @@ static void prepare_host_vtcr(void)
id_aa64mmfr1_el1_sys_val, phys_shift);
}
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
+int kvm_host_prepare_stage2(void *pgt_pool_base)
{
struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
int ret;
@@ -94,7 +97,7 @@ int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
prepare_host_vtcr();
hyp_spin_lock_init(&host_kvm.lock);
- ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
+ ret = prepare_s2_pool(pgt_pool_base);
if (ret)
return ret;
@@ -199,11 +202,10 @@ static bool range_is_memory(u64 start, u64 end)
}
static inline int __host_stage2_idmap(u64 start, u64 end,
- enum kvm_pgtable_prot prot,
- struct hyp_pool *pool)
+ enum kvm_pgtable_prot prot)
{
return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
- prot, pool);
+ prot, &host_s2_pool);
}
static int host_stage2_idmap(u64 addr)
@@ -211,7 +213,6 @@ static int host_stage2_idmap(u64 addr)
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
struct kvm_mem_range range;
bool is_memory = find_mem_range(addr, &range);
- struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
int ret;
if (is_memory)
@@ -222,22 +223,21 @@ static int host_stage2_idmap(u64 addr)
if (ret)
goto unlock;
- ret = __host_stage2_idmap(range.start, range.end, prot, pool);
- if (is_memory || ret != -ENOMEM)
+ ret = __host_stage2_idmap(range.start, range.end, prot);
+ if (ret != -ENOMEM)
goto unlock;
/*
- * host_s2_mem has been provided with enough pages to cover all of
- * memory with page granularity, so we should never hit the ENOMEM case.
- * However, it is difficult to know how much of the MMIO range we will
- * need to cover upfront, so we may need to 'recycle' the pages if we
- * run out.
+ * The pool has been provided with enough pages to cover all of memory
+ * with page granularity, but it is difficult to know how much of the
+ * MMIO range we will need to cover upfront, so we may need to 'recycle'
+ * the pages if we run out.
*/
ret = host_stage2_unmap_dev_all();
if (ret)
goto unlock;
- ret = __host_stage2_idmap(range.start, range.end, prot, pool);
+ ret = __host_stage2_idmap(range.start, range.end, prot);
unlock:
hyp_spin_unlock(&host_kvm.lock);
@@ -258,7 +258,7 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
hyp_spin_lock(&host_kvm.lock);
ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
- &host_s2_mem, pkvm_hyp_id);
+ &host_s2_pool, pkvm_hyp_id);
hyp_spin_unlock(&host_kvm.lock);
return ret != -EAGAIN ? ret : 0;
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index 237e03bf0cb1..41fc25bdfb34 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -32,7 +32,7 @@ u64 __hyp_vmemmap;
*/
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned int order)
+ unsigned short order)
{
phys_addr_t addr = hyp_page_to_phys(p);
@@ -51,21 +51,49 @@ static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned int order)
+ unsigned short order)
{
struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
- if (!buddy || buddy->order != order || list_empty(&buddy->node))
+ if (!buddy || buddy->order != order || buddy->refcount)
return NULL;
return buddy;
}
+/*
+ * Pages that are available for allocation are tracked in free-lists, so we use
+ * the pages themselves to store the list nodes to avoid wasting space. As the
+ * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
+ * path to optimize allocation speed), we also need to clean up the list node in
+ * each page when we take it out of the list.
+ */
+static inline void page_remove_from_list(struct hyp_page *p)
+{
+ struct list_head *node = hyp_page_to_virt(p);
+
+ __list_del_entry(node);
+ memset(node, 0, sizeof(*node));
+}
+
+static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
+{
+ struct list_head *node = hyp_page_to_virt(p);
+
+ INIT_LIST_HEAD(node);
+ list_add_tail(node, head);
+}
+
+static inline struct hyp_page *node_to_page(struct list_head *node)
+{
+ return hyp_virt_to_page(node);
+}
+
static void __hyp_attach_page(struct hyp_pool *pool,
struct hyp_page *p)
{
- unsigned int order = p->order;
+ unsigned short order = p->order;
struct hyp_page *buddy;
memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
@@ -83,32 +111,23 @@ static void __hyp_attach_page(struct hyp_pool *pool,
break;
/* Take the buddy out of its list, and coalesce with @p */
- list_del_init(&buddy->node);
+ page_remove_from_list(buddy);
buddy->order = HYP_NO_ORDER;
p = min(p, buddy);
}
/* Mark the new head, and insert it */
p->order = order;
- list_add_tail(&p->node, &pool->free_area[order]);
-}
-
-static void hyp_attach_page(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
-
- hyp_spin_lock(&pool->lock);
- __hyp_attach_page(pool, p);
- hyp_spin_unlock(&pool->lock);
+ page_add_to_list(p, &pool->free_area[order]);
}
static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned int order)
+ unsigned short order)
{
struct hyp_page *buddy;
- list_del_init(&p->node);
+ page_remove_from_list(p);
while (p->order > order) {
/*
* The buddy of order n - 1 currently has HYP_NO_ORDER as it
@@ -119,30 +138,64 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
p->order--;
buddy = __find_buddy_nocheck(pool, p, p->order);
buddy->order = p->order;
- list_add_tail(&buddy->node, &pool->free_area[buddy->order]);
+ page_add_to_list(buddy, &pool->free_area[buddy->order]);
}
return p;
}
-void hyp_put_page(void *addr)
+static inline void hyp_page_ref_inc(struct hyp_page *p)
{
- struct hyp_page *p = hyp_virt_to_page(addr);
+ BUG_ON(p->refcount == USHRT_MAX);
+ p->refcount++;
+}
+static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+{
+ p->refcount--;
+ return (p->refcount == 0);
+}
+
+static inline void hyp_set_page_refcounted(struct hyp_page *p)
+{
+ BUG_ON(p->refcount);
+ p->refcount = 1;
+}
+
+static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
+{
if (hyp_page_ref_dec_and_test(p))
- hyp_attach_page(p);
+ __hyp_attach_page(pool, p);
+}
+
+/*
+ * Changes to the buddy tree and page refcounts must be done with the hyp_pool
+ * lock held. If a refcount change requires an update to the buddy tree (e.g.
+ * hyp_put_page()), both operations must be done within the same critical
+ * section to guarantee transient states (e.g. a page with null refcount but
+ * not yet attached to a free list) can't be observed by well-behaved readers.
+ */
+void hyp_put_page(struct hyp_pool *pool, void *addr)
+{
+ struct hyp_page *p = hyp_virt_to_page(addr);
+
+ hyp_spin_lock(&pool->lock);
+ __hyp_put_page(pool, p);
+ hyp_spin_unlock(&pool->lock);
}
-void hyp_get_page(void *addr)
+void hyp_get_page(struct hyp_pool *pool, void *addr)
{
struct hyp_page *p = hyp_virt_to_page(addr);
+ hyp_spin_lock(&pool->lock);
hyp_page_ref_inc(p);
+ hyp_spin_unlock(&pool->lock);
}
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
- unsigned int i = order;
+ unsigned short i = order;
struct hyp_page *p;
hyp_spin_lock(&pool->lock);
@@ -156,11 +209,11 @@ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
}
/* Extract it from the tree at the right order */
- p = list_first_entry(&pool->free_area[i], struct hyp_page, node);
+ p = node_to_page(pool->free_area[i].next);
p = __hyp_extract_page(pool, p, order);
- hyp_spin_unlock(&pool->lock);
hyp_set_page_refcounted(p);
+ hyp_spin_unlock(&pool->lock);
return hyp_page_to_virt(p);
}
@@ -181,15 +234,14 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
/* Init the vmemmap portion */
p = hyp_phys_to_page(phys);
- memset(p, 0, sizeof(*p) * nr_pages);
for (i = 0; i < nr_pages; i++) {
- p[i].pool = pool;
- INIT_LIST_HEAD(&p[i].node);
+ p[i].order = 0;
+ hyp_set_page_refcounted(&p[i]);
}
/* Attach the unused pages to the buddy tree */
for (i = reserved_pages; i < nr_pages; i++)
- __hyp_attach_page(pool, &p[i]);
+ __hyp_put_page(pool, &p[i]);
return 0;
}
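For reference, a minimal sketch (hypothetical helper, not in the patch) of the buddy pairing rule that __find_buddy_nocheck() applies before range-checking the result: the buddy of a block of a given order is found by flipping the address bit corresponding to the block size.

        static inline phys_addr_t hyp_buddy_of(phys_addr_t addr, unsigned short order)
        {
                /* Blocks of size PAGE_SIZE << order pair up by flipping that bit */
                return addr ^ (PAGE_SIZE << order);
        }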
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 7488f53b0aa2..0b574d106519 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -17,7 +17,6 @@
#include <nvhe/trap_handler.h>
struct hyp_pool hpool;
-struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
unsigned long hyp_nr_cpus;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -25,8 +24,8 @@ unsigned long hyp_nr_cpus;
static void *vmemmap_base;
static void *hyp_pgt_base;
-static void *host_s2_mem_pgt_base;
-static void *host_s2_dev_pgt_base;
+static void *host_s2_pgt_base;
+static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static int divide_memory_pool(void *virt, unsigned long size)
{
@@ -45,14 +44,9 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!hyp_pgt_base)
return -ENOMEM;
- nr_pages = host_s2_mem_pgtable_pages();
- host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages);
- if (!host_s2_mem_pgt_base)
- return -ENOMEM;
-
- nr_pages = host_s2_dev_pgtable_pages();
- host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages);
- if (!host_s2_dev_pgt_base)
+ nr_pages = host_s2_pgtable_pages();
+ host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
+ if (!host_s2_pgt_base)
return -ENOMEM;
return 0;
@@ -134,7 +128,8 @@ static void update_nvhe_init_params(void)
for (i = 0; i < hyp_nr_cpus; i++) {
params = per_cpu_ptr(&kvm_init_params, i);
params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
- __flush_dcache_area(params, sizeof(*params));
+ dcache_clean_inval_poc((unsigned long)params,
+ (unsigned long)params + sizeof(*params));
}
}
@@ -143,6 +138,16 @@ static void *hyp_zalloc_hyp_page(void *arg)
return hyp_alloc_pages(&hpool, 0);
}
+static void hpool_get_page(void *addr)
+{
+ hyp_get_page(&hpool, addr);
+}
+
+static void hpool_put_page(void *addr)
+{
+ hyp_put_page(&hpool, addr);
+}
+
void __noreturn __pkvm_init_finalise(void)
{
struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@@ -158,7 +163,7 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
- ret = kvm_host_prepare_stage2(host_s2_mem_pgt_base, host_s2_dev_pgt_base);
+ ret = kvm_host_prepare_stage2(host_s2_pgt_base);
if (ret)
goto out;
@@ -166,8 +171,8 @@ void __noreturn __pkvm_init_finalise(void)
.zalloc_page = hyp_zalloc_hyp_page,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
- .get_page = hyp_get_page,
- .put_page = hyp_put_page,
+ .get_page = hpool_get_page,
+ .put_page = hpool_put_page,
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index e9f6ea704d07..f7af9688c1f7 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>
@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_save_host_buffers_nvhe(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
/*
* We must restore the 32-bit state before the sysregs, thanks
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 83dc3b271bc5..38ed0f6f2703 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -104,7 +104,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
* you should be running with VHE enabled.
*/
if (icache_is_vpipt())
- __flush_icache_all();
+ icache_inval_all_pou();
__tlb_switch_to_host(&cxt);
}
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index c37c1dc4feaf..05321f4165e3 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -577,12 +577,24 @@ static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
mm_ops->put_page(ptep);
}
+static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
+{
+ u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
+ return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
+}
+
+static bool stage2_pte_executable(kvm_pte_t pte)
+{
+ return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
+}
+
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
struct stage2_map_data *data)
{
kvm_pte_t new, old = *ptep;
u64 granule = kvm_granule_size(level), phys = data->phys;
+ struct kvm_pgtable *pgt = data->mmu->pgt;
struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_block_mapping_supported(addr, end, phys, level))
@@ -606,6 +618,14 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
}
+ /* Perform CMOs before installation of the guest stage-2 PTE */
+ if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
+ granule);
+
+ if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
+ mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
+
smp_store_release(ptep, new);
if (stage2_pte_is_counted(new))
mm_ops->get_page(ptep);
@@ -798,12 +818,6 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
return ret;
}
-static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
-{
- u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
- return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
-}
-
static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
@@ -839,8 +853,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
stage2_put_pte(ptep, mmu, addr, level, mm_ops);
if (need_flush) {
- __flush_dcache_area(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
+ kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
+ dcache_clean_inval_poc((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
}
if (childp)
@@ -861,10 +878,11 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
}
struct stage2_attr_data {
- kvm_pte_t attr_set;
- kvm_pte_t attr_clr;
- kvm_pte_t pte;
- u32 level;
+ kvm_pte_t attr_set;
+ kvm_pte_t attr_clr;
+ kvm_pte_t pte;
+ u32 level;
+ struct kvm_pgtable_mm_ops *mm_ops;
};
static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
@@ -873,6 +891,7 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
{
kvm_pte_t pte = *ptep;
struct stage2_attr_data *data = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_pte_valid(pte))
return 0;
@@ -887,8 +906,17 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* but worst-case the access flag update gets lost and will be
* set on the next access instead.
*/
- if (data->pte != pte)
+ if (data->pte != pte) {
+ /*
+ * Invalidate instruction cache before updating the guest
+ * stage-2 PTE if we are going to add executable permission.
+ */
+ if (mm_ops->icache_inval_pou &&
+ stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
+ mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
+ kvm_granule_size(level));
WRITE_ONCE(*ptep, pte);
+ }
return 0;
}
@@ -903,6 +931,7 @@ static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
struct stage2_attr_data data = {
.attr_set = attr_set & attr_mask,
.attr_clr = attr_clr & attr_mask,
+ .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_attr_walker,
@@ -988,11 +1017,15 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
struct kvm_pgtable *pgt = arg;
struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
kvm_pte_t pte = *ptep;
+ kvm_pte_t *pte_follow;
if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
return 0;
- __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+ pte_follow = kvm_pte_follow(pte, mm_ops);
+ dcache_clean_inval_poc((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
return 0;
}
diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c
index 83ca23ac259b..d654921dd09b 100644
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -71,8 +71,7 @@ void __init kvm_hyp_reserve(void)
}
hyp_mem_pages += hyp_s1_pgtable_pages();
- hyp_mem_pages += host_s2_mem_pgtable_pages();
- hyp_mem_pages += host_s2_dev_pgtable_pages();
+ hyp_mem_pages += host_s2_pgtable_pages();
/*
* The hyp_vmemmap needs to be backed by pages, but these pages
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 7b8f7db5c1ed..b3229924d243 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <linux/arm-smccc.h>
@@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__load_guest_stage2(vcpu->arch.hw_mmu);
__activate_traps(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c5d1f3c87dbd..3155c9e778f0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -85,7 +85,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
static bool kvm_is_device_pfn(unsigned long pfn)
{
- return !pfn_valid(pfn);
+ return !pfn_is_map_memory(pfn);
}
static void *stage2_memcache_zalloc_page(void *arg)
@@ -126,6 +126,16 @@ static void *kvm_host_va(phys_addr_t phys)
return __va(phys);
}
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+ __clean_dcache_guest_page(va, size);
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+ __invalidate_icache_guest_page(va, size);
+}
+
/*
* Unmapping vs dcache management:
*
@@ -432,6 +442,8 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.page_count = kvm_host_page_count,
.phys_to_virt = kvm_host_va,
.virt_to_phys = kvm_host_pa,
+ .dcache_clean_inval_poc = clean_dcache_guest_page,
+ .icache_inval_pou = invalidate_icache_guest_page,
};
/**
@@ -693,16 +705,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
-static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- __clean_dcache_guest_page(pfn, size);
-}
-
-static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- __invalidate_icache_guest_page(pfn, size);
-}
-
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
@@ -822,6 +824,74 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
return PAGE_SIZE;
}
+static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
+{
+ unsigned long pa;
+
+ if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
+ return huge_page_shift(hstate_vma(vma));
+
+ if (!(vma->vm_flags & VM_PFNMAP))
+ return PAGE_SHIFT;
+
+ VM_BUG_ON(is_vm_hugetlb_page(vma));
+
+ pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
+ ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
+ ALIGN(hva, PUD_SIZE) <= vma->vm_end)
+ return PUD_SHIFT;
+#endif
+
+ if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
+ ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
+ ALIGN(hva, PMD_SIZE) <= vma->vm_end)
+ return PMD_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
+/*
+ * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
+ * able to see the page's tags and therefore they must be initialised first. If
+ * PG_mte_tagged is set, tags have already been initialised.
+ *
+ * The race in the test/set of the PG_mte_tagged flag is handled by:
+ * - preventing VM_SHARED mappings in a memslot with MTE, which stops two VMs
+ *   racing to sanitise the same page
+ * - mmap_lock serialises a VM faulting a page in against the VMM performing
+ *   an mprotect() to add VM_MTE
+ */
+static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+ unsigned long size)
+{
+ unsigned long i, nr_pages = size >> PAGE_SHIFT;
+ struct page *page;
+
+ if (!kvm_has_mte(kvm))
+ return 0;
+
+ /*
+ * pfn_to_online_page() is used to reject ZONE_DEVICE pages
+ * that may not support tags.
+ */
+ page = pfn_to_online_page(pfn);
+
+ if (!page)
+ return -EFAULT;
+
+ for (i = 0; i < nr_pages; i++, page++) {
+ if (!test_bit(PG_mte_tagged, &page->flags)) {
+ mte_clear_page_tags(page_address(page));
+ set_bit(PG_mte_tagged, &page->flags);
+ }
+ }
+
+ return 0;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -830,6 +900,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
bool write_fault, writable, force_pte = false;
bool exec_fault;
bool device = false;
+ bool shared;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -853,26 +924,31 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- /* Let's check if we will get back a huge page backed by hugetlbfs */
+ /*
+ * Let's check if we will get back a huge page backed by hugetlbfs, or
+ * get a block mapping for a device MMIO region.
+ */
mmap_read_lock(current->mm);
- vma = find_vma_intersection(current->mm, hva, hva + 1);
+ vma = vma_lookup(current->mm, hva);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
mmap_read_unlock(current->mm);
return -EFAULT;
}
- if (is_vm_hugetlb_page(vma))
- vma_shift = huge_page_shift(hstate_vma(vma));
- else
- vma_shift = PAGE_SHIFT;
-
- if (logging_active ||
- (vma->vm_flags & VM_PFNMAP)) {
+ /*
+ * logging_active is guaranteed to never be true for VM_PFNMAP
+ * memslots.
+ */
+ if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
+ } else {
+ vma_shift = get_vma_page_shift(vma, hva);
}
+ shared = (vma->vm_flags & VM_PFNMAP);
+
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SHIFT:
@@ -943,8 +1019,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
if (kvm_is_device_pfn(pfn)) {
+ /*
+ * If the page was identified as device early by looking at
+ * the VMA flags, vma_pagesize is already representing the
+ * largest quantity we can map. If instead it was mapped
+ * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
+ * and must not be upgraded.
+ *
+ * In both cases, we don't let transparent_hugepage_adjust()
+ * change things at the last minute.
+ */
device = true;
- force_pte = true;
} else if (logging_active && !write_fault) {
/*
* Only actually map the page as writable if this was a write
@@ -965,19 +1050,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
- if (vma_pagesize == PAGE_SIZE && !force_pte)
+ if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
vma_pagesize = transparent_hugepage_adjust(memslot, hva,
&pfn, &fault_ipa);
+
+ if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+ /* Check the VMM hasn't introduced a new VM_SHARED VMA */
+ if (!shared)
+ ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
+ else
+ ret = -EFAULT;
+ if (ret)
+ goto out_unlock;
+ }
+
if (writable)
prot |= KVM_PGTABLE_PROT_W;
- if (fault_status != FSC_PERM && !device)
- clean_dcache_guest_page(pfn, vma_pagesize);
-
- if (exec_fault) {
+ if (exec_fault)
prot |= KVM_PGTABLE_PROT_X;
- invalidate_icache_guest_page(pfn, vma_pagesize);
- }
if (device)
prot |= KVM_PGTABLE_PROT_DEVICE;
@@ -1156,31 +1247,34 @@ out_unlock:
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
range->may_block);
- return 0;
+ return false;
}
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_pfn_t pfn = pte_pfn(range->pte);
+ int ret;
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
WARN_ON(range->end - range->start != 1);
- /*
- * We've moved a page around, probably through CoW, so let's treat it
- * just like a translation fault and clean the cache to the PoC.
- */
- clean_dcache_guest_page(pfn, PAGE_SIZE);
+ ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
+ if (ret)
+ return false;
/*
+ * We've moved a page around, probably through CoW, so let's treat
+ * it just like a translation fault; the map handler will clean
+ * the cache to the PoC.
+ *
* The MMU notifiers will have unmapped a huge PMD before calling
* ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
* therefore we never need to clear out a huge PMD through this
@@ -1190,7 +1284,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
PAGE_SIZE, __pfn_to_phys(pfn),
KVM_PGTABLE_PROT_R, NULL);
- return 0;
+ return false;
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1200,7 +1294,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
pte_t pte;
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
@@ -1213,7 +1307,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT);
@@ -1346,7 +1440,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
{
hva_t hva = mem->userspace_addr;
hva_t reg_end = hva + mem->memory_size;
- bool writable = !(mem->flags & KVM_MEM_READONLY);
int ret = 0;
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1363,8 +1456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
- * between them, so iterate over all of them to find out if we can map
- * any of them right now.
+ * between them, so iterate over all of them.
*
* +--------------------------------------------+
* +---------------+----------------+ +----------------+
@@ -1375,51 +1467,29 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
*/
do {
struct vm_area_struct *vma;
- hva_t vm_start, vm_end;
vma = find_vma_intersection(current->mm, hva, reg_end);
if (!vma)
break;
/*
- * Take the intersection of this VMA with the memory region
+ * VM_SHARED mappings are not allowed with MTE to avoid races
+ * when updating the PG_mte_tagged page flag; see
+ * sanitise_mte_tags for more details.
*/
- vm_start = max(hva, vma->vm_start);
- vm_end = min(reg_end, vma->vm_end);
+ if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
+ return -EINVAL;
if (vma->vm_flags & VM_PFNMAP) {
- gpa_t gpa = mem->guest_phys_addr +
- (vm_start - mem->userspace_addr);
- phys_addr_t pa;
-
- pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
- pa += vm_start - vma->vm_start;
-
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
- goto out;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
- vm_end - vm_start,
- writable);
- if (ret)
break;
+ }
}
- hva = vm_end;
+ hva = min(reg_end, vma->vm_end);
} while (hva < reg_end);
- if (change == KVM_MR_FLAGS_ONLY)
- goto out;
-
- spin_lock(&kvm->mmu_lock);
- if (ret)
- unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
- else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
- stage2_flush_memslot(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
-out:
mmap_read_unlock(current->mm);
return ret;
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index fd167d4f4215..f33825c995cb 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
+ mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_counter_value(vcpu, i, 0);
}
@@ -850,6 +851,9 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+ /* One-off reload of the PMU on first run */
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
return 0;
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 956cdc240148..cba7872d69a8 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -166,6 +166,29 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *tmp;
+ bool is32bit;
+ int i;
+
+ is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+ return false;
+
+ /* MTE is incompatible with AArch32 */
+ if (kvm_has_mte(vcpu->kvm) && is32bit)
+ return false;
+
+ /* Check that the vcpus are either all 32bit or all 64bit */
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+ return false;
+ }
+
+ return true;
+}
+
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
@@ -217,13 +240,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
+ if (!vcpu_allowed_register_width(vcpu)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
- ret = -EINVAL;
- goto out;
- }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 76ea2800c33e..f6f126eb6ac1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write,
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+ trace_trap_reg(__func__, rd->CRm, p->is_write,
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
return true;
}
@@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -1047,6 +1047,13 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64PFR1_EL1:
val &= ~FEATURE(ID_AA64PFR1_MTE);
+ if (kvm_has_mte(vcpu->kvm)) {
+ u64 pfr, mte;
+
+ pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
+ val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
+ }
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1302,6 +1309,23 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_mte(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+#define MTE_REG(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = undef_access, \
+ .reset = reset_unknown, \
+ .reg = name, \
+ .visibility = mte_visibility, \
+}
+
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
@@ -1470,8 +1494,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
- { SYS_DESC(SYS_RGSR_EL1), undef_access },
- { SYS_DESC(SYS_GCR_EL1), undef_access },
+ MTE_REG(RGSR_EL1),
+ MTE_REG(GCR_EL1),
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
@@ -1498,8 +1522,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
- { SYS_DESC(SYS_TFSR_EL1), undef_access },
- { SYS_DESC(SYS_TFSRE0_EL1), undef_access },
+ MTE_REG(TFSR_EL1),
+ MTE_REG(TFSRE0_EL1),
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
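
The ID_AA64PFR1_EL1 hunk above first clears the MTE field and then, for MTE-enabled guests, re-inserts the host's sanitised value via cpuid_feature_extract_unsigned_field() and FIELD_PREP(). As a minimal sketch of the underlying 4-bit field arithmetic (the shift value, names and sample numbers below are local to the sketch, not the kernel's macros):

/* Minimal sketch: extract a 4-bit ID register field and place it back,
 * mirroring cpuid_feature_extract_unsigned_field()/FIELD_PREP() above.
 * MTE_SHIFT and the sample value are illustrative, not kernel macros. */
#include <stdint.h>
#include <stdio.h>

#define MTE_SHIFT	8
#define MTE_MASK	(0xfULL << MTE_SHIFT)

int main(void)
{
	uint64_t host_pfr1 = 2ULL << MTE_SHIFT;		/* host reports MTE = 2 */
	uint64_t guest_val = host_pfr1 | 0x1;		/* some other fields set */
	uint64_t mte = (host_pfr1 >> MTE_SHIFT) & 0xf;	/* extract host field */

	guest_val &= ~MTE_MASK;				/* hide MTE by default */
	guest_val |= (mte << MTE_SHIFT) & MTE_MASK;	/* re-expose host value */
	printf("MTE field exposed to guest: %llu\n",
	       (unsigned long long)((guest_val & MTE_MASK) >> MTE_SHIFT));
	return 0;
}
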
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 58cbda00e56d..340c51d87677 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -482,6 +482,16 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static struct gic_kvm_info *gic_kvm_info;
+
+void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
+{
+ BUG_ON(gic_kvm_info != NULL);
+ gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (gic_kvm_info)
+ *gic_kvm_info = *info;
+}
+
/**
* kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
*
@@ -509,18 +519,29 @@ void kvm_vgic_init_cpu_hardware(void)
*/
int kvm_vgic_hyp_init(void)
{
- const struct gic_kvm_info *gic_kvm_info;
+ bool has_mask;
int ret;
- gic_kvm_info = gic_get_kvm_info();
if (!gic_kvm_info)
return -ENODEV;
- if (!gic_kvm_info->maint_irq) {
+ has_mask = !gic_kvm_info->no_maint_irq_mask;
+
+ if (has_mask && !gic_kvm_info->maint_irq) {
kvm_err("No vgic maintenance irq\n");
return -ENXIO;
}
+ /*
+ * If we get one of these oddball non-GICs, taint the kernel,
+ * as we have no idea of how they *really* behave.
+ */
+ if (gic_kvm_info->no_hw_deactivation) {
+ kvm_info("Non-architectural vgic, tainting kernel\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ kvm_vgic_global_state.no_hw_deactivation = true;
+ }
+
switch (gic_kvm_info->type) {
case GIC_V2:
ret = vgic_v2_probe(gic_kvm_info);
@@ -536,10 +557,17 @@ int kvm_vgic_hyp_init(void)
ret = -ENODEV;
}
+ kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+
+ kfree(gic_kvm_info);
+ gic_kvm_info = NULL;
+
if (ret)
return ret;
- kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+ if (!has_mask)
+ return 0;
+
ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
vgic_maintenance_handler,
"vgic", kvm_get_running_vcpus());
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 11934c2af2f4..2c580204f1dc 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -108,11 +108,22 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
+ *
+ * Another case is when the interrupt requires a helping hand
+ * on deactivation (no HW deactivation, for example).
*/
- if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
- irq->line_level = vgic_get_phys_line_level(irq);
+ if (vgic_irq_is_mapped_level(irq)) {
+ bool resample = false;
+
+ if (val & GICH_LR_PENDING_BIT) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+ resample = !irq->line_level;
+ } else if (vgic_irq_needs_resampling(irq) &&
+ !(irq->active || irq->pending_latch)) {
+ resample = true;
+ }
- if (!irq->line_level)
+ if (resample)
vgic_irq_set_phys_active(irq, false);
}
@@ -152,7 +163,7 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
if (irq->group)
val |= GICH_LR_GROUP1;
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_needs_resampling(irq)) {
val |= GICH_LR_HW;
val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
/*
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 41ecf219c333..66004f61cd83 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -101,11 +101,22 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
+ *
+ * Another case is when the interrupt requires a helping hand
+ * on deactivation (no HW deactivation, for example).
*/
- if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
- irq->line_level = vgic_get_phys_line_level(irq);
+ if (vgic_irq_is_mapped_level(irq)) {
+ bool resample = false;
+
+ if (val & ICH_LR_PENDING_BIT) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+ resample = !irq->line_level;
+ } else if (vgic_irq_needs_resampling(irq) &&
+ !(irq->active || irq->pending_latch)) {
+ resample = true;
+ }
- if (!irq->line_level)
+ if (resample)
vgic_irq_set_phys_active(irq, false);
}
@@ -136,7 +147,7 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
}
}
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_needs_resampling(irq)) {
val |= ICH_LR_HW;
val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
/*
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 15b666200f0b..111bff47e471 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -182,8 +182,8 @@ bool vgic_get_phys_line_level(struct vgic_irq *irq)
BUG_ON(!irq->hw);
- if (irq->get_input_level)
- return irq->get_input_level(irq->intid);
+ if (irq->ops && irq->ops->get_input_level)
+ return irq->ops->get_input_level(irq->intid);
WARN_ON(irq_get_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
@@ -480,7 +480,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
unsigned int host_irq,
- bool (*get_input_level)(int vindid))
+ struct irq_ops *ops)
{
struct irq_desc *desc;
struct irq_data *data;
@@ -500,7 +500,7 @@ static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
irq->hw = true;
irq->host_irq = host_irq;
irq->hwintid = data->hwirq;
- irq->get_input_level = get_input_level;
+ irq->ops = ops;
return 0;
}
@@ -509,11 +509,11 @@ static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
irq->hw = false;
irq->hwintid = 0;
- irq->get_input_level = NULL;
+ irq->ops = NULL;
}
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, bool (*get_input_level)(int vindid))
+ u32 vintid, struct irq_ops *ops)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
unsigned long flags;
@@ -522,7 +522,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
BUG_ON(!irq);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
+ ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d31e1169d9b8..6dd56a49790a 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
- clear_page.o csum.o memchr.o memcpy.o memmove.o \
+ clear_page.o csum.o insn.o memchr.o memcpy.o \
memset.o memcmp.o strcmp.o strncmp.o strlen.o \
strnlen.o strchr.o strrchr.o tishift.o
@@ -18,3 +18,5 @@ obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_ARM64_MTE) += mte.o
+
+obj-$(CONFIG_KASAN_SW_TAGS) += kasan_sw_tags.o
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index af9afcbec92c..a7efb2ad2a1c 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -1,12 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Based on arch/arm/lib/clear_user.S
- *
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
*/
-#include <linux/linkage.h>
-#include <asm/asm-uaccess.h>
+#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@@ -19,25 +16,33 @@
*
* Alignment fixed up by hardware.
*/
+
+ .p2align 4
+ // Alignment is for the loop, but since the prologue (including BTI)
+ // is also 16 bytes we can keep any padding outside the function
SYM_FUNC_START(__arch_clear_user)
- mov x2, x1 // save the size for fixup return
+ add x2, x0, x1
subs x1, x1, #8
b.mi 2f
1:
-user_ldst 9f, sttr, xzr, x0, 8
+USER(9f, sttr xzr, [x0])
+ add x0, x0, #8
subs x1, x1, #8
- b.pl 1b
-2: adds x1, x1, #4
- b.mi 3f
-user_ldst 9f, sttr, wzr, x0, 4
- sub x1, x1, #4
-3: adds x1, x1, #2
- b.mi 4f
-user_ldst 9f, sttrh, wzr, x0, 2
- sub x1, x1, #2
-4: adds x1, x1, #1
- b.mi 5f
-user_ldst 9f, sttrb, wzr, x0, 0
+ b.hi 1b
+USER(9f, sttr xzr, [x2, #-8])
+ mov x0, #0
+ ret
+
+2: tbz x1, #2, 3f
+USER(9f, sttr wzr, [x0])
+USER(8f, sttr wzr, [x2, #-4])
+ mov x0, #0
+ ret
+
+3: tbz x1, #1, 4f
+USER(9f, sttrh wzr, [x0])
+4: tbz x1, #0, 5f
+USER(7f, sttrb wzr, [x2, #-1])
5: mov x0, #0
ret
SYM_FUNC_END(__arch_clear_user)
@@ -45,6 +50,8 @@ EXPORT_SYMBOL(__arch_clear_user)
.section .fixup,"ax"
.align 2
-9: mov x0, x2 // return the original size
+7: sub x0, x2, #5 // Adjust for faulting on the final byte...
+8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
+9: sub x0, x2, x0
ret
.previous
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/lib/insn.c
index 6c0de2f60ea9..b506a4b1e38c 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -7,21 +7,14 @@
*/
#include <linux/bitops.h>
#include <linux/bug.h>
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/stop_machine.h>
+#include <linux/printk.h>
+#include <linux/sizes.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
-#include <asm/fixmap.h>
+#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
-#include <asm/sections.h>
#define AARCH64_INSN_SF_BIT BIT(31)
#define AARCH64_INSN_N_BIT BIT(22)
@@ -30,7 +23,7 @@
static const int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
- AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_SVE,
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_LDST,
AARCH64_INSN_CLS_DP_REG,
@@ -83,81 +76,6 @@ bool aarch64_insn_is_branch_imm(u32 insn)
aarch64_insn_is_bcond(insn));
}
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static bool is_exit_text(unsigned long addr)
-{
- /* discarded with init text/data */
- return system_state < SYSTEM_RUNNING &&
- addr >= (unsigned long)__exittext_begin &&
- addr < (unsigned long)__exittext_end;
-}
-
-static bool is_image_text(unsigned long addr)
-{
- return core_kernel_text(addr) || is_exit_text(addr);
-}
-
-static void __kprobes *patch_map(void *addr, int fixmap)
-{
- unsigned long uintaddr = (uintptr_t) addr;
- bool image = is_image_text(uintaddr);
- struct page *page;
-
- if (image)
- page = phys_to_page(__pa_symbol(addr));
- else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
- page = vmalloc_to_page(addr);
- else
- return addr;
-
- BUG_ON(!page);
- return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
- (uintaddr & ~PAGE_MASK));
-}
-
-static void __kprobes patch_unmap(int fixmap)
-{
- clear_fixmap(fixmap);
-}
-/*
- * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
- * little-endian.
- */
-int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
-{
- int ret;
- __le32 val;
-
- ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
- if (!ret)
- *insnp = le32_to_cpu(val);
-
- return ret;
-}
-
-static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
-{
- void *waddr = addr;
- unsigned long flags = 0;
- int ret;
-
- raw_spin_lock_irqsave(&patch_lock, flags);
- waddr = patch_map(addr, FIX_TEXT_POKE0);
-
- ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
-
- patch_unmap(FIX_TEXT_POKE0);
- raw_spin_unlock_irqrestore(&patch_lock, flags);
-
- return ret;
-}
-
-int __kprobes aarch64_insn_write(void *addr, u32 insn)
-{
- return __aarch64_insn_write(addr, cpu_to_le32(insn));
-}
-
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
/* ldr/ldrsw (literal), prfm */
@@ -187,67 +105,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
aarch64_insn_is_bcond(insn);
}
-int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
-{
- u32 *tp = addr;
- int ret;
-
- /* A64 instructions must be word aligned */
- if ((uintptr_t)tp & 0x3)
- return -EINVAL;
-
- ret = aarch64_insn_write(tp, insn);
- if (ret == 0)
- __flush_icache_range((uintptr_t)tp,
- (uintptr_t)tp + AARCH64_INSN_SIZE);
-
- return ret;
-}
-
-struct aarch64_insn_patch {
- void **text_addrs;
- u32 *new_insns;
- int insn_cnt;
- atomic_t cpu_count;
-};
-
-static int __kprobes aarch64_insn_patch_text_cb(void *arg)
-{
- int i, ret = 0;
- struct aarch64_insn_patch *pp = arg;
-
- /* The first CPU becomes master */
- if (atomic_inc_return(&pp->cpu_count) == 1) {
- for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
- ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
- pp->new_insns[i]);
- /* Notify other processors with an additional increment. */
- atomic_inc(&pp->cpu_count);
- } else {
- while (atomic_read(&pp->cpu_count) <= num_online_cpus())
- cpu_relax();
- isb();
- }
-
- return ret;
-}
-
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
- struct aarch64_insn_patch patch = {
- .text_addrs = addrs,
- .new_insns = insns,
- .insn_cnt = cnt,
- .cpu_count = ATOMIC_INIT(0),
- };
-
- if (cnt <= 0)
- return -EINVAL;
-
- return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
- cpu_online_mask);
-}
-
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 *maskp, int *shiftp)
{
@@ -1432,104 +1289,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn)
return insn & CRM_MASK;
}
-static bool __kprobes __check_eq(unsigned long pstate)
-{
- return (pstate & PSR_Z_BIT) != 0;
-}
-
-static bool __kprobes __check_ne(unsigned long pstate)
-{
- return (pstate & PSR_Z_BIT) == 0;
-}
-
-static bool __kprobes __check_cs(unsigned long pstate)
-{
- return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_cc(unsigned long pstate)
-{
- return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_mi(unsigned long pstate)
-{
- return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_pl(unsigned long pstate)
-{
- return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_vs(unsigned long pstate)
-{
- return (pstate & PSR_V_BIT) != 0;
-}
-
-static bool __kprobes __check_vc(unsigned long pstate)
-{
- return (pstate & PSR_V_BIT) == 0;
-}
-
-static bool __kprobes __check_hi(unsigned long pstate)
-{
- pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_ls(unsigned long pstate)
-{
- pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_ge(unsigned long pstate)
-{
- pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_lt(unsigned long pstate)
-{
- pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_gt(unsigned long pstate)
-{
- /*PSR_N_BIT ^= PSR_V_BIT */
- unsigned long temp = pstate ^ (pstate << 3);
-
- temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
- return (temp & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_le(unsigned long pstate)
-{
- /*PSR_N_BIT ^= PSR_V_BIT */
- unsigned long temp = pstate ^ (pstate << 3);
-
- temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
- return (temp & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_al(unsigned long pstate)
-{
- return true;
-}
-
-/*
- * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
- * it behaves identically to 0b1110 ("al").
- */
-pstate_check_t * const aarch32_opcode_cond_checks[16] = {
- __check_eq, __check_ne, __check_cs, __check_cc,
- __check_mi, __check_pl, __check_vs, __check_vc,
- __check_hi, __check_ls, __check_ge, __check_lt,
- __check_gt, __check_le, __check_al, __check_al
-};
-
static bool range_of_ones(u64 val)
{
/* Doesn't handle full ones or full zeroes */
diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S
new file mode 100644
index 000000000000..5b04464c045e
--- /dev/null
+++ b/arch/arm64/lib/kasan_sw_tags.S
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google LLC
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Report a tag mismatch detected by tag-based KASAN.
+ *
+ * A compiler-generated thunk calls this with a non-AAPCS calling
+ * convention. Upon entry to this function, registers are as follows:
+ *
+ * x0: fault address (see below for restore)
+ * x1: fault description (see below for restore)
+ * x2 to x15: callee-saved
+ * x16 to x17: safe to clobber
+ * x18 to x30: callee-saved
+ * sp: pre-decremented by 256 bytes (see below for restore)
+ *
+ * The caller has decremented the SP by 256 bytes, and created a
+ * structure on the stack as follows:
+ *
+ * sp + 0..15: x0 and x1 to be restored
+ * sp + 16..231: free for use
+ * sp + 232..247: x29 and x30 (same as in GPRs)
+ * sp + 248..255: free for use
+ *
+ * Note that this is not a struct pt_regs.
+ *
+ * To call a regular AAPCS function we must save x2 to x15 (which we can
+ * store in the gaps), and create a frame record (for which we can use
+ * x29 and x30 spilled by the caller as those match the GPRs).
+ *
+ * The caller expects x0 and x1 to be restored from the structure, and
+ * for the structure to be removed from the stack (i.e. the SP must be
+ * incremented by 256 prior to return).
+ */
+SYM_CODE_START(__hwasan_tag_mismatch)
+#ifdef BTI_C
+ BTI_C
+#endif
+ add x29, sp, #232
+ stp x2, x3, [sp, #8 * 2]
+ stp x4, x5, [sp, #8 * 4]
+ stp x6, x7, [sp, #8 * 6]
+ stp x8, x9, [sp, #8 * 8]
+ stp x10, x11, [sp, #8 * 10]
+ stp x12, x13, [sp, #8 * 12]
+ stp x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+ str x18, [sp, #8 * 18]
+#endif
+
+ mov x2, x30
+ bl kasan_tag_mismatch
+
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #8 * 2]
+ ldp x4, x5, [sp, #8 * 4]
+ ldp x6, x7, [sp, #8 * 6]
+ ldp x8, x9, [sp, #8 * 8]
+ ldp x10, x11, [sp, #8 * 10]
+ ldp x12, x13, [sp, #8 * 12]
+ ldp x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+ ldr x18, [sp, #8 * 18]
+#endif
+ ldp x29, x30, [sp, #8 * 29]
+
+ /* remove the structure from the stack */
+ add sp, sp, #256
+ ret
+SYM_CODE_END(__hwasan_tag_mismatch)
+EXPORT_SYMBOL(__hwasan_tag_mismatch)
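
The block comment in the new kasan_sw_tags.S describes a non-AAPCS entry point: the compiler-generated thunk hands over the fault address in x0 and a fault-description word in x1 before the trampoline saves the remaining registers and calls the C reporting code. Purely as an illustration of what such a description word can carry, the sketch below decodes it the way outline HWASAN instrumentation conventionally encodes it (low four bits = log2 of the access size, bit 4 = write); the encoding and the helper name are assumptions here, not taken from this patch.

/* Sketch only (assumed ABI): decode a HWASAN-style fault description
 * word as passed in x1 to the trampoline above.
 * Bits [3:0] = log2(access size), bit 4 = write access. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void report_tag_mismatch(unsigned long addr, unsigned long info)
{
	size_t size = (size_t)1 << (info & 0xf);	/* access size in bytes */
	bool write = info & 0x10;			/* read vs write */

	printf("tag mismatch at %#lx: %zu-byte %s\n",
	       addr, size, write ? "write" : "read");
}

int main(void)
{
	report_tag_mismatch(0xffff000012345678UL, 0x13);	/* 8-byte write */
	return 0;
}
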
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index edf6b970a277..7c2276fdab54 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -1,9 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Based on arch/arm/lib/memchr.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
*/
#include <linux/linkage.h>
@@ -19,16 +16,60 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
+
+#define L(label) .L ## label
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+
+#define srcin x0
+#define chrin w1
+#define cntin x2
+
+#define result x0
+
+#define wordcnt x3
+#define rep01 x4
+#define repchr x5
+#define cur_word x6
+#define cur_byte w6
+#define tmp x7
+#define tmp2 x8
+
+ .p2align 4
+ nop
SYM_FUNC_START_WEAK_PI(memchr)
- and w1, w1, #0xff
-1: subs x2, x2, #1
- b.mi 2f
- ldrb w3, [x0], #1
- cmp w3, w1
- b.ne 1b
- sub x0, x0, #1
+ and chrin, chrin, #0xff
+ lsr wordcnt, cntin, #3
+ cbz wordcnt, L(byte_loop)
+ mov rep01, #REP8_01
+ mul repchr, x1, rep01
+ and cntin, cntin, #7
+L(word_loop):
+ ldr cur_word, [srcin], #8
+ sub wordcnt, wordcnt, #1
+ eor cur_word, cur_word, repchr
+ sub tmp, cur_word, rep01
+ orr tmp2, cur_word, #REP8_7f
+ bics tmp, tmp, tmp2
+ b.ne L(found_word)
+ cbnz wordcnt, L(word_loop)
+L(byte_loop):
+ cbz cntin, L(not_found)
+ ldrb cur_byte, [srcin], #1
+ sub cntin, cntin, #1
+ cmp cur_byte, chrin
+ b.ne L(byte_loop)
+ sub srcin, srcin, #1
+ ret
+L(found_word):
+CPU_LE( rev tmp, tmp)
+ clz tmp, tmp
+ sub tmp, tmp, #64
+ add result, srcin, tmp, asr #3
ret
-2: mov x0, #0
+L(not_found):
+ mov result, #0
ret
SYM_FUNC_END_PI(memchr)
EXPORT_SYMBOL_NOKASAN(memchr)
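
The rewritten memchr scans eight bytes at a time: each word is XOR-ed with the search byte replicated across the register, and the sub/orr/bics sequence then applies the classic zero-byte test, (v - 0x01…01) & ~v & 0x80…80, to spot a match. A small C model of that test, as a sketch (the helper names are not the kernel's):

/* Sketch: the zero-byte test used by the word loop above. After
 * XOR-ing a word with the repeated search byte, a byte of the
 * original word matched iff some byte of 'v' is now zero. */
#include <stdint.h>
#include <stdio.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_80 0x8080808080808080ULL

static int word_has_zero_byte(uint64_t v)
{
	/* Equivalent to (v - REP8_01) & ~(v | REP8_7f), as computed by
	 * the sub/orr/bics sequence in the assembly. */
	return ((v - REP8_01) & ~v & REP8_80) != 0;
}

static int word_contains_byte(uint64_t word, unsigned char c)
{
	uint64_t repchr = (uint64_t)c * REP8_01;	/* replicate c 8 times */

	return word_has_zero_byte(word ^ repchr);
}

int main(void)
{
	uint64_t w = 0x11223344556677ffULL;

	printf("%d %d\n", word_contains_byte(w, 0x44), word_contains_byte(w, 0xab));
	return 0;
}
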
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index c0671e793ea9..7d956384222f 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -1,247 +1,139 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/memcmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
-* compare memory areas(when two memory areas' offset are different,
-* alignment handled by the hardware)
-*
-* Parameters:
-* x0 - const memory area 1 pointer
-* x1 - const memory area 2 pointer
-* x2 - the maximal compare byte length
-* Returns:
-* x0 - a compare result, maybe less than, equal to, or greater than ZERO
-*/
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
+ */
+
+#define L(label) .L ## label
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-limit .req x2
-result .req x0
+#define src1 x0
+#define src2 x1
+#define limit x2
+#define result w0
/* Internal variables. */
-data1 .req x3
-data1w .req w3
-data2 .req x4
-data2w .req w4
-has_nul .req x5
-diff .req x6
-endloop .req x7
-tmp1 .req x8
-tmp2 .req x9
-tmp3 .req x10
-pos .req x11
-limit_wd .req x12
-mask .req x13
+#define data1 x3
+#define data1w w3
+#define data1h x4
+#define data2 x5
+#define data2w w5
+#define data2h x6
+#define tmp1 x7
+#define tmp2 x8
SYM_FUNC_START_WEAK_PI(memcmp)
- cbz limit, .Lret0
- eor tmp1, src1, src2
- tst tmp1, #7
- b.ne .Lmisaligned8
- ands tmp1, src1, #7
- b.ne .Lmutual_align
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
- lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
- /*
- * The input source addresses are at alignment boundary.
- * Directly compare eight bytes each time.
- */
-.Lloop_aligned:
- ldr data1, [src1], #8
- ldr data2, [src2], #8
-.Lstart_realigned:
- subs limit_wd, limit_wd, #1
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, cs /* Last Dword or differences. */
- cbz endloop, .Lloop_aligned
-
- /* Not reached the limit, must have found a diff. */
- tbz limit_wd, #63, .Lnot_limit
-
- /* Limit % 8 == 0 => the diff is in the last 8 bytes. */
- ands limit, limit, #7
- b.eq .Lnot_limit
- /*
- * The remained bytes less than 8. It is needed to extract valid data
- * from last eight bytes of the intended memory range.
- */
- lsl limit, limit, #3 /* bytes-> bits. */
- mov mask, #~0
-CPU_BE( lsr mask, mask, limit )
-CPU_LE( lsl mask, mask, limit )
- bic data1, data1, mask
- bic data2, data2, mask
-
- orr diff, diff, mask
- b .Lnot_limit
-
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that precede the start point.
- */
- bic src1, src1, #7
- bic src2, src2, #7
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- /*
- * We can not add limit with alignment offset(tmp1) here. Since the
- * addition probably make the limit overflown.
- */
- sub limit_wd, limit, #1/*limit != 0, so no underflow.*/
- and tmp3, limit_wd, #7
- lsr limit_wd, limit_wd, #3
- add tmp3, tmp3, tmp1
- add limit_wd, limit_wd, tmp3, lsr #3
- add limit, limit, tmp1/* Adjust the limit for the extra. */
-
- lsl tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
- neg tmp1, tmp1/* Bits to alignment -64. */
- mov tmp2, #~0
- /*mask off the non-intended bytes before the start address.*/
-CPU_BE( lsl tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
- /* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 )
-
- orr data1, data1, tmp2
- orr data2, data2, tmp2
- b .Lstart_realigned
-
- /*src1 and src2 have different alignment offset.*/
-.Lmisaligned8:
- cmp limit, #8
- b.lo .Ltiny8proc /*limit < 8: compare byte by byte*/
-
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum.*/
-
- sub limit, limit, pos
- /*compare the proceeding bytes in the first 8 byte segment.*/
-.Ltinycmp:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*diff occurred before the last byte.*/
- cmp data1w, data2w
- b.eq .Lstart_align
-1:
- sub result, data1, data2
+ subs limit, limit, 8
+ b.lo L(less8)
+
+ ldr data1, [src1], 8
+ ldr data2, [src2], 8
+ cmp data1, data2
+ b.ne L(return)
+
+ subs limit, limit, 8
+ b.gt L(more16)
+
+ ldr data1, [src1, limit]
+ ldr data2, [src2, limit]
+ b L(return)
+
+L(more16):
+ ldr data1, [src1], 8
+ ldr data2, [src2], 8
+ cmp data1, data2
+ bne L(return)
+
+ /* Jump directly to comparing the last 16 bytes for 32 byte (or less)
+ strings. */
+ subs limit, limit, 16
+ b.ls L(last_bytes)
+
+ /* We overlap loads between 0-32 bytes at either side of SRC1 when we
+ try to align, so limit it only to strings larger than 128 bytes. */
+ cmp limit, 96
+ b.ls L(loop16)
+
+ /* Align src1 and adjust src2 with bytes not yet done. */
+ and tmp1, src1, 15
+ add limit, limit, tmp1
+ sub src1, src1, tmp1
+ sub src2, src2, tmp1
+
+ /* Loop performing 16 bytes per iteration using aligned src1.
+ Limit is pre-decremented by 16 and must be larger than zero.
+ Exit if <= 16 bytes left to do or if the data is not equal. */
+ .p2align 4
+L(loop16):
+ ldp data1, data1h, [src1], 16
+ ldp data2, data2h, [src2], 16
+ subs limit, limit, 16
+ ccmp data1, data2, 0, hi
+ ccmp data1h, data2h, 0, eq
+ b.eq L(loop16)
+
+ cmp data1, data2
+ bne L(return)
+ mov data1, data1h
+ mov data2, data2h
+ cmp data1, data2
+ bne L(return)
+
+ /* Compare last 1-16 bytes using unaligned access. */
+L(last_bytes):
+ add src1, src1, limit
+ add src2, src2, limit
+ ldp data1, data1h, [src1]
+ ldp data2, data2h, [src2]
+ cmp data1, data2
+ bne L(return)
+ mov data1, data1h
+ mov data2, data2h
+ cmp data1, data2
+
+ /* Compare data bytes and set return value to 0, -1 or 1. */
+L(return):
+#ifndef __AARCH64EB__
+ rev data1, data1
+ rev data2, data2
+#endif
+ cmp data1, data2
+L(ret_eq):
+ cset result, ne
+ cneg result, result, lo
ret
-.Lstart_align:
- lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
-
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- /*process more leading bytes to make src1 aligned...*/
- add src1, src1, tmp3 /*backwards src1 to alignment boundary*/
- add src2, src2, tmp3
- sub limit, limit, tmp3
- lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
- /*load 8 bytes from aligned SRC1..*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
-
- subs limit_wd, limit_wd, #1
- eor diff, data1, data2 /*Non-zero if differences found.*/
- csinv endloop, diff, xzr, ne
- cbnz endloop, .Lunequal_proc
- /*How far is the current SRC2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-
-.Lrecal_offset:/*src1 is aligned now..*/
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes and compare from
- * the SRC2 alignment boundary. If all 8 bytes are equal,then start
- * the second part's comparison. Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- eor diff, data1, data2 /* Non-zero if differences found. */
- cbnz diff, .Lnot_limit
-
- /*The second part process*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- eor diff, data1, data2 /* Non-zero if differences found. */
- subs limit_wd, limit_wd, #1
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- cbz endloop, .Lloopcmp_proc
-.Lunequal_proc:
- cbz diff, .Lremain8
-
-/* There is difference occurred in the latest comparison. */
-.Lnot_limit:
-/*
-* For little endian,reverse the low significant equal bits into MSB,then
-* following CLZ can find how many equal bits exist.
-*/
-CPU_LE( rev diff, diff )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
-
- /*
- * The MS-non-zero bit of DIFF marks either the first bit
- * that is different, or the end of the significant data.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- clz pos, diff
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * We need to zero-extend (char is unsigned) the value and then
- * perform a signed subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
+ .p2align 4
+ /* Compare up to 8 bytes. Limit is [-8..-1]. */
+L(less8):
+ adds limit, limit, 4
+ b.lo L(less4)
+ ldr data1w, [src1], 4
+ ldr data2w, [src2], 4
+ cmp data1w, data2w
+ b.ne L(return)
+ sub limit, limit, 4
+L(less4):
+ adds limit, limit, 4
+ beq L(ret_eq)
+L(byte_loop):
+ ldrb data1w, [src1], 1
+ ldrb data2w, [src2], 1
+ subs limit, limit, 1
+ ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */
+ b.eq L(byte_loop)
+ sub result, data1w, data2w
ret
-.Lremain8:
- /* Limit % 8 == 0 =>. all data are equal.*/
- ands limit, limit, #7
- b.eq .Lret0
-
-.Ltiny8proc:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs limit, limit, #1
-
- ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
- b.eq .Ltiny8proc
- sub result, data1, data2
- ret
-.Lret0:
- mov result, #0
- ret
SYM_FUNC_END_PI(memcmp)
EXPORT_SYMBOL_NOKASAN(memcmp)
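
In the L(return) path of the new memcmp, the differing words were loaded little-endian, so they are byte-reversed before a single unsigned comparison; that puts the first differing memory byte in the most significant position and lets cset/cneg produce 1, -1 or 0. A short C illustration of the same idea, assuming a little-endian host and the GCC/Clang __builtin_bswap64 builtin (the function name is not the kernel's):

/* Sketch: why byte-reversing little-endian loads gives memcmp
 * ordering with a single unsigned 64-bit comparison. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int cmp_word_le(const unsigned char *a, const unsigned char *b)
{
	uint64_t x, y;

	memcpy(&x, a, sizeof(x));		/* little-endian load assumed */
	memcpy(&y, b, sizeof(y));
	if (x == y)
		return 0;
	x = __builtin_bswap64(x);		/* first memory byte -> MSB */
	y = __builtin_bswap64(y);
	return x > y ? 1 : -1;			/* matches cset/cneg above */
}

int main(void)
{
	unsigned char a[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };
	unsigned char b[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'z' };

	printf("%d %d\n", cmp_word_le(a, b), memcmp(a, b, 8) < 0 ? -1 : 1);
	return 0;
}
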
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index dc8d2a216a6e..b82fd64ee1e1 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -1,66 +1,252 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/memcpy.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/cache.h>
-/*
- * Copy a buffer from src to dest (alignment handled by the hardware)
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
*
- * Parameters:
- * x0 - dest
- * x1 - src
- * x2 - n
- * Returns:
- * x0 - dest
*/
- .macro ldrb1 reg, ptr, val
- ldrb \reg, [\ptr], \val
- .endm
-
- .macro strb1 reg, ptr, val
- strb \reg, [\ptr], \val
- .endm
- .macro ldrh1 reg, ptr, val
- ldrh \reg, [\ptr], \val
- .endm
+#define L(label) .L ## label
- .macro strh1 reg, ptr, val
- strh \reg, [\ptr], \val
- .endm
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define B_l x8
+#define B_lw w8
+#define B_h x9
+#define C_l x10
+#define C_lw w10
+#define C_h x11
+#define D_l x12
+#define D_h x13
+#define E_l x14
+#define E_h x15
+#define F_l x16
+#define F_h x17
+#define G_l count
+#define G_h dst
+#define H_l src
+#define H_h srcend
+#define tmp1 x14
- .macro ldr1 reg, ptr, val
- ldr \reg, [\ptr], \val
- .endm
+/* This implementation handles overlaps and supports both memcpy and memmove
+ from a single entry point. It uses unaligned accesses and branchless
+ sequences to keep the code small, simple and improve performance.
- .macro str1 reg, ptr, val
- str \reg, [\ptr], \val
- .endm
+ Copies are split into 3 main cases: small copies of up to 32 bytes, medium
+ copies of up to 128 bytes, and large copies. The overhead of the overlap
+ check is negligible since it is only required for large copies.
- .macro ldp1 reg1, reg2, ptr, val
- ldp \reg1, \reg2, [\ptr], \val
- .endm
-
- .macro stp1 reg1, reg2, ptr, val
- stp \reg1, \reg2, [\ptr], \val
- .endm
+ Large copies use a software pipelined loop processing 64 bytes per iteration.
+ The destination pointer is 16-byte aligned to minimize unaligned accesses.
+ The loop tail is handled by always copying 64 bytes from the end.
+*/
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_WEAK_ALIAS_PI(memmove)
SYM_FUNC_START_ALIAS(__memcpy)
SYM_FUNC_START_WEAK_PI(memcpy)
-#include "copy_template.S"
+ add srcend, src, count
+ add dstend, dstin, count
+ cmp count, 128
+ b.hi L(copy_long)
+ cmp count, 32
+ b.hi L(copy32_128)
+
+ /* Small copies: 0..32 bytes. */
+ cmp count, 16
+ b.lo L(copy16)
+ ldp A_l, A_h, [src]
+ ldp D_l, D_h, [srcend, -16]
+ stp A_l, A_h, [dstin]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ /* Copy 8-15 bytes. */
+L(copy16):
+ tbz count, 3, L(copy8)
+ ldr A_l, [src]
+ ldr A_h, [srcend, -8]
+ str A_l, [dstin]
+ str A_h, [dstend, -8]
+ ret
+
+ .p2align 3
+ /* Copy 4-7 bytes. */
+L(copy8):
+ tbz count, 2, L(copy4)
+ ldr A_lw, [src]
+ ldr B_lw, [srcend, -4]
+ str A_lw, [dstin]
+ str B_lw, [dstend, -4]
+ ret
+
+ /* Copy 0..3 bytes using a branchless sequence. */
+L(copy4):
+ cbz count, L(copy0)
+ lsr tmp1, count, 1
+ ldrb A_lw, [src]
+ ldrb C_lw, [srcend, -1]
+ ldrb B_lw, [src, tmp1]
+ strb A_lw, [dstin]
+ strb B_lw, [dstin, tmp1]
+ strb C_lw, [dstend, -1]
+L(copy0):
+ ret
+
+ .p2align 4
+ /* Medium copies: 33..128 bytes. */
+L(copy32_128):
+ ldp A_l, A_h, [src]
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [srcend, -32]
+ ldp D_l, D_h, [srcend, -16]
+ cmp count, 64
+ b.hi L(copy128)
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
ret
+
+ .p2align 4
+ /* Copy 65..128 bytes. */
+L(copy128):
+ ldp E_l, E_h, [src, 32]
+ ldp F_l, F_h, [src, 48]
+ cmp count, 96
+ b.ls L(copy96)
+ ldp G_l, G_h, [srcend, -64]
+ ldp H_l, H_h, [srcend, -48]
+ stp G_l, G_h, [dstend, -64]
+ stp H_l, H_h, [dstend, -48]
+L(copy96):
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp E_l, E_h, [dstin, 32]
+ stp F_l, F_h, [dstin, 48]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Copy more than 128 bytes. */
+L(copy_long):
+ /* Use backwards copy if there is an overlap. */
+ sub tmp1, dstin, src
+ cbz tmp1, L(copy0)
+ cmp tmp1, count
+ b.lo L(copy_long_backwards)
+
+ /* Copy 16 bytes and then align dst to 16-byte alignment. */
+
+ ldp D_l, D_h, [src]
+ and tmp1, dstin, 15
+ bic dst, dstin, 15
+ sub src, src, tmp1
+ add count, count, tmp1 /* Count is now 16 too large. */
+ ldp A_l, A_h, [src, 16]
+ stp D_l, D_h, [dstin]
+ ldp B_l, B_h, [src, 32]
+ ldp C_l, C_h, [src, 48]
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 128 + 16 /* Test and readjust count. */
+ b.ls L(copy64_from_end)
+
+L(loop64):
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [src, 16]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [src, 32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [src, 48]
+ stp D_l, D_h, [dst, 64]!
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 64
+ b.hi L(loop64)
+
+ /* Write the last iteration and copy 64 bytes from the end. */
+L(copy64_from_end):
+ ldp E_l, E_h, [srcend, -64]
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [srcend, -48]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [srcend, -16]
+ stp D_l, D_h, [dst, 64]
+ stp E_l, E_h, [dstend, -64]
+ stp A_l, A_h, [dstend, -48]
+ stp B_l, B_h, [dstend, -32]
+ stp C_l, C_h, [dstend, -16]
+ ret
+
+ .p2align 4
+
+ /* Large backwards copy for overlapping copies.
+ Copy 16 bytes and then align dst to 16-byte alignment. */
+L(copy_long_backwards):
+ ldp D_l, D_h, [srcend, -16]
+ and tmp1, dstend, 15
+ sub srcend, srcend, tmp1
+ sub count, count, tmp1
+ ldp A_l, A_h, [srcend, -16]
+ stp D_l, D_h, [dstend, -16]
+ ldp B_l, B_h, [srcend, -32]
+ ldp C_l, C_h, [srcend, -48]
+ ldp D_l, D_h, [srcend, -64]!
+ sub dstend, dstend, tmp1
+ subs count, count, 128
+ b.ls L(copy64_from_start)
+
+L(loop64_backwards):
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [srcend, -16]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [srcend, -48]
+ stp D_l, D_h, [dstend, -64]!
+ ldp D_l, D_h, [srcend, -64]!
+ subs count, count, 64
+ b.hi L(loop64_backwards)
+
+ /* Write the last iteration and copy 64 bytes from the start. */
+L(copy64_from_start):
+ ldp G_l, G_h, [src, 48]
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [src, 32]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [src, 16]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [src]
+ stp D_l, D_h, [dstend, -64]
+ stp G_l, G_h, [dstin, 48]
+ stp A_l, A_h, [dstin, 32]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstin]
+ ret
+
SYM_FUNC_END_PI(memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(__memcpy)
+SYM_FUNC_END_ALIAS_PI(memmove)
+EXPORT_SYMBOL(memmove)
+SYM_FUNC_END_ALIAS(__memmove)
+EXPORT_SYMBOL(__memmove)
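
The header comment of the new memcpy describes the overall shape: a single entry point serves memcpy and memmove, copies of up to 32 and up to 128 bytes are handled with overlapping head/tail loads so the overlap check is only paid on large copies, and large copies run a 64-byte software-pipelined loop behind a forward/backward choice. A compact C model of that dispatch, as a sketch only (the byte loops and the temporary stand in for the ldp/stp sequences, and none of the names below are the kernel's):

/* Sketch of the dispatch described in the header comment above; the
 * per-case bodies are plain byte loops standing in for the overlapping
 * ldp/stp sequences of the real routine. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_fwd(unsigned char *d, const unsigned char *s, size_t n)
{
	for (size_t i = 0; i < n; i++)		/* real code: 64B per iteration */
		d[i] = s[i];
}

static void copy_bwd(unsigned char *d, const unsigned char *s, size_t n)
{
	while (n--)				/* backwards for dst > src overlap */
		d[n] = s[n];
}

void *sketch_memmove(void *dst, const void *src, size_t count)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (count <= 128) {
		/* Small/medium copies: the assembly loads the head and the
		 * tail of the buffer up front, so overlap needs no special
		 * handling; a temporary models that here. */
		unsigned char tmp[128];

		memcpy(tmp, s, count);
		memcpy(d, tmp, count);
	} else if ((uintptr_t)dst - (uintptr_t)src >= (uintptr_t)count) {
		copy_fwd(d, s, count);		/* no destructive overlap */
	} else {
		copy_bwd(d, s, count);		/* overlapping: copy backwards */
	}
	return dst;
}

int main(void)
{
	char buf[16] = "0123456789abcde";

	sketch_memmove(buf + 2, buf, 10);	/* overlapping move */
	printf("%s\n", buf);
	return 0;
}
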
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
deleted file mode 100644
index 1035dce4bdaf..000000000000
--- a/arch/arm64/lib/memmove.S
+++ /dev/null
@@ -1,189 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
- *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cache.h>
-
-/*
- * Move a buffer from src to test (alignment handled by the hardware).
- * If dest <= src, call memcpy, otherwise copy in reverse order.
- *
- * Parameters:
- * x0 - dest
- * x1 - src
- * x2 - n
- * Returns:
- * x0 - dest
- */
-dstin .req x0
-src .req x1
-count .req x2
-tmp1 .req x3
-tmp1w .req w3
-tmp2 .req x4
-tmp2w .req w4
-tmp3 .req x5
-tmp3w .req w5
-dst .req x6
-
-A_l .req x7
-A_h .req x8
-B_l .req x9
-B_h .req x10
-C_l .req x11
-C_h .req x12
-D_l .req x13
-D_h .req x14
-
-SYM_FUNC_START_ALIAS(__memmove)
-SYM_FUNC_START_WEAK_PI(memmove)
- cmp dstin, src
- b.lo __memcpy
- add tmp1, src, count
- cmp dstin, tmp1
- b.hs __memcpy /* No overlap. */
-
- add dst, dstin, count
- add src, src, count
- cmp count, #16
- b.lo .Ltail15 /*probably non-alignment accesses.*/
-
- ands tmp2, src, #15 /* Bytes to reach alignment. */
- b.eq .LSrcAligned
- sub count, count, tmp2
- /*
- * process the aligned offset length to make the src aligned firstly.
- * those extra instructions' cost is acceptable. It also make the
- * coming accesses are based on aligned address.
- */
- tbz tmp2, #0, 1f
- ldrb tmp1w, [src, #-1]!
- strb tmp1w, [dst, #-1]!
-1:
- tbz tmp2, #1, 2f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-2:
- tbz tmp2, #2, 3f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-3:
- tbz tmp2, #3, .LSrcAligned
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-
-.LSrcAligned:
- cmp count, #64
- b.ge .Lcpy_over64
-
- /*
- * Deal with small copies quickly by dropping straight into the
- * exit block.
- */
-.Ltail63:
- /*
- * Copy up to 48 bytes of data. At this point we only need the
- * bottom 6 bits of count to be accurate.
- */
- ands tmp1, count, #0x30
- b.eq .Ltail15
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-1:
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-2:
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-
-.Ltail15:
- tbz count, #3, 1f
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-1:
- tbz count, #2, 2f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-2:
- tbz count, #1, 3f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-3:
- tbz count, #0, .Lexitfunc
- ldrb tmp1w, [src, #-1]
- strb tmp1w, [dst, #-1]
-
-.Lexitfunc:
- ret
-
-.Lcpy_over64:
- subs count, count, #128
- b.ge .Lcpy_body_large
- /*
- * Less than 128 bytes to copy, so handle 64 bytes here and then jump
- * to the tail.
- */
- ldp A_l, A_h, [src, #-16]
- stp A_l, A_h, [dst, #-16]
- ldp B_l, B_h, [src, #-32]
- ldp C_l, C_h, [src, #-48]
- stp B_l, B_h, [dst, #-32]
- stp C_l, C_h, [dst, #-48]
- ldp D_l, D_h, [src, #-64]!
- stp D_l, D_h, [dst, #-64]!
-
- tst count, #0x3f
- b.ne .Ltail63
- ret
-
- /*
- * Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line this ensures the entire loop is in one line.
- */
- .p2align L1_CACHE_SHIFT
-.Lcpy_body_large:
- /* pre-load 64 bytes data. */
- ldp A_l, A_h, [src, #-16]
- ldp B_l, B_h, [src, #-32]
- ldp C_l, C_h, [src, #-48]
- ldp D_l, D_h, [src, #-64]!
-1:
- /*
- * interlace the load of next 64 bytes data block with store of the last
- * loaded 64 bytes data.
- */
- stp A_l, A_h, [dst, #-16]
- ldp A_l, A_h, [src, #-16]
- stp B_l, B_h, [dst, #-32]
- ldp B_l, B_h, [src, #-32]
- stp C_l, C_h, [dst, #-48]
- ldp C_l, C_h, [src, #-48]
- stp D_l, D_h, [dst, #-64]!
- ldp D_l, D_h, [src, #-64]!
- subs count, count, #64
- b.ge 1b
- stp A_l, A_h, [dst, #-16]
- stp B_l, B_h, [dst, #-32]
- stp C_l, C_h, [dst, #-48]
- stp D_l, D_h, [dst, #-64]!
-
- tst count, #0x3f
- b.ne .Ltail63
- ret
-SYM_FUNC_END_PI(memmove)
-EXPORT_SYMBOL(memmove)
-SYM_FUNC_END_ALIAS(__memmove)
-EXPORT_SYMBOL(__memmove)
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 351537c12f36..e83643b3995f 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -37,6 +37,26 @@ SYM_FUNC_START(mte_clear_page_tags)
SYM_FUNC_END(mte_clear_page_tags)
/*
+ * Zero the page and tags at the same time
+ *
+ * Parameters:
+ * x0 - address to the beginning of the page
+ */
+SYM_FUNC_START(mte_zero_clear_page_tags)
+ mrs x1, dczid_el0
+ and w1, w1, #0xf
+ mov x2, #4
+ lsl x1, x2, x1
+ and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag
+
+1: dc gzva, x0
+ add x0, x0, x1
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+SYM_FUNC_END(mte_zero_clear_page_tags)
+
+/*
* Copy the tags from the source page to the destination one
* x0 - address of the destination page
* x1 - address of the source page
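
mte_zero_clear_page_tags above steps through the page in DC GZVA blocks. The block size is derived from DCZID_EL0: its low four bits (BS) encode log2 of the zeroing block size in 4-byte words, which is what the mov #4 / lsl sequence computes. A minimal sketch of the same arithmetic (the register value is passed in rather than read with mrs, and the example value is only illustrative):

/* Sketch: DC ZVA/GZVA block size from DCZID_EL0.BS.
 * Block size in bytes = 4 << BS, where BS = bits [3:0]. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t dczid_block_bytes(uint64_t dczid_el0)
{
	return (size_t)4 << (dczid_el0 & 0xf);
}

int main(void)
{
	/* Example: BS = 4 -> 64-byte zeroing blocks, a common setting. */
	printf("%zu\n", dczid_block_bytes(0x4));
	return 0;
}
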
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 4e79566726c8..d7bee210a798 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -1,84 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/strcmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * compare two strings
+/* Assumptions:
*
- * Parameters:
- * x0 - const string 1 pointer
- * x1 - const string 2 pointer
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero
- * if s1 is found, respectively, to be less than, to match,
- * or be greater than s2.
+ * ARMv8-a, AArch64
*/
+#define L(label) .L ## label
+
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-result .req x0
+#define src1 x0
+#define src2 x1
+#define result x0
/* Internal variables. */
-data1 .req x2
-data1w .req w2
-data2 .req x3
-data2w .req w3
-has_nul .req x4
-diff .req x5
-syndrome .req x6
-tmp1 .req x7
-tmp2 .req x8
-tmp3 .req x9
-zeroones .req x10
-pos .req x11
-
+#define data1 x2
+#define data1w w2
+#define data2 x3
+#define data2w w3
+#define has_nul x4
+#define diff x5
+#define syndrome x6
+#define tmp1 x7
+#define tmp2 x8
+#define tmp3 x9
+#define zeroones x10
+#define pos x11
+
+ /* Start of performance-critical section -- one 64B cache line. */
+ .align 6
SYM_FUNC_START_WEAK_PI(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
- b.ne .Lmisaligned8
+ b.ne L(misaligned8)
ands tmp1, src1, #7
- b.ne .Lmutual_align
-
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
-.Lloop_aligned:
+ b.ne L(mutual_align)
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. */
+L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
- cbz syndrome, .Lloop_aligned
- b .Lcal_cmpresult
+ cbz syndrome, L(loop_aligned)
+ /* End of performance-critical section -- one 64B cache line. */
+
+L(end):
+#ifndef __AARCH64EB__
+ rev syndrome, syndrome
+ rev data1, data1
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ clz pos, syndrome
+ rev data2, data2
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#else
+ /* For big-endian we cannot use the trick with the syndrome value
+ as carry-propagation can corrupt the upper bits if the trailing
+ bytes in the string contain 0x01. */
+ /* However, if there is no NUL byte in the dword, we can generate
+ the result directly. We can't just subtract the bytes as the
+ MSB might be significant. */
+ cbnz has_nul, 1f
+ cmp data1, data2
+ cset result, ne
+ cneg result, result, lo
+ ret
+1:
+ /* Re-compute the NUL-byte detection, using a byte-reversed value. */
+ rev tmp3, data1
+ sub tmp1, tmp3, zeroones
+ orr tmp2, tmp3, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ rev has_nul, has_nul
+ orr syndrome, diff, has_nul
+ clz pos, syndrome
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#endif
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that preceed the start point.
- */
+L(mutual_align):
+ /* Sources are mutually aligned, but are not currently at an
+ alignment boundary. Round down the addresses and then mask off
+ the bytes that precede the start point. */
bic src1, src1, #7
bic src2, src2, #7
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
@@ -86,138 +125,52 @@ SYM_FUNC_START_WEAK_PI(strcmp)
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
+#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
-
+ lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#endif
orr data1, data1, tmp2
orr data2, data2, tmp2
- b .Lstart_realigned
-
-.Lmisaligned8:
- /*
- * Get the align offset length to compare per byte first.
- * After this process, one string's address will be aligned.
- */
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum. */
-.Ltinycmp:
+ b L(start_realigned)
+
+L(misaligned8):
+ /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
+ checking to make sure that we don't access beyond page boundary in
+ SRC2. */
+ tst src1, #7
+ b.eq L(loop_misaligned)
+L(do_misaligned):
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*find the null or unequal...*/
cmp data1w, #1
- ccmp data1w, data2w, #0, cs
- b.eq .Lstart_align /*the last bytes are equal....*/
-1:
- sub result, data1, data2
- ret
-
-.Lstart_align:
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- /*process more leading bytes to make str1 aligned...*/
- add src1, src1, tmp3
- add src2, src2, tmp3
- /*load 8 bytes from aligned str1 and non-aligned str2..*/
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.ne L(done)
+ tst src1, #7
+ b.ne L(do_misaligned)
+
+L(loop_misaligned):
+ /* Test if we are within the last dword of the end of a 4K page. If
+ yes then jump back to the misaligned loop to compare a byte at a time. */
+ and tmp1, src2, #0xff8
+ eor tmp1, tmp1, #0xff8
+ cbz tmp1, L(do_misaligned)
ldr data1, [src1], #8
ldr data2, [src2], #8
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
- orr syndrome, diff, has_nul
- cbnz syndrome, .Lcal_cmpresult
- /*How far is the current str2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-.Lrecal_offset:
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes from the SRC2 alignment
- * boundary,then compare with the relative bytes from SRC1.
- * If all 8 bytes are equal,then start the second part's comparison.
- * Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
- orr syndrome, diff, has_nul
- cbnz syndrome, .Lcal_cmpresult
-
- /*The second part process*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
- cbz syndrome, .Lloopcmp_proc
+ cbz syndrome, L(loop_misaligned)
+ b L(end)
-.Lcal_cmpresult:
- /*
- * reversed the byte-order as big-endian,then CLZ can find the most
- * significant zero bits.
- */
-CPU_LE( rev syndrome, syndrome )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
-
- /*
- * For big-endian we cannot use the trick with the syndrome value
- * as carry-propagation can corrupt the upper bits if the trailing
- * bytes in the string contain 0x01.
- * However, if there is no NUL byte in the dword, we can generate
- * the result directly. We cannot just subtract the bytes as the
- * MSB might be significant.
- */
-CPU_BE( cbnz has_nul, 1f )
-CPU_BE( cmp data1, data2 )
-CPU_BE( cset result, ne )
-CPU_BE( cneg result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
- /*Re-compute the NUL-byte detection, using a byte-reversed value. */
-CPU_BE( rev tmp3, data1 )
-CPU_BE( sub tmp1, tmp3, zeroones )
-CPU_BE( orr tmp2, tmp3, #REP8_7f )
-CPU_BE( bic has_nul, tmp1, tmp2 )
-CPU_BE( rev has_nul, has_nul )
-CPU_BE( orr syndrome, diff, has_nul )
-
- clz pos, syndrome
- /*
- * The MS-non-zero bit of the syndrome marks either the first bit
- * that is different, or the top bit of the first zero byte.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * But we need to zero-extend (char is unsigned) the value and then
- * perform a signed 32-bit subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
+L(done):
+ sub result, data1, data2
ret
+
SYM_FUNC_END_PI(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp)
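
The (X - 1) & ~(X | 0x7f) zero-byte test described in the comments above (and reused below in strlen.S and strncmp.S) is easiest to see in C. The following is an illustrative sketch only, not part of the patch; the REP8_* constants mirror the assembly and the example words are invented:

#include <stdint.h>
#include <stdio.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_7f 0x7f7f7f7f7f7f7f7fULL

/* Non-zero iff some byte of x is zero: per-byte (x - 1) & ~(x | 0x7f). */
static uint64_t has_zero_byte(uint64_t x)
{
        return (x - REP8_01) & ~(x | REP8_7f);
}

int main(void)
{
        uint64_t with_nul    = 0x4141414100414141ULL;  /* contains a 0x00 byte */
        uint64_t without_nul = 0x4141414141414141ULL;  /* "AAAAAAAA", no NUL */

        /* Prints "1 0": only the first word reports a NUL byte. */
        printf("%d %d\n", has_zero_byte(with_nul) != 0,
               has_zero_byte(without_nul) != 0);
        return 0;
}

This is the same syndrome the big-endian path has to re-derive after a byte reverse, because the subtraction can borrow across byte lanes when a trailing byte is 0x01.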
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index ee3ed882dd79..35fbdb7d6e1a 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -1,115 +1,203 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/98e4d6a5c13c8e54/string/aarch64/strlen.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * calculate the length of a string
+/* Assumptions:
*
- * Parameters:
- * x0 - const string pointer
- * Returns:
- * x0 - the return length of specific string
+ * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
+#define L(label) .L ## label
+
/* Arguments and results. */
-srcin .req x0
-len .req x0
+#define srcin x0
+#define len x0
/* Locals and temporaries. */
-src .req x1
-data1 .req x2
-data2 .req x3
-data2a .req x4
-has_nul1 .req x5
-has_nul2 .req x6
-tmp1 .req x7
-tmp2 .req x8
-tmp3 .req x9
-tmp4 .req x10
-zeroones .req x11
-pos .req x12
+#define src x1
+#define data1 x2
+#define data2 x3
+#define has_nul1 x4
+#define has_nul2 x5
+#define tmp1 x4
+#define tmp2 x5
+#define tmp3 x6
+#define tmp4 x7
+#define zeroones x8
+
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. A faster check
+ (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
+ false hits for characters 129..255. */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
+#define MIN_PAGE_SIZE 4096
+
+ /* Since strings are short on average, we check the first 16 bytes
+ of the string for a NUL character. In order to do an unaligned ldp
+ safely we have to do a page cross check first. If there is a NUL
+ byte we calculate the length from the 2 8-byte words using
+ conditional select to reduce branch mispredictions (it is unlikely
+ strlen will be repeatedly called on strings with the same length).
+
+ If the string is longer than 16 bytes, we align src so we don't need
+ further page cross checks, and process 32 bytes per iteration
+ using the fast NUL check. If we encounter non-ASCII characters,
+ fall back to a second loop using the full NUL check.
+
+ If the page cross check fails, we read 16 bytes from an aligned
+ address, remove any characters before the string, and continue
+ in the main loop using aligned loads. Since strings crossing a
+ page in the first 16 bytes are rare (probability of
+ 16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
+
+ AArch64 systems have a minimum page size of 4k. We don't bother
+ checking for larger page sizes - the cost of setting up the correct
+ page size is just not worth the extra gain from a small reduction in
+ the cases taking the slow path. Note that we only care about
+ whether the first fetch, which may be misaligned, crosses a page
+ boundary. */
+
SYM_FUNC_START_WEAK_PI(strlen)
- mov zeroones, #REP8_01
- bic src, srcin, #15
- ands tmp1, srcin, #15
- b.ne .Lmisaligned
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
- /*
- * The inner loop deals with two Dwords at a time. This has a
- * slightly higher start-up cost, but we should win quite quickly,
- * especially on cores with a high number of issue slots per
- * cycle, as we get much better parallelism out of the operations.
- */
-.Lloop:
- ldp data1, data2, [src], #16
-.Lrealigned:
+ and tmp1, srcin, MIN_PAGE_SIZE - 1
+ mov zeroones, REP8_01
+ cmp tmp1, MIN_PAGE_SIZE - 16
+ b.gt L(page_cross)
+ ldp data1, data2, [srcin]
+#ifdef __AARCH64EB__
+ /* For big-endian, carry propagation (if the final byte in the
+ string is 0x01) means we cannot use has_nul1/2 directly.
+ Since we expect strings to be small and early-exit,
+ byte-swap the data now so has_nul1/2 will be correct. */
+ rev data1, data1
+ rev data2, data2
+#endif
sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
+ orr tmp2, data1, REP8_7f
sub tmp3, data2, zeroones
- orr tmp4, data2, #REP8_7f
- bic has_nul1, tmp1, tmp2
- bics has_nul2, tmp3, tmp4
- ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
- b.eq .Lloop
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(main_loop_entry)
+
+ /* Enter with C = has_nul1 == 0. */
+ csel has_nul1, has_nul1, has_nul2, cc
+ mov len, 8
+ rev has_nul1, has_nul1
+ clz tmp1, has_nul1
+ csel len, xzr, len, cc
+ add len, len, tmp1, lsr 3
+ ret
+ /* The inner loop processes 32 bytes per iteration and uses the fast
+ NUL check. If we encounter non-ASCII characters, use a second
+ loop with the accurate NUL check. */
+ .p2align 4
+L(main_loop_entry):
+ bic src, srcin, 15
+ sub src, src, 16
+L(main_loop):
+ ldp data1, data2, [src, 32]!
+L(page_cross_entry):
+ sub tmp1, data1, zeroones
+ sub tmp3, data2, zeroones
+ orr tmp2, tmp1, tmp3
+ tst tmp2, zeroones, lsl 7
+ bne 1f
+ ldp data1, data2, [src, 16]
+ sub tmp1, data1, zeroones
+ sub tmp3, data2, zeroones
+ orr tmp2, tmp1, tmp3
+ tst tmp2, zeroones, lsl 7
+ beq L(main_loop)
+ add src, src, 16
+1:
+ /* The fast check failed, so do the slower, accurate NUL check. */
+ orr tmp2, data1, REP8_7f
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(nonascii_loop)
+
+ /* Enter with C = has_nul1 == 0. */
+L(tail):
+#ifdef __AARCH64EB__
+ /* For big-endian, carry propagation (if the final byte in the
+ string is 0x01) means we cannot use has_nul1/2 directly. The
+ easiest way to get the correct byte is to byte-swap the data
+ and calculate the syndrome a second time. */
+ csel data1, data1, data2, cc
+ rev data1, data1
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ bic has_nul1, tmp1, tmp2
+#else
+ csel has_nul1, has_nul1, has_nul2, cc
+#endif
sub len, src, srcin
- cbz has_nul1, .Lnul_in_data2
-CPU_BE( mov data2, data1 ) /*prepare data to re-calculate the syndrome*/
- sub len, len, #8
- mov has_nul2, has_nul1
-.Lnul_in_data2:
- /*
- * For big-endian, carry propagation (if the final byte in the
- * string is 0x01) means we cannot use has_nul directly. The
- * easiest way to get the correct byte is to byte-swap the data
- * and calculate the syndrome a second time.
- */
-CPU_BE( rev data2, data2 )
-CPU_BE( sub tmp1, data2, zeroones )
-CPU_BE( orr tmp2, data2, #REP8_7f )
-CPU_BE( bic has_nul2, tmp1, tmp2 )
-
- sub len, len, #8
- rev has_nul2, has_nul2
- clz pos, has_nul2
- add len, len, pos, lsr #3 /* Bits to bytes. */
+ rev has_nul1, has_nul1
+ add tmp2, len, 8
+ clz tmp1, has_nul1
+ csel len, len, tmp2, cc
+ add len, len, tmp1, lsr 3
ret
-.Lmisaligned:
- cmp tmp1, #8
- neg tmp1, tmp1
- ldp data1, data2, [src], #16
- lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
- mov tmp2, #~0
- /* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+L(nonascii_loop):
+ ldp data1, data2, [src, 16]!
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ bne L(tail)
+ ldp data1, data2, [src, 16]!
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(nonascii_loop)
+ b L(tail)
+
+ /* Load 16 bytes from [srcin & ~15] and force the bytes that precede
+ srcin to 0x7f, so we ignore any NUL bytes before the string.
+ Then continue in the aligned loop. */
+L(page_cross):
+ bic src, srcin, 15
+ ldp data1, data2, [src]
+ lsl tmp1, srcin, 3
+ mov tmp4, -1
+#ifdef __AARCH64EB__
+ /* Big-endian. Early bytes are at MSB. */
+ lsr tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ lsl tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
+#endif
+ orr tmp1, tmp1, REP8_80
+ orn data1, data1, tmp1
+ orn tmp2, data2, tmp1
+ tst srcin, 8
+ csel data1, data1, tmp4, eq
+ csel data2, data2, tmp2, eq
+ b L(page_cross_entry)
- orr data1, data1, tmp2
- orr data2a, data2, tmp2
- csinv data1, data1, xzr, le
- csel data2, data2, data2a, le
- b .Lrealigned
SYM_FUNC_END_PI(strlen)
EXPORT_SYMBOL_NOKASAN(strlen)
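
The "page_cross" entry test in the new strlen is just a comparison of the page offset against MIN_PAGE_SIZE - 16. A hedged C sketch of that check only; ldp16_may_cross_page is a made-up helper name, not a kernel API:

#include <stdbool.h>
#include <stdint.h>

#define MIN_PAGE_SIZE 4096

/*
 * True if an unaligned 16-byte load starting at addr might cross a 4K
 * page boundary; mirrors "and tmp1, srcin, MIN_PAGE_SIZE - 1" followed
 * by "cmp tmp1, MIN_PAGE_SIZE - 16; b.gt L(page_cross)" above.
 */
static bool ldp16_may_cross_page(uintptr_t addr)
{
        return (addr & (MIN_PAGE_SIZE - 1)) > MIN_PAGE_SIZE - 16;
}

For example, a string starting at page offset 4080 still fits the initial 16-byte load inside the page, while one at offset 4081 does not, so only the latter takes the slow aligned-reload path.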
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 2a7ee949ed47..48d44f7fddb1 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -1,299 +1,261 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/strncmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * compare two strings
+/* Assumptions:
*
- * Parameters:
- * x0 - const string 1 pointer
- * x1 - const string 2 pointer
- * x2 - the maximal length to be compared
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero if s1 is found,
- * respectively, to be less than, to match, or be greater than s2.
+ * ARMv8-a, AArch64
*/
+#define L(label) .L ## label
+
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-limit .req x2
-result .req x0
+#define src1 x0
+#define src2 x1
+#define limit x2
+#define result x0
/* Internal variables. */
-data1 .req x3
-data1w .req w3
-data2 .req x4
-data2w .req w4
-has_nul .req x5
-diff .req x6
-syndrome .req x7
-tmp1 .req x8
-tmp2 .req x9
-tmp3 .req x10
-zeroones .req x11
-pos .req x12
-limit_wd .req x13
-mask .req x14
-endloop .req x15
+#define data1 x3
+#define data1w w3
+#define data2 x4
+#define data2w w4
+#define has_nul x5
+#define diff x6
+#define syndrome x7
+#define tmp1 x8
+#define tmp2 x9
+#define tmp3 x10
+#define zeroones x11
+#define pos x12
+#define limit_wd x13
+#define mask x14
+#define endloop x15
+#define count mask
SYM_FUNC_START_WEAK_PI(strncmp)
- cbz limit, .Lret0
+ cbz limit, L(ret0)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
- b.ne .Lmisaligned8
- ands tmp1, src1, #7
- b.ne .Lmutual_align
+ and count, src1, #7
+ b.ne L(misaligned8)
+ cbnz count, L(mutual_align)
/* Calculate the number of full and partial words -1. */
- /*
- * when limit is mulitply of 8, if not sub 1,
- * the judgement of last dword will wrong.
- */
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
- lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
-.Lloop_aligned:
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. */
+ .p2align 4
+L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
subs limit_wd, limit_wd, #1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, pl /* Last Dword or differences.*/
- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, pl /* Last Dword or differences. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp endloop, #0, #0, eq
- b.eq .Lloop_aligned
+ b.eq L(loop_aligned)
+ /* End of main loop */
- /*Not reached the limit, must have found the end or a diff. */
- tbz limit_wd, #63, .Lnot_limit
+ /* Not reached the limit, must have found the end or a diff. */
+ tbz limit_wd, #63, L(not_limit)
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
- b.eq .Lnot_limit
+ b.eq L(not_limit)
- lsl limit, limit, #3 /* Bits -> bytes. */
+ lsl limit, limit, #3 /* Bytes -> bits. */
mov mask, #~0
-CPU_BE( lsr mask, mask, limit )
-CPU_LE( lsl mask, mask, limit )
+#ifdef __AARCH64EB__
+ lsr mask, mask, limit
+#else
+ lsl mask, mask, limit
+#endif
bic data1, data1, mask
bic data2, data2, mask
/* Make sure that the NUL byte is marked in the syndrome. */
orr has_nul, has_nul, mask
-.Lnot_limit:
+L(not_limit):
orr syndrome, diff, has_nul
- b .Lcal_cmpresult
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that precede the start point.
- * We also need to adjust the limit calculations, but without
- * overflowing if the limit is near ULONG_MAX.
- */
+#ifndef __AARCH64EB__
+ rev syndrome, syndrome
+ rev data1, data1
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ clz pos, syndrome
+ rev data2, data2
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#else
+ /* For big-endian we cannot use the trick with the syndrome value
+ as carry-propagation can corrupt the upper bits if the trailing
+ bytes in the string contain 0x01. */
+ /* However, if there is no NUL byte in the dword, we can generate
+ the result directly. We can't just subtract the bytes as the
+ MSB might be significant. */
+ cbnz has_nul, 1f
+ cmp data1, data2
+ cset result, ne
+ cneg result, result, lo
+ ret
+1:
+ /* Re-compute the NUL-byte detection, using a byte-reversed value. */
+ rev tmp3, data1
+ sub tmp1, tmp3, zeroones
+ orr tmp2, tmp3, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ rev has_nul, has_nul
+ orr syndrome, diff, has_nul
+ clz pos, syndrome
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#endif
+
+L(mutual_align):
+ /* Sources are mutually aligned, but are not currently at an
+ alignment boundary. Round down the addresses and then mask off
+ the bytes that precede the start point.
+ We also need to adjust the limit calculations, but without
+ overflowing if the limit is near ULONG_MAX. */
bic src1, src1, #7
bic src2, src2, #7
ldr data1, [src1], #8
- neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). */
+ neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */
ldr data2, [src2], #8
mov tmp2, #~0
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
+ lsl tmp2, tmp2, tmp3 /* Shift (count & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
-
+ lsr tmp2, tmp2, tmp3 /* Shift (count & 63). */
+#endif
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
- /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/
- add limit, limit, tmp1
- add tmp3, tmp3, tmp1
+ /* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */
+ add limit, limit, count
+ add tmp3, tmp3, count
orr data1, data1, tmp2
orr data2, data2, tmp2
add limit_wd, limit_wd, tmp3, lsr #3
- b .Lstart_realigned
+ b L(start_realigned)
+
+ .p2align 4
+ /* Don't bother with dwords for up to 16 bytes. */
+L(misaligned8):
+ cmp limit, #16
+ b.hs L(try_misaligned_words)
-/*when src1 offset is not equal to src2 offset...*/
-.Lmisaligned8:
- cmp limit, #8
- b.lo .Ltiny8proc /*limit < 8... */
- /*
- * Get the align offset length to compare per byte first.
- * After this process, one string's address will be aligned.*/
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum. */
- /*
- * Here, limit is not less than 8, so directly run .Ltinycmp
- * without checking the limit.*/
- sub limit, limit, pos
-.Ltinycmp:
+L(byte_loop):
+ /* Perhaps we can do better than this. */
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*find the null or unequal...*/
- cmp data1w, #1
- ccmp data1w, data2w, #0, cs
- b.eq .Lstart_align /*the last bytes are equal....*/
-1:
+ subs limit, limit, #1
+ ccmp data1w, #1, #0, hi /* NZCV = 0b0000. */
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.eq L(byte_loop)
+L(done):
sub result, data1, data2
ret
-
-.Lstart_align:
+ /* Align the SRC1 to a dword by doing a bytewise compare and then do
+ the dword loop. */
+L(try_misaligned_words):
lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
- /*process more leading bytes to make str1 aligned...*/
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- add src1, src1, tmp3 /*tmp3 is positive in this branch.*/
- add src2, src2, tmp3
- ldr data1, [src1], #8
- ldr data2, [src2], #8
+ cbz count, L(do_misaligned)
- sub limit, limit, tmp3
+ neg count, count
+ and count, count, #7
+ sub limit, limit, count
lsr limit_wd, limit, #3
- subs limit_wd, limit_wd, #1
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- bics has_nul, tmp1, tmp2
- ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
- b.ne .Lunequal_proc
- /*How far is the current str2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-.Lrecal_offset:
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes from the SRC2 alignment
- * boundary,then compare with the relative bytes from SRC1.
- * If all 8 bytes are equal,then start the second part's comparison.
- * Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, eq
- cbnz endloop, .Lunequal_proc
+L(page_end_loop):
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ cmp data1w, #1
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.ne L(done)
+ subs count, count, #1
+ b.hi L(page_end_loop)
+
+L(do_misaligned):
+ /* Prepare ourselves for the next page crossing. Unlike the aligned
+ loop, we fetch 1 less dword because we risk crossing bounds on
+ SRC2. */
+ mov count, #8
+ subs limit_wd, limit_wd, #1
+ b.lo L(done_loop)
+L(loop_misaligned):
+ and tmp2, src2, #0xff8
+ eor tmp2, tmp2, #0xff8
+ cbz tmp2, L(page_end_loop)
- /*The second part process*/
ldr data1, [src1], #8
ldr data2, [src2], #8
- subs limit_wd, limit_wd, #1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- bics has_nul, tmp1, tmp2
- ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
- b.eq .Lloopcmp_proc
-
-.Lunequal_proc:
- orr syndrome, diff, has_nul
- cbz syndrome, .Lremain8
-.Lcal_cmpresult:
- /*
- * reversed the byte-order as big-endian,then CLZ can find the most
- * significant zero bits.
- */
-CPU_LE( rev syndrome, syndrome )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
- /*
- * For big-endian we cannot use the trick with the syndrome value
- * as carry-propagation can corrupt the upper bits if the trailing
- * bytes in the string contain 0x01.
- * However, if there is no NUL byte in the dword, we can generate
- * the result directly. We can't just subtract the bytes as the
- * MSB might be significant.
- */
-CPU_BE( cbnz has_nul, 1f )
-CPU_BE( cmp data1, data2 )
-CPU_BE( cset result, ne )
-CPU_BE( cneg result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
- /* Re-compute the NUL-byte detection, using a byte-reversed value.*/
-CPU_BE( rev tmp3, data1 )
-CPU_BE( sub tmp1, tmp3, zeroones )
-CPU_BE( orr tmp2, tmp3, #REP8_7f )
-CPU_BE( bic has_nul, tmp1, tmp2 )
-CPU_BE( rev has_nul, has_nul )
-CPU_BE( orr syndrome, diff, has_nul )
- /*
- * The MS-non-zero bit of the syndrome marks either the first bit
- * that is different, or the top bit of the first zero byte.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- clz pos, syndrome
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * But we need to zero-extend (char is unsigned) the value and then
- * perform a signed 32-bit subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
- ret
-
-.Lremain8:
- /* Limit % 8 == 0 => all bytes significant. */
- ands limit, limit, #7
- b.eq .Lret0
-.Ltiny8proc:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs limit, limit, #1
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ ccmp diff, #0, #0, eq
+ b.ne L(not_limit)
+ subs limit_wd, limit_wd, #1
+ b.pl L(loop_misaligned)
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltiny8proc
- sub result, data1, data2
- ret
+L(done_loop):
+ /* We found a difference or a NUL before the limit was reached. */
+ and limit, limit, #7
+ cbz limit, L(not_limit)
+ /* Read the last word. */
+ sub src1, src1, 8
+ sub src2, src2, 8
+ ldr data1, [src1, limit]
+ ldr data2, [src2, limit]
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ ccmp diff, #0, #0, eq
+ b.ne L(not_limit)
-.Lret0:
+L(ret0):
mov result, #0
ret
+
SYM_FUNC_END_PI(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)
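
Both strcmp.S and strncmp.S guard their misaligned dword loops with the same "last dword of a 4K page" test on SRC2 (the and/eor #0xff8 sequence). A small C sketch of that predicate, using a hypothetical helper name, just to make the masking explicit:

#include <stdbool.h>
#include <stdint.h>

/*
 * True if src2 points into the final 8-byte-aligned dword of a 4K page,
 * i.e. a full 8-byte load from here could run past the page; matches
 * "and tmp, src2, #0xff8; eor tmp, tmp, #0xff8; cbz tmp, ..." above.
 */
static bool in_last_dword_of_page(uintptr_t src2)
{
        return (src2 & 0xff8) == 0xff8;
}

When the predicate is true, the code drops back to the byte-at-a-time loop until SRC2 has moved past the risky region.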
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index c83bb5a4aad2..baee22961bdb 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
* barrier to order the cache maintenance against the memcpy.
*/
memcpy(dst, src, cnt);
- __clean_dcache_area_pop(dst, cnt);
+ dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt);
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
@@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
rc = raw_copy_from_user(to, from, n);
/* See above */
- __clean_dcache_area_pop(to, n - rc);
+ dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
return rc;
}
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2d881f34dd9d..5051b3c1a4f1 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
#include <asm/asm-uaccess.h>
/*
- * flush_icache_range(start,end)
+ * caches_clean_inval_pou_macro(start,end) [fixup]
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -23,12 +23,27 @@
*
* - start - virtual start address of region
* - end - virtual end address of region
+ * - fixup - optional label to branch to on user fault
*/
-SYM_FUNC_START(__flush_icache_range)
- /* FALLTHROUGH */
+.macro caches_clean_inval_pou_macro, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+ dsb ishst
+ b .Ldc_skip_\@
+alternative_else_nop_endif
+ mov x2, x0
+ mov x3, x1
+ dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+ isb
+ b .Lic_skip_\@
+alternative_else_nop_endif
+ invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
/*
- * __flush_cache_user_range(start,end)
+ * caches_clean_inval_pou(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -37,117 +52,103 @@ SYM_FUNC_START(__flush_icache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_pou)
+ caches_clean_inval_pou_macro
+ ret
+SYM_FUNC_END(caches_clean_inval_pou)
+
+/*
+ * caches_clean_inval_user_pou(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified region.
+ * This is typically used when code has been written to a memory region,
+ * and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+SYM_FUNC_START(caches_clean_inval_user_pou)
uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
- dsb ishst
- b 7f
-alternative_else_nop_endif
- dcache_line_size x2, x3
- sub x3, x2, #1
- bic x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
- add x4, x4, x2
- cmp x4, x1
- b.lo 1b
- dsb ish
-7:
-alternative_if ARM64_HAS_CACHE_DIC
- isb
- b 8f
-alternative_else_nop_endif
- invalidate_icache_by_line x0, x1, x2, x3, 9f
-8: mov x0, #0
+ caches_clean_inval_pou_macro 2f
+ mov x0, xzr
1:
uaccess_ttbr0_disable x1, x2
ret
-9:
+2:
mov x0, #-EFAULT
b 1b
-SYM_FUNC_END(__flush_icache_range)
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
/*
- * invalidate_icache_range(start,end)
+ * icache_inval_pou(start,end)
*
* Ensure that the I cache is invalid within specified region.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
alternative_if ARM64_HAS_CACHE_DIC
- mov x0, xzr
isb
ret
alternative_else_nop_endif
- uaccess_ttbr0_enable x2, x3, x4
-
- invalidate_icache_by_line x0, x1, x2, x3, 2f
- mov x0, xzr
-1:
- uaccess_ttbr0_disable x1, x2
+ invalidate_icache_by_line x0, x1, x2, x3
ret
-2:
- mov x0, #-EFAULT
- b 1b
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
/*
- * __flush_dcache_area(kaddr, size)
+ * dcache_clean_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned and invalidated to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
/*
- * __clean_dcache_area_pou(kaddr, size)
+ * dcache_clean_pou(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoU.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
/*
- * __inval_dcache_area(kaddr, size)
+ * dcache_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are invalidated. Any partial lines at the ends of the interval are
* also cleaned to PoC to prevent data loss.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - kernel start address of region
+ * - end - kernel end address of region
*/
SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
/* FALLTHROUGH */
/*
- * __dma_inv_area(start, size)
+ * __dma_inv_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
- add x1, x1, x0
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -165,48 +166,48 @@ SYM_FUNC_START_PI(__inval_dcache_area)
b.lo 2b
dsb sy
ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
SYM_FUNC_END(__dma_inv_area)
/*
- * __clean_dcache_area_poc(kaddr, size)
+ * dcache_clean_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
/* FALLTHROUGH */
/*
- * __dma_clean_area(start, size)
+ * __dma_clean_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
dcache_by_line_op cvac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
SYM_FUNC_END(__dma_clean_area)
/*
- * __clean_dcache_area_pop(kaddr, size)
+ * dcache_clean_pop(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoP.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
alternative_if_not ARM64_HAS_DCPOP
- b __clean_dcache_area_poc
+ b dcache_clean_poc
alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
/*
* __dma_flush_area(start, size)
@@ -217,6 +218,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop)
* - size - size in question
*/
SYM_FUNC_START_PI(__dma_flush_area)
+ add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__dma_flush_area)
@@ -228,6 +230,7 @@ SYM_FUNC_END_PI(__dma_flush_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_map_area)
+ add x1, x0, x1
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_area
b __dma_clean_area
@@ -240,6 +243,7 @@ SYM_FUNC_END_PI(__dma_map_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_unmap_area)
+ add x1, x0, x1
cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_area
ret
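
All of these routines now take (start, end) virtual address ranges rather than (kaddr, size); the __dma_* entry points keep their (start, size) arguments and simply add the two first. The per-line work done by dcache_by_line_op can be pictured as below. This is an illustrative sketch only, not kernel code: line_size stands in for the value derived from CTR_EL0 and dc_op for the DC instruction variant (CIVAC, CVAC, CVAU, CVAP, ...):

#include <stdint.h>

/* Illustrative shape of by-range D-cache maintenance. */
static void dcache_op_by_line(uintptr_t start, uintptr_t end,
                              uintptr_t line_size,
                              void (*dc_op)(uintptr_t line))
{
        uintptr_t addr = start & ~(line_size - 1);   /* round down to a line */

        for (; addr < end; addr += line_size)
                dc_op(addr);    /* e.g. DC CIVAC for dcache_clean_inval_poc() */
        /* the real macro ends with a DSB to complete the maintenance */
}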
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 001737a8f309..cd72576ae2b7 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -402,14 +402,12 @@ static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
- asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
- GFP_KERNEL);
+ asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
- sizeof(*pinned_asid_map), GFP_KERNEL);
+ pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
nr_pinned_asids = 0;
/*
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bf1dd3eb041..6719f9efea09 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -50,7 +50,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->dma_coherent = coherent;
if (iommu)
- iommu_setup_dma_ops(dev, dma_base, size);
+ iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
#ifdef CONFIG_XEN
if (xen_swiotlb_detect())
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 871c82ab0a30..349c488765ca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr)
pr_alert(" EA = %lu, S1PTW = %lu\n",
(esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
(esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+ pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+ esr_to_fault_info(esr)->name);
if (esr_is_data_abort(esr))
data_abort_decode(esr);
@@ -232,13 +234,17 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
+static bool is_el1_data_abort(unsigned int esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
+}
+
static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
- if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
if (fsc_type == ESR_ELx_FSC_PERM)
@@ -258,7 +264,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
unsigned long flags;
u64 par, dfsc;
- if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+ if (!is_el1_data_abort(esr) ||
(esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
return false;
@@ -346,10 +352,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc = esr & ESR_ELx_FSC;
- if (ec != ESR_ELx_EC_DABT_CUR)
+ if (!is_el1_data_abort(esr))
return false;
if (fsc == ESR_ELx_FSC_MTE)
@@ -504,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
*/
if (!(vma->vm_flags & vm_flags))
return VM_FAULT_BADACCESS;
- return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
+ return handle_mm_fault(vma, addr, mm_flags, regs);
}
static bool is_el0_instruction_abort(unsigned int esr)
@@ -836,13 +841,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);
-void do_el0_irq_bp_hardening(void)
-{
- /* PC has already been checked in entry.S */
- arm64_apply_bp_hardening();
-}
-NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
@@ -921,3 +919,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
+
+/*
+ * Used during anonymous page fault handling.
+ */
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+
+ /*
+ * If the page is mapped with PROT_MTE, initialise the tags at the
+ * point of allocation and page zeroing as this is usually faster than
+ * separate DC ZVA and STGM.
+ */
+ if (vma->vm_flags & VM_MTE)
+ flags |= __GFP_ZEROTAGS;
+
+ return alloc_page_vma(flags, vma, vaddr);
+}
+
+void tag_clear_highpage(struct page *page)
+{
+ mte_zero_clear_page_tags(page_address(page));
+ page_kasan_tag_reset(page);
+ set_bit(PG_mte_tagged, &page->flags);
+}
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..2aaf950b906c 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -14,28 +14,25 @@
#include <asm/cache.h>
#include <asm/tlbflush.h>
-void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(unsigned long start, unsigned long end)
{
- unsigned long addr = (unsigned long)kaddr;
-
if (icache_is_aliasing()) {
- __clean_dcache_area_pou(kaddr, len);
- __flush_icache_all();
+ dcache_clean_pou(start, end);
+ icache_inval_all_pou();
} else {
/*
* Don't issue kick_all_cpus_sync() after I-cache invalidation
* for user mappings.
*/
- __flush_icache_range(addr, addr + len);
+ caches_clean_inval_pou(start, end);
}
}
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len)
+static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
if (vma->vm_flags & VM_EXEC)
- sync_icache_aliases(kaddr, len);
+ sync_icache_aliases(start, end);
}
/*
@@ -48,15 +45,19 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long len)
{
memcpy(dst, src, len);
- flush_ptrace_access(vma, page, uaddr, dst, len);
+ flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}
void __sync_icache_dcache(pte_t pte)
{
struct page *page = pte_page(pte);
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- sync_icache_aliases(page_address(page), page_size(page));
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
+ sync_icache_aliases((unsigned long)page_address(page),
+ (unsigned long)page_address(page) +
+ page_size(page));
+ set_bit(PG_dcache_clean, &page->flags);
+ }
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -75,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
/* Ensure order against any prior non-cacheable writes */
dmb(osh);
- __clean_dcache_area_pop(addr, size);
+ dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
- __inval_dcache_area(addr, size);
+ dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 58987a98e179..23505fc35324 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -339,10 +339,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- size_t pagesize = huge_page_size(hstate_vma(vma));
+ size_t pagesize = 1UL << shift;
if (pagesize == CONT_PTE_SIZE) {
entry = pte_mkcont(entry);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 16a2b2b1c54d..8490ed2917ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -43,6 +43,7 @@
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
+#include <asm/xen/swiotlb-xen.h>
/*
* We need to be able to catch inadvertent references to memstart_addr
@@ -218,42 +219,17 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
free_area_init(max_zone_pfns);
}
-int pfn_valid(unsigned long pfn)
+int pfn_is_map_memory(unsigned long pfn)
{
phys_addr_t addr = PFN_PHYS(pfn);
- struct mem_section *ms;
- /*
- * Ensure the upper PAGE_SHIFT bits are clear in the
- * pfn. Else it might lead to false positives when
- * some of the upper bits are set, but the lower bits
- * match a valid pfn.
- */
+ /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
if (PHYS_PFN(addr) != pfn)
return 0;
- if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
- return 0;
-
- ms = __pfn_to_section(pfn);
- if (!valid_section(ms))
- return 0;
-
- /*
- * ZONE_DEVICE memory does not have the memblock entries.
- * memblock_is_map_memory() check for ZONE_DEVICE based
- * addresses will always fail. Even the normal hotplugged
- * memory will never have MEMBLOCK_NOMAP flag set in their
- * memblock entries. Skip memblock search for all non early
- * memory sections covering all of hotplug memory including
- * both normal and ZONE_DEVICE based.
- */
- if (!early_section(ms))
- return pfn_section_valid(ms, pfn);
-
return memblock_is_map_memory(addr);
}
-EXPORT_SYMBOL(pfn_valid);
+EXPORT_SYMBOL(pfn_is_map_memory);
static phys_addr_t memory_limit = PHYS_ADDR_MAX;
@@ -482,7 +458,7 @@ void __init mem_init(void)
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
- else
+ else if (!xen_swiotlb_detect())
swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
@@ -498,6 +474,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
+ /*
+ * Selected page table levels should match when derived from
+ * scratch using the virtual address range and page size.
+ */
+ BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+ CONFIG_PGTABLE_LEVELS);
+
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b5e83c46b23e..b7c81dacabf0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -43,7 +43,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
/*
* Don't allow RAM to be mapped.
*/
- if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
+ if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(iounmap);
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
/* For normal memory we already have a cacheable mapping. */
- if (pfn_valid(__phys_to_pfn(phys_addr)))
+ if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
return (void __iomem *)__phys_to_virt(phys_addr);
return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6dd9369e3ea0..595fde9a47dd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -82,7 +82,7 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
- if (!pfn_valid(pfn))
+ if (!pfn_is_map_memory(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
@@ -228,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ if (((addr | next | phys) & ~PMD_MASK) == 0 &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
pmd_set_huge(pmdp, phys, prot);
@@ -515,7 +515,8 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
- if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
+ if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
+ IS_ENABLED(CONFIG_KFENCE))
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
@@ -1113,14 +1114,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
+#if !ARM64_KERNEL_USES_PMD_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
return vmemmap_populate_basepages(start, end, node, altmap);
}
-#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#else /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
@@ -1165,17 +1166,18 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return 0;
}
-#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
-#ifdef CONFIG_MEMORY_HOTPLUG
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
unmap_hotplug_range(start, end, true, altmap);
free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
-#endif
}
+#endif /* CONFIG_MEMORY_HOTPLUG */
static inline pud_t *fixmap_pud(unsigned long addr)
{
@@ -1337,6 +1339,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
return dt_virt;
}
+#if CONFIG_PGTABLE_LEVELS > 3
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
@@ -1351,6 +1354,16 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
return 1;
}
+int pud_clear_huge(pud_t *pudp)
+{
+ if (!pud_sect(READ_ONCE(*pudp)))
+ return 0;
+ pud_clear(pudp);
+ return 1;
+}
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
@@ -1365,14 +1378,6 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
return 1;
}
-int pud_clear_huge(pud_t *pudp)
-{
- if (!pud_sect(READ_ONCE(*pudp)))
- return 0;
- pud_clear(pudp);
- return 1;
-}
-
int pmd_clear_huge(pmd_t *pmdp)
{
if (!pmd_sect(READ_ONCE(*pmdp)))
@@ -1380,6 +1385,7 @@ int pmd_clear_huge(pmd_t *pmdp)
pmd_clear(pmdp);
return 1;
}
+#endif
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0a48191534ff..35936c5ae1ce 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,9 +46,13 @@
#endif
#ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
+#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
-#define TCR_KASAN_HW_FLAGS 0
+/*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#endif
/*
@@ -58,10 +62,8 @@
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
- MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
- MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
#ifdef CONFIG_CPU_PM
@@ -83,11 +85,7 @@ SYM_FUNC_START(cpu_do_suspend)
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x12, tpidr_el1
-alternative_else
- mrs x12, tpidr_el2
-alternative_endif
+ get_this_cpu_offset x12
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
@@ -145,11 +143,7 @@ SYM_FUNC_START(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- msr tpidr_el1, x13
-alternative_else
- msr tpidr_el2, x13
-alternative_endif
+ set_this_cpu_offset x13
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
@@ -447,12 +441,24 @@ SYM_FUNC_START(__cpu_setup)
mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
msr_s SYS_GCR_EL1, x10
+ /*
+ * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+ * RGSR_EL1.SEED must be non-zero for IRG to produce
+ * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+ * must initialize it.
+ */
+ mrs x10, CNTVCT_EL0
+ ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
+ csinc x10, x10, xzr, ne
+ lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
+ msr_s SYS_RGSR_EL1, x10
+
/* clear any pending tag check faults in TFSR*_EL1 */
msr_s SYS_TFSR_EL1, xzr
msr_s SYS_TFSRE0_EL1, xzr
/* set the TCR_EL1 bits */
- mov_q x10, TCR_KASAN_HW_FLAGS
+ mov_q x10, TCR_MTE_FLAGS
orr tcr, tcr, x10
1:
#endif
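
The new RGSR_EL1 seeding in __cpu_setup reads CNTVCT_EL0, keeps only the SEED field and bumps a zero result to 1 so IRG cannot be left with an all-zero seed. A C transliteration of just that arithmetic, with the mask and shift passed in as stand-ins for SYS_RGSR_EL1_SEED_MASK/SHIFT:

#include <stdint.h>

/* Sketch of the ands/csinc/lsl sequence above; not kernel code. */
static uint64_t rgsr_seed_from_counter(uint64_t cntvct, uint64_t seed_mask,
                                       unsigned int seed_shift)
{
        uint64_t seed = cntvct & seed_mask;     /* ands */

        if (!seed)                              /* csinc ..., xzr, ne */
                seed = 1;
        return seed << seed_shift;              /* lsl into the SEED field */
}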
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index a1937dfff31c..1c403536c9bb 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -159,10 +159,6 @@ static const struct prot_bits pte_bits[] = {
.set = "DEVICE/nGnRE",
}, {
.mask = PTE_ATTRINDX_MASK,
- .val = PTE_ATTRINDX(MT_DEVICE_GRE),
- .set = "DEVICE/GRE",
- }, {
- .mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL_NC),
.set = "MEM/NORMAL-NC",
}, {
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f7b194878a99..dccf98a37283 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -16,6 +16,7 @@
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/insn.h>
#include <asm/set_memory.h>
#include "bpf_jit.h"
@@ -178,9 +179,6 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
-/* Stack must be multiples of 16B */
-#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
-
/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
@@ -255,7 +253,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_BTI_J, ctx);
}
- ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);
+ /* Stack must be multiples of 16B */
+ ctx->stack_size = round_up(prog->aux->stack_depth, 16);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@@ -487,17 +486,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
- switch (BPF_OP(code)) {
- case BPF_DIV:
- emit(A64_UDIV(is64, dst, dst, src), ctx);
- break;
- case BPF_MOD:
- emit(A64_UDIV(is64, tmp, dst, src), ctx);
- emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
- break;
- }
+ emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
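
Replacing the local STACK_ALIGN() macro with round_up() is purely cosmetic: for a power-of-two alignment the two expressions give the same result. A quick self-contained check; kernel_round_up() below is a local copy of the kernel helper's formulation, written out here only for the comparison:

#include <stdio.h>

#define STACK_ALIGN(sz)         (((sz) + 15) & ~15)
#define kernel_round_up(x, a)   ((((x) - 1) | ((a) - 1)) + 1)

int main(void)
{
        for (unsigned int sz = 0; sz <= 512; sz++)
                if (STACK_ALIGN(sz) != kernel_round_up(sz, 16))
                        printf("mismatch at %u\n", sz);  /* never printed */
        return 0;
}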
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile
new file mode 100644
index 000000000000..932b4fe5c768
--- /dev/null
+++ b/arch/arm64/tools/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+gen := arch/$(ARCH)/include/generated
+kapi := $(gen)/asm
+
+kapi-hdrs-y := $(kapi)/cpucaps.h
+
+targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y))
+
+PHONY += kapi
+
+kapi: $(kapi-hdrs-y) $(gen-y)
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+quiet_cmd_gen_cpucaps = GEN $@
+ cmd_gen_cpucaps = mkdir -p $(dir $@) && \
+ $(AWK) -f $(filter-out $(PHONY),$^) > $@
+
+$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
+ $(call if_changed,gen_cpucaps)
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
new file mode 100644
index 000000000000..49305c2e6dfd
--- /dev/null
+++ b/arch/arm64/tools/cpucaps
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Internal CPU capabilities constants, keep this list sorted
+
+BTI
+# Unreliable: use system_supports_32bit_el0() instead.
+HAS_32BIT_EL0_DO_NOT_USE
+HAS_32BIT_EL1
+HAS_ADDRESS_AUTH
+HAS_ADDRESS_AUTH_ARCH
+HAS_ADDRESS_AUTH_IMP_DEF
+HAS_AMU_EXTN
+HAS_ARMv8_4_TTL
+HAS_CACHE_DIC
+HAS_CACHE_IDC
+HAS_CNP
+HAS_CRC32
+HAS_DCPODP
+HAS_DCPOP
+HAS_E0PD
+HAS_EPAN
+HAS_GENERIC_AUTH
+HAS_GENERIC_AUTH_ARCH
+HAS_GENERIC_AUTH_IMP_DEF
+HAS_IRQ_PRIO_MASKING
+HAS_LDAPR
+HAS_LSE_ATOMICS
+HAS_NO_FPSIMD
+HAS_NO_HW_PREFETCH
+HAS_PAN
+HAS_RAS_EXTN
+HAS_RNG
+HAS_SB
+HAS_STAGE2_FWB
+HAS_SYSREG_GIC_CPUIF
+HAS_TLB_RANGE
+HAS_VIRT_HOST_EXTN
+HW_DBM
+KVM_PROTECTED_MODE
+MISMATCHED_CACHE_TYPE
+MTE
+SPECTRE_V2
+SPECTRE_V3A
+SPECTRE_V4
+SSBS
+SVE
+UNMAP_KERNEL_AT_EL0
+WORKAROUND_834220
+WORKAROUND_843419
+WORKAROUND_845719
+WORKAROUND_858921
+WORKAROUND_1418040
+WORKAROUND_1463225
+WORKAROUND_1508412
+WORKAROUND_1542419
+WORKAROUND_CAVIUM_23154
+WORKAROUND_CAVIUM_27456
+WORKAROUND_CAVIUM_30115
+WORKAROUND_CAVIUM_TX2_219_PRFM
+WORKAROUND_CAVIUM_TX2_219_TVM
+WORKAROUND_CLEAN_CACHE
+WORKAROUND_DEVICE_LOAD_ACQUIRE
+WORKAROUND_NVIDIA_CARMEL_CNP
+WORKAROUND_QCOM_FALKOR_E1003
+WORKAROUND_REPEAT_TLBI
+WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/tools/gen-cpucaps.awk b/arch/arm64/tools/gen-cpucaps.awk
new file mode 100755
index 000000000000..00c9e72a200a
--- /dev/null
+++ b/arch/arm64/tools/gen-cpucaps.awk
@@ -0,0 +1,40 @@
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+# gen-cpucaps.awk: arm64 cpucaps header generator
+#
+# Usage: awk -f gen-cpucaps.awk cpucaps.txt
+
+# Log an error and terminate
+function fatal(msg) {
+ print "Error at line " NR ": " msg > "/dev/stderr"
+ exit 1
+}
+
+# skip blank lines and comment lines
+/^$/ { next }
+/^#/ { next }
+
+BEGIN {
+ print "#ifndef __ASM_CPUCAPS_H"
+ print "#define __ASM_CPUCAPS_H"
+ print ""
+ print "/* Generated file - do not edit */"
+ cap_num = 0
+ print ""
+}
+
+/^[vA-Z0-9_]+$/ {
+ printf("#define ARM64_%-30s\t%d\n", $0, cap_num++)
+ next
+}
+
+END {
+ printf("#define ARM64_NCAPS\t\t\t\t%d\n", cap_num)
+ print ""
+ print "#endif /* __ASM_CPUCAPS_H */"
+}
+
+# Any lines not handled by previous rules are unexpected
+{
+ fatal("unhandled statement")
+}
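
Running the awk script over the cpucaps list above yields a header of roughly this shape (illustrative excerpt based on the printf formats in gen-cpucaps.awk, not verbatim build output; the 61 capability names in the list are numbered in file order):

	#ifndef __ASM_CPUCAPS_H
	#define __ASM_CPUCAPS_H

	/* Generated file - do not edit */

	#define ARM64_BTI				0
	#define ARM64_HAS_32BIT_EL0_DO_NOT_USE		1
	#define ARM64_HAS_32BIT_EL1			2
	/* ... one define per capability name ... */
	#define ARM64_NCAPS				61

	#endif /* __ASM_CPUCAPS_H */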
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 8de5b987edb9..2716f6395ba7 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -76,8 +76,6 @@ config CSKY
select PERF_USE_VMALLOC if CPU_CK610
select RTC_LIB
select TIMER_OF
- select USB_ARCH_HAS_EHCI
- select USB_ARCH_HAS_OHCI
select GENERIC_PCI_IOMAP
select HAVE_PCI
select PCI_DOMAINS_GENERIC if PCI
@@ -245,7 +243,6 @@ endchoice
menuconfig HAVE_TCM
bool "Tightly-Coupled/Sram Memory"
- select GENERIC_ALLOCATOR
help
The implementation is not only used by TCM (Tightly-Coupled Memory)
but also used by SRAM on the SoC bus. It follows the existing Linux tcm
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index dabc8e46ce7b..d1bef11f8dc9 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -31,7 +31,7 @@ extern void __bad_xchg(void);
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
#define __cmpxchg_relaxed(ptr, old, new, size) \
@@ -61,14 +61,14 @@ extern void __bad_xchg(void);
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__smp_release_fence(); \
- __ret = cmpxchg_relaxed(ptr, o, n); \
+ __ret = arch_cmpxchg_relaxed(ptr, o, n); \
__smp_acquire_fence(); \
__ret; \
})
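
The arch_ prefix on xchg/cmpxchg here (and in the other architectures touched below) is what lets the generic instrumented atomic headers provide the plain xchg()/cmpxchg() names on top and add KASAN/KCSAN instrumentation. A simplified sketch of that wrapping, an assumed shape rather than the generated header:

	/*
	 * Simplified: the generic layer instruments the access, then defers to
	 * the architecture primitive that was renamed to arch_cmpxchg() above.
	 */
	#define cmpxchg(ptr, old, new)						\
	({									\
		typeof(ptr) __ai_ptr = (ptr);					\
		instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr));	\
		arch_cmpxchg(__ai_ptr, (old), (new));				\
	})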
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index cd211aabbefd..bbbd0698b397 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -22,8 +22,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd(__pa(page_address(pte))));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern void pgd_init(unsigned long *p);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 0d60367b6bfa..151607ed5158 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -14,7 +14,6 @@
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/*
 * C-SKY has a two-level paging structure:
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
index 17479860d43d..1cbcba4b0dd1 100644
--- a/arch/csky/kernel/asm-offsets.c
+++ b/arch/csky/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
int main(void)
{
/* offsets into the task struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index 589f090f48b9..68b22b499aeb 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -295,23 +295,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 0f9f5eef9338..e2993539af8e 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -281,7 +281,6 @@ void csky_start_secondary(void)
pr_info("CPU%u Online: %s...\n", cpu, __func__);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index 16ae20a0af34..1b280ef08004 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -115,7 +115,7 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
+ if (likely(task && task != current && !task_is_running(task)))
walk_stackframe(task, NULL, save_wchan, &pc);
return pc;
}
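
get_wchan() and the other call sites converted in this series use task_is_running() instead of comparing p->state directly; roughly, the helper is a READ_ONCE() of the (renamed) state word compared against TASK_RUNNING, as in this sketch (see include/linux/sched.h for the real definition):

	/* Sketch of the helper these call sites switch to. */
	#define task_is_running(task)	(READ_ONCE((task)->__state) == TASK_RUNNING)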
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
index 4e51d63850c4..cd847ad62c7e 100644
--- a/arch/csky/mm/syscache.c
+++ b/arch/csky/mm/syscache.c
@@ -12,15 +12,17 @@ SYSCALL_DEFINE3(cacheflush,
int, cache)
{
switch (cache) {
- case ICACHE:
case BCACHE:
- flush_icache_mm_range(current->mm,
- (unsigned long)addr,
- (unsigned long)addr + bytes);
- fallthrough;
case DCACHE:
dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes);
+ if (cache != BCACHE)
+ break;
+ fallthrough;
+ case ICACHE:
+ flush_icache_mm_range(current->mm,
+ (unsigned long)addr,
+ (unsigned long)addr + bytes);
break;
default:
return -EINVAL;
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 60ee7f0d60a8..e23139c8fc0d 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
deleted file mode 100644
index a990d151f163..000000000000
--- a/arch/h8300/include/asm/atomic.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_ATOMIC__
-#define __ARCH_H8300_ATOMIC__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-#include <asm/irqflags.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter; \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- \
- flags = arch_local_irq_save(); \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
-}
-
-ATOMIC_OP_RETURN(add, +=)
-ATOMIC_OP_RETURN(sub, -=)
-
-#define ATOMIC_OPS(op, c_op) \
- ATOMIC_OP(op, c_op) \
- ATOMIC_FETCH_OP(op, c_op)
-
-ATOMIC_OPS(and, &=)
-ATOMIC_OPS(or, |=)
-ATOMIC_OPS(xor, ^=)
-ATOMIC_OPS(add, +=)
-ATOMIC_OPS(sub, -=)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- arch_local_irq_restore(flags);
- return ret;
-}
-
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (ret != u)
- v->counter += a;
- arch_local_irq_restore(flags);
- return ret;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-
-#endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h
deleted file mode 100644
index c64bb38ce242..000000000000
--- a/arch/h8300/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_CMPXCHG__
-#define __ARCH_H8300_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
- sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x,
- volatile void *ptr, int size)
-{
- unsigned long tmp, flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("mov.b %2,%0\n\t"
- "mov.b %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 2:
- __asm__ __volatile__
- ("mov.w %2,%0\n\t"
- "mov.w %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 4:
- __asm__ __volatile__
- ("mov.l %2,%0\n\t"
- "mov.l %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- default:
- tmp = 0;
- }
- local_irq_restore(flags);
- return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __ARCH_H8300_CMPXCHG__ */
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index d4b53af657c8..65571ee15132 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -21,7 +21,6 @@
int main(void)
{
/* offsets into the task struct */
- OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_PTRACE, task_struct, ptrace);
OFFSET(TASK_BLOCKED, task_struct, blocked);
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 46b1342ce515..2ac27e4248a4 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -134,7 +134,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)p;
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 0281f92eea3d..c3590b2e9592 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -69,8 +69,6 @@ void __init h8300_fdt_init(void *fdt, char *bootargs)
static void __init bootmem_init(void)
{
- struct memblock_region *region;
-
memory_end = memory_start = 0;
/* Find main memory where is the kernel */
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 4ab895d7111f..6e94f8d04146 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -14,7 +14,7 @@
/* Normal writes in our arch don't clear lock reservations */
-static inline void atomic_set(atomic_t *v, int new)
+static inline void arch_atomic_set(atomic_t *v, int new)
{
asm volatile(
"1: r6 = memw_locked(%0);\n"
@@ -26,26 +26,26 @@ static inline void atomic_set(atomic_t *v, int new)
);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
/**
- * atomic_read - reads a word, atomically
+ * arch_atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
- * atomic_xchg - atomic
+ * arch_atomic_xchg - atomic
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/**
- * atomic_cmpxchg - atomic compare-and-exchange values
+ * arch_atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
@@ -61,7 +61,7 @@ static inline void atomic_set(atomic_t *v, int new)
*
* "old" is "expected" old val, __oldval is actual old value
*/
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
@@ -81,7 +81,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
}
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int output; \
\
@@ -97,7 +97,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
\
@@ -114,7 +114,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int output, val; \
\
@@ -148,7 +148,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -157,7 +157,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -180,6 +180,6 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 92b8a02e588a..cdb705e1496a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU.
*/
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr))))
/*
@@ -51,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* variable casting.
*/
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index dbb22b80b8c4..18cd6ea9ab23 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -155,9 +155,6 @@ extern unsigned long _dflt_cache_att;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
-/* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0UL
-
/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
@@ -242,7 +239,6 @@ static inline int pmd_bad(pmd_t pmd)
* pmd_page - converts a PMD entry to a page pointer
*/
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_pgtable(pmd) pmd_page(pmd)
/**
* pte_none - check if pte is mapped
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index c61165c99ae0..6a6835fb4242 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -135,7 +135,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 279252e3e0f7..cf425c2c63af 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -60,6 +60,7 @@ config IA64
select NUMA if !FLATMEM
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select SET_FS
+ select ZONE_DMA32
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -72,9 +73,6 @@ config 64BIT
select ATA_NONSTANDARD if ATA
default y
-config ZONE_DMA32
- def_bool y
-
config MMU
bool
default y
@@ -302,15 +300,12 @@ config NODES_SHIFT
int "Max num nodes shift(3-10)"
range 3 10
default "10"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
This option specifies the maximum number of nodes in your SSI system.
MAX_NUMNODES will be 2^(This value).
If in doubt, use the default.
-config HOLES_IN_ZONE
- bool
-
config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index f267d956458f..266c429b9137 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,11 +21,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
@@ -36,7 +36,7 @@ ia64_atomic_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return new; \
@@ -51,7 +51,7 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return old; \
@@ -74,7 +74,7 @@ ATOMIC_OPS(sub, -)
#define __ia64_atomic_const(i) 0
#endif
-#define atomic_add_return(i,v) \
+#define arch_atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -82,7 +82,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_add(__ia64_aar_i, v); \
})
-#define atomic_sub_return(i,v) \
+#define arch_atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -90,7 +90,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
-#define atomic_fetch_add(i,v) \
+#define arch_atomic_fetch_add(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -98,7 +98,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})
-#define atomic_fetch_sub(i,v) \
+#define arch_atomic_fetch_sub(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -110,13 +110,13 @@ ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)
-#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
-#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
-#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
+#define arch_atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
+#define arch_atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
-#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
-#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
-#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
+#define arch_atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
+#define arch_atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
@@ -131,7 +131,7 @@ ia64_atomic64_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return new; \
@@ -146,7 +146,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return old; \
@@ -159,7 +159,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
-#define atomic64_add_return(i,v) \
+#define arch_atomic64_add_return(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -167,7 +167,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_add(__ia64_aar_i, v); \
})
-#define atomic64_sub_return(i,v) \
+#define arch_atomic64_sub_return(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -175,7 +175,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
-#define atomic64_fetch_add(i,v) \
+#define arch_atomic64_fetch_add(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -183,7 +183,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})
-#define atomic64_fetch_sub(i,v) \
+#define arch_atomic64_fetch_sub(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -195,29 +195,29 @@ ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)
-#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
-#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
-#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
-#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
-#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
-#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic64_cmpxchg(v, old, new) \
- (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic_add(i,v) (void)atomic_add_return((i), (v))
-#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
+#define arch_atomic_add(i,v) (void)arch_atomic_add_return((i), (v))
+#define arch_atomic_sub(i,v) (void)arch_atomic_sub_return((i), (v))
-#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
-#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
+#define arch_atomic64_add(i,v) (void)arch_atomic64_add_return((i), (v))
+#define arch_atomic64_sub(i,v) (void)arch_atomic64_sub_return((i), (v))
#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..94ef84429843
--- /dev/null
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+#include <uapi/asm/cmpxchg.h>
+
+#define arch_xchg(ptr, x) \
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+
+#define arch_cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define arch_cmpxchg_local arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg64
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index f4dc81fa7146..1b990466d540 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -82,16 +82,16 @@ do { \
} while (0)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
({ \
struct page *page = alloc_page_vma( \
- GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
+ GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr); \
if (page) \
flush_dcache_page(page); \
page; \
})
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
index b1d87955e8cc..e6b652f9e45e 100644
--- a/arch/ia64/include/asm/pal.h
+++ b/arch/ia64/include/asm/pal.h
@@ -99,6 +99,7 @@
#include <linux/types.h>
#include <asm/fpu.h>
+#include <asm/intrinsics.h>
/*
* Data types needed to pass information into PAL procedures and
@@ -1086,7 +1087,7 @@ static inline long ia64_pal_freq_base(unsigned long *platform_base_freq)
/*
* Get the ratios for processor frequency, bus frequency and interval timer to
- * to base frequency of the platform
+ * the base frequency of the platform
*/
static inline s64
ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 9601cfe83c94..0fb2b6291d58 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -52,7 +52,6 @@ pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
{
pmd_val(*pmd_entry) = page_to_phys(pte);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index d765fd948fae..3f5dbbd8b9d8 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -128,7 +128,6 @@
#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0UL
/*
* All the normal masks have the "page accessed" bits on, as any time
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 5f620e66384e..864775970c50 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -26,7 +26,7 @@
* the queue, and the other indicating the current tail. The lock is acquired
* by atomically noting the tail and incrementing it by one (thus adding
* ourself to the queue and noting our position), then waiting until the head
- * becomes equal to the the initial value of the tail.
+ * becomes equal to the initial value of the tail.
* The pad bits in the middle are used to prevent the next_ticket number
* overflowing into the now_serving number.
*
diff --git a/arch/ia64/include/asm/unaligned.h b/arch/ia64/include/asm/unaligned.h
deleted file mode 100644
index 328942e3cbce..000000000000
--- a/arch/ia64/include/asm/unaligned.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_UNALIGNED_H
-#define _ASM_IA64_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index 2a88c7204e52..809ddb6896db 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -257,7 +257,7 @@ static inline int uv_numa_blade_id(void)
return 0;
}
-/* Convert a cpu number to the the UV blade number */
+/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
return 0;
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 5d90307fd6e0..926c6cb1e029 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_CMPXCHG_H
-#define _ASM_IA64_CMPXCHG_H
+#ifndef _UAPI_ASM_IA64_CMPXCHG_H
+#define _UAPI_ASM_IA64_CMPXCHG_H
/*
* Compare/Exchange, forked from asm/intrinsics.h
@@ -53,8 +53,10 @@ extern void ia64_xchg_called_with_bad_pointer(void);
__xchg_result; \
})
+#ifndef __KERNEL__
#define xchg(ptr, x) \
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+#endif
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -126,12 +128,14 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
* we had to back-pedal and keep the "legacy" behavior of a full fence :-(
*/
+#ifndef __KERNEL__
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg_local cmpxchg
#define cmpxchg64_local cmpxchg64
+#endif
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
@@ -152,4 +156,4 @@ do { \
#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_IA64_CMPXCHG_H */
+#endif /* _UAPI_ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
index 58233bb7976d..1fd61b78fb29 100644
--- a/arch/ia64/kernel/efi_stub.S
+++ b/arch/ia64/kernel/efi_stub.S
@@ -7,7 +7,7 @@
*
* This stub allows us to make EFI calls in physical mode with interrupts
* turned off. We need this because we can't call SetVirtualMap() until
- * the kernel has booted far enough to allow allocation of struct vma_struct
+ * the kernel has booted far enough to allow allocation of struct vm_area_struct
* entries (which we would need to map stuff with memory attributes other
* than uncached or writeback...). Since the GetTime() service gets called
* earlier than that, we need to be able to make physical mode EFI calls from
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index fc1ff8a4d7de..441ed04b1037 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -844,22 +844,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index cdbac4b52f30..e628a88607bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1788,7 +1788,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
ti->task = p;
ti->cpu = cpu;
p->stack = ti;
- p->state = TASK_UNINTERRUPTIBLE;
+ p->__state = TASK_UNINTERRUPTIBLE;
cpumask_set_cpu(cpu, &p->cpus_mask);
INIT_LIST_HEAD(&p->tasks);
p->parent = p->real_parent = p->group_leader = p;
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 36a69b4e6169..5bfc79be4cef 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -343,7 +343,7 @@ init_record_index_pools(void)
/* - 2 - */
sect_min_size = sal_log_sect_min_sizes[0];
- for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
+ for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
if (sect_min_size > sal_log_sect_min_sizes[i])
sect_min_size = sal_log_sect_min_sizes[i];
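
ARRAY_SIZE() replaces the open-coded sizeof division; a simplified sketch of what it computes (the kernel macro additionally uses __must_be_array() so that passing a plain pointer fails to compile):

	/* Simplified: element count of a statically sized array. */
	#define ARRAY_SIZE_SIMPLE(arr)	(sizeof(arr) / sizeof((arr)[0]))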
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7e1a1525e202..e56d63f4abf9 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -529,7 +529,7 @@ get_wchan (struct task_struct *p)
unsigned long ip;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
@@ -542,7 +542,7 @@ get_wchan (struct task_struct *p)
*/
unw_init_from_blocked_task(&info, p);
do {
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
return 0;
if (unw_unwind(&info) < 0)
return 0;
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index e14f5653393a..df28c7dd164f 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -641,11 +641,11 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_STOPPED &&
+ if (READ_ONCE(child->__state) == TASK_STOPPED &&
!test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
set_notify_resume(child);
- child->state = TASK_TRACED;
+ WRITE_ONCE(child->__state, TASK_TRACED);
stopped = 1;
}
spin_unlock_irq(&child->sighand->siglock);
@@ -665,9 +665,9 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_TRACED &&
+ if (READ_ONCE(child->__state) == TASK_TRACED &&
(child->signal->flags & SIGNAL_STOP_STOPPED)) {
- child->state = TASK_STOPPED;
+ WRITE_ONCE(child->__state, TASK_STOPPED);
}
spin_unlock_irq(&child->sighand->siglock);
}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 49b488580939..d10f780c13b9 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -441,7 +441,6 @@ start_secondary (void *unused)
#endif
efi_map_pal_code();
cpu_init();
- preempt_disable();
smp_callin();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 1ee8e736a48e..6d07742c57b8 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -363,7 +363,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 09fc385c2acd..3639e0a7cb3b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
* Populate cpu entries in sysfs for non-numa systems as well
* Intel Corporation - Ashok Raj
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 46b6e5f3a40f..d6579ec3ea32 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
*
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
*/
diff --git a/arch/m68k/68000/dragen2.c b/arch/m68k/68000/dragen2.c
index 584893c57c37..62f10a9e1ab7 100644
--- a/arch/m68k/68000/dragen2.c
+++ b/arch/m68k/68000/dragen2.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
+#include "screen.h"
/***************************************************************************/
/* Init Dragon Engine hardware */
diff --git a/arch/m68k/68000/screen.h b/arch/m68k/68000/screen.h
new file mode 100644
index 000000000000..2089bdf02688
--- /dev/null
+++ b/arch/m68k/68000/screen.h
@@ -0,0 +1,804 @@
+/* Created with The GIMP */
+#define screen_width 320
+#define screen_height 240
+static unsigned char screen_bits[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0d, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x5a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x95, 0x6a, 0x00, 0x00, 0x00, 0x03, 0xf8, 0x70, 0xe0, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
+ 0x01, 0xf8, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x5b, 0x55, 0x00, 0x00, 0x00, 0x0f,
+ 0xfc, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x38, 0x03, 0xfc, 0x00, 0x7f, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xb5, 0x56,
+ 0x00, 0x00, 0x00, 0x1e, 0x0c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x03, 0x0e, 0x00, 0x61,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xd7, 0x55, 0x55, 0x00, 0x03, 0x8e, 0x1c, 0x00, 0x70, 0xe1, 0x9e,
+ 0x0e, 0x38, 0xe1, 0x80, 0x70, 0xc0, 0xf0, 0x73, 0x33, 0xc0, 0x78, 0x38,
+ 0x00, 0x0e, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xa9, 0xaa, 0xad, 0x00, 0x03, 0x8e, 0x38,
+ 0x00, 0x70, 0xe1, 0xff, 0x0e, 0x38, 0xe3, 0x80, 0x71, 0xc3, 0xf8, 0x77,
+ 0x3f, 0xe1, 0xfc, 0x38, 0x00, 0x0e, 0x00, 0xef, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x2b, 0x55, 0x6a,
+ 0x00, 0x03, 0x8e, 0x38, 0x00, 0x70, 0xe1, 0xe7, 0x0e, 0x38, 0x73, 0x00,
+ 0x73, 0x83, 0x9c, 0x7f, 0x3c, 0xe1, 0xce, 0x38, 0x00, 0x1c, 0x00, 0xff,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x56, 0xab, 0x55, 0x00, 0x03, 0x8e, 0x38, 0x00, 0x70, 0xe1, 0xc7,
+ 0x0e, 0x38, 0x76, 0x00, 0x77, 0x07, 0x1c, 0x78, 0x38, 0xe3, 0x8e, 0x38,
+ 0x00, 0x78, 0x00, 0xf3, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x69, 0x55, 0x5a, 0xb7, 0x00, 0x03, 0x8e, 0x38,
+ 0x00, 0x70, 0xe1, 0xc7, 0x0e, 0x38, 0x3c, 0x00, 0x7e, 0x07, 0xfc, 0x70,
+ 0x38, 0xe3, 0xfe, 0x38, 0x00, 0xf0, 0x00, 0xe1, 0xc0, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa5, 0x9a, 0xab, 0x6d,
+ 0x00, 0x03, 0x8e, 0x3c, 0x00, 0x70, 0xe1, 0xc7, 0x0e, 0x38, 0x3e, 0x00,
+ 0x7f, 0x07, 0xfc, 0x70, 0x38, 0xe3, 0xfe, 0x38, 0x01, 0xc0, 0x00, 0xe1,
+ 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x4d, 0x75, 0xb6, 0xd5, 0x00, 0x03, 0x8e, 0x1c, 0x00, 0x70, 0xe1, 0xc7,
+ 0x0e, 0x38, 0x77, 0x00, 0x77, 0x87, 0x00, 0x70, 0x38, 0xe3, 0x80, 0x38,
+ 0x03, 0x80, 0x00, 0xe1, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0xaa, 0xca, 0xd5, 0x5b, 0x00, 0x03, 0x9e, 0x1f,
+ 0x0c, 0x70, 0xe1, 0xc7, 0x0e, 0x78, 0x67, 0x00, 0x73, 0xc7, 0x8c, 0x70,
+ 0x38, 0xe3, 0xc6, 0x38, 0x03, 0xfe, 0x38, 0x71, 0xc0, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xb5, 0xbb, 0x5b, 0x6a,
+ 0x00, 0x03, 0xfe, 0x0f, 0xfc, 0x70, 0xe1, 0xc7, 0x0f, 0xf8, 0xe3, 0x80,
+ 0x71, 0xe3, 0xfc, 0x70, 0x38, 0xe1, 0xfe, 0x38, 0x03, 0xfe, 0x38, 0x7f,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9,
+ 0x6b, 0x56, 0xd6, 0xad, 0x00, 0x01, 0xe6, 0x03, 0xf0, 0x70, 0xe1, 0xc7,
+ 0x07, 0x98, 0xc3, 0x80, 0x70, 0xe0, 0xf8, 0x70, 0x38, 0xe0, 0x7c, 0x38,
+ 0x03, 0xfe, 0x38, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x55, 0x55, 0x6a, 0xba, 0xeb, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x56, 0xd5, 0xd5, 0xd5, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x5a,
+ 0xb7, 0x3d, 0x57, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x75, 0x6d, 0xaa, 0xd3, 0xac, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8b, 0x6a, 0xb6, 0xde, 0x6b, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xba, 0xad,
+ 0xeb, 0x32, 0xda, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x1e, 0xaa, 0xb5, 0xad, 0x6e, 0x96, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x86, 0x00, 0x00, 0x07, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfc, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x6b, 0x6f, 0x5a, 0xb5, 0x75, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0xda, 0xd9,
+ 0x53, 0xeb, 0x6b, 0x4b, 0x00, 0x00, 0xf0, 0xde, 0x03, 0xe6, 0xf0, 0x78,
+ 0x06, 0x0c, 0x6c, 0x7c, 0x1f, 0x87, 0x86, 0xf0, 0xc0, 0xde, 0x0f, 0xcc,
+ 0xde, 0x0f, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xd6, 0xab, 0x57, 0x6e, 0x8a, 0xd6, 0xba, 0x00, 0x01, 0x98, 0xe7,
+ 0x01, 0x87, 0x38, 0xcc, 0x06, 0x0c, 0x6c, 0x06, 0x31, 0x8c, 0xc7, 0x38,
+ 0xc0, 0xe7, 0x18, 0xcc, 0xe7, 0x19, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0x2d, 0x5e, 0xda, 0x55, 0xbb, 0x59, 0x42,
+ 0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0x8c, 0x06, 0x0c, 0x70, 0x06,
+ 0x61, 0x98, 0x66, 0x18, 0xf8, 0xc3, 0x30, 0xcc, 0xc3, 0x31, 0x80, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf7, 0x6b, 0x6a,
+ 0xab, 0x56, 0xd6, 0xbf, 0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0xfc,
+ 0x06, 0x0c, 0x60, 0x7e, 0x61, 0x98, 0x66, 0x18, 0xc0, 0xc3, 0x30, 0xcc,
+ 0xc3, 0x3f, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x75, 0x94, 0xdb, 0x75, 0x6e, 0xda, 0xaa, 0xa2, 0x00, 0x03, 0x0c, 0xc3,
+ 0x01, 0x86, 0x19, 0x80, 0x06, 0x0c, 0x60, 0xc6, 0x61, 0x98, 0x66, 0x18,
+ 0xc0, 0xc3, 0x30, 0xcc, 0xc3, 0x30, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0xdb, 0x6b, 0xad, 0x9b, 0x27, 0x55, 0x55, 0x55,
+ 0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0x80, 0x06, 0x0c, 0x60, 0xc6,
+ 0x33, 0x98, 0x66, 0x18, 0xc0, 0xc3, 0x19, 0xcc, 0xc3, 0x30, 0x00, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x36, 0xde, 0xb5, 0x65,
+ 0x75, 0x5a, 0xd5, 0x56, 0x00, 0x01, 0x98, 0xc3, 0x01, 0x86, 0x18, 0xc4,
+ 0x06, 0x18, 0x60, 0xce, 0x1d, 0x8c, 0xc6, 0x18, 0xc0, 0xc3, 0x0e, 0xcc,
+ 0xc3, 0x18, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+ 0xed, 0xb5, 0x6d, 0x56, 0x55, 0x55, 0x55, 0xae, 0x00, 0x00, 0xf0, 0xc3,
+ 0x00, 0xe6, 0x18, 0x78, 0x07, 0xf0, 0x60, 0x77, 0x01, 0x87, 0x86, 0x18,
+ 0xfc, 0xc3, 0x00, 0xcc, 0xc3, 0x0f, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x7a, 0xab, 0x6d, 0xda, 0xaa, 0xca, 0xd5, 0x6d, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0xda, 0xaa, 0xea, 0xae,
+ 0x9b, 0x5a, 0xa9, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xd5,
+ 0xbb, 0xfd, 0xad, 0xad, 0x69, 0xea, 0xcb, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x05, 0xaf, 0xb6, 0x8a, 0x6a, 0xb9, 0x5a, 0x2d, 0xba, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x7a, 0x75, 0x6e, 0xcd, 0x52,
+ 0x9b, 0xdb, 0x55, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xad,
+ 0xaf, 0x95, 0x55, 0xed, 0x55, 0x55, 0x6b, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xea, 0xb5, 0xec, 0xfd, 0x59, 0x5a, 0xb5, 0x56, 0xaa, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xb7, 0x6b, 0x36, 0x4a, 0xeb, 0xab,
+ 0x2d, 0x6a, 0x9b, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0xad, 0x6f,
+ 0x6b, 0xeb, 0xdd, 0x7b, 0x6a, 0x55, 0xb5, 0x56, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x17, 0x76, 0xda, 0xd6, 0x5d, 0x62, 0xc6, 0xd5, 0x36, 0xaa, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xdb, 0x56, 0xbc, 0xf2, 0xa5, 0x5d,
+ 0x96, 0xaa, 0xb6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xee, 0xfd,
+ 0xd5, 0x2c, 0x6d, 0x9a, 0x75, 0x5b, 0x6a, 0xb5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xee, 0xad, 0x55, 0x15, 0xef, 0x54, 0xf6, 0xc5, 0x6a, 0xa9, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x5b, 0xd6, 0xad, 0xbe, 0xb0, 0xa6, 0x35,
+ 0x5b, 0xd5, 0x4a, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xeb, 0x5d, 0xf5,
+ 0xaa, 0xd7, 0xf4, 0x75, 0xba, 0x55, 0xaf, 0x6e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x37, 0x5e, 0xf7, 0x55, 0x61, 0xbc, 0x08, 0x5b, 0x55, 0x5a, 0xa9, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0xf6, 0xba, 0xaa, 0xaa, 0x9f, 0x69, 0xec, 0xd5,
+ 0x4b, 0xa9, 0xaa, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x2d, 0xd5, 0xed, 0x5a,
+ 0xf2, 0xd6, 0xae, 0xdb, 0x9e, 0x27, 0x5f, 0xb5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
+ 0x6d, 0x5b, 0xaa, 0xda, 0xae, 0x95, 0x58, 0xd5, 0x34, 0x6d, 0x68, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x15, 0xdb, 0xd7, 0x56, 0xb5, 0xd5, 0x6d, 0x29, 0x5b,
+ 0x4b, 0xdb, 0x57, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0xda, 0xac, 0xd5, 0x4b,
+ 0x57, 0x6a, 0x9b, 0x76, 0x5c, 0x95, 0x54, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa,
+ 0xb7, 0xfb, 0xab, 0xb6, 0xea, 0xad, 0x62, 0x95, 0xa1, 0xf5, 0xa9, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x57, 0xaf, 0x6b, 0x72, 0x54, 0x99, 0xd5, 0x0e, 0xf3,
+ 0x5f, 0x15, 0x2a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xea, 0xd4, 0xad, 0x5d, 0x35,
+ 0xb5, 0x34, 0xb2, 0xaa, 0x54, 0xba, 0xad, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x5e,
+ 0xaf, 0xed, 0xab, 0xea, 0xb3, 0xaa, 0x6a, 0xad, 0xd5, 0xd5, 0xaa, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x2a, 0xb9, 0xf5, 0xbb, 0xb5, 0x55, 0x64, 0x57, 0x55, 0x6b,
+ 0x5d, 0xd0, 0x52, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xab, 0x5b, 0xb6, 0x6d, 0x37,
+ 0x6f, 0xaa, 0x5b, 0xa2, 0xb5, 0x1f, 0xed, 0x73, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x65, 0xaa,
+ 0x6e, 0xfb, 0xd3, 0x5a, 0xd4, 0x54, 0xaa, 0x5e, 0xc3, 0xb5, 0x15, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x07, 0xaf, 0x7f, 0xea, 0xb6, 0xaa, 0xd6, 0xad, 0xf1, 0xd2, 0xd5,
+ 0x56, 0x55, 0x6a, 0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xbd, 0xda, 0xaf, 0x75, 0x6f, 0x5a,
+ 0x45, 0x26, 0xb7, 0x5b, 0x20, 0xdd, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xeb, 0x7f,
+ 0xf5, 0x4d, 0x95, 0x76, 0xd9, 0x56, 0xb5, 0x52, 0x6d, 0x12, 0xad, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x57, 0x3f, 0xa4, 0x8f, 0xb9, 0x6e, 0xa2, 0x6f, 0x1d, 0x6a, 0xef,
+ 0xb5, 0x6d, 0xaa, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0xfd, 0x75, 0x2f, 0x7c, 0x57, 0x88, 0xf6,
+ 0x80, 0xb5, 0x57, 0x5c, 0xd7, 0x55, 0x54, 0xae, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x39, 0xff, 0xf6,
+ 0xab, 0x7a, 0x57, 0x94, 0xef, 0x0d, 0xe4, 0xea, 0xa8, 0xaa, 0xab, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0c, 0xf7, 0xff, 0x6d, 0xb7, 0x4b, 0x39, 0x17, 0x2a, 0xfa, 0xb7, 0x56,
+ 0xb7, 0xaa, 0xed, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x18, 0xbe, 0xfd, 0xda, 0x9e, 0xec, 0xe6, 0xd5,
+ 0x52, 0x2a, 0x58, 0xa9, 0x54, 0x5a, 0x97, 0xe7, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0xe9, 0xf6, 0xbe,
+ 0xd4, 0x5b, 0xad, 0x63, 0x61, 0xf7, 0xb7, 0xaf, 0x55, 0x52, 0x9f, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0xc9, 0xbd, 0xfb, 0x6b, 0xeb, 0xf5, 0x6b, 0x2d, 0x57, 0x52, 0x94, 0xaa,
+ 0xb1, 0xab, 0x4a, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0xdb, 0xcb, 0xff, 0xf5, 0x3b, 0x55, 0xa0, 0x00,
+ 0x45, 0x6e, 0xb5, 0xb5, 0x65, 0x52, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x2e, 0x7d, 0xdb, 0xa5,
+ 0xca, 0xbe, 0x80, 0x00, 0x0a, 0x54, 0xaa, 0xa5, 0x45, 0x08, 0x09, 0x15,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
+ 0xd8, 0xd3, 0x1b, 0xae, 0xb7, 0xea, 0x00, 0x00, 0x00, 0x95, 0xaa, 0x56,
+ 0xdc, 0xe1, 0x21, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x53, 0x41, 0xff, 0x77, 0xbd, 0xaa, 0x58, 0x00, 0x00,
+ 0x00, 0xdb, 0x75, 0xd4, 0xb2, 0xa4, 0x07, 0xea, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xef, 0x07, 0xbd, 0x65, 0xeb,
+ 0xa2, 0xd0, 0x00, 0x00, 0x00, 0x25, 0x4b, 0x35, 0x56, 0x80, 0x77, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28,
+ 0x01, 0x28, 0x9a, 0xb6, 0xd7, 0x60, 0x00, 0x00, 0x00, 0x0c, 0xaa, 0xaa,
+ 0xaa, 0x02, 0x07, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x17, 0xe0, 0x17, 0xb1, 0xbf, 0xee, 0xb4, 0xc0, 0x00, 0x00,
+ 0x00, 0x08, 0xaa, 0xad, 0x68, 0x54, 0x04, 0x5a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x74, 0x87, 0x77, 0x72, 0x5a,
+ 0xab, 0x80, 0x00, 0x00, 0x00, 0x02, 0xd4, 0xb5, 0x52, 0x08, 0x5b, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x00,
+ 0x1f, 0xd6, 0xef, 0xda, 0xd5, 0x00, 0x00, 0x00, 0x0c, 0x01, 0x52, 0xd5,
+ 0x40, 0xf1, 0x55, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x9b, 0x00, 0x17, 0x4b, 0x92, 0xb7, 0xaf, 0x00, 0x00, 0x00,
+ 0x0e, 0x01, 0x4e, 0xaa, 0xae, 0x95, 0x55, 0x6d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xb8, 0x11, 0x2b, 0x13, 0x76, 0xef,
+ 0x54, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x54, 0xaa, 0xaa, 0xb5, 0xad, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x40, 0x00,
+ 0x7e, 0x7c, 0x65, 0xf4, 0x78, 0x00, 0x00, 0x00, 0x1f, 0x00, 0xab, 0x56,
+ 0xd5, 0x55, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0a, 0x40, 0x42, 0x6d, 0xce, 0xe5, 0xae, 0x54, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x6d, 0x75, 0x5d, 0x55, 0x4d, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x82, 0x03, 0xdc, 0x54, 0xbf, 0x61,
+ 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xad, 0xa2, 0xb5, 0x60, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x16,
+ 0xd9, 0xb5, 0xa4, 0xc7, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0xea,
+ 0xae, 0xd5, 0x57, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x54, 0x04, 0x0d, 0x76, 0xbb, 0x4b, 0xbc, 0x58, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x36, 0x95, 0xa9, 0x55, 0x54, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x50, 0x08, 0x5b, 0xc5, 0x3d, 0x97, 0x0a,
+ 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb6, 0xab, 0x2b, 0x55, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x24, 0x20, 0x3d,
+ 0x59, 0x7b, 0x76, 0x37, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x6d,
+ 0x75, 0xb5, 0x55, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x12, 0xf0, 0x01, 0xff, 0x21, 0xa8, 0xc3, 0x74, 0xa8, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x1b, 0xa9, 0x4b, 0x55, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x25, 0xc0, 0x41, 0xca, 0x9c, 0x77, 0x58, 0x9d,
+ 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x56, 0xb4, 0xad, 0xb2, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x80, 0x0f, 0xbe,
+ 0xc0, 0xf6, 0xd5, 0xb3, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x54,
+ 0xa5, 0xaa, 0x55, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x17, 0x04, 0x87, 0xbc, 0x6a, 0x3b, 0xac, 0x9d, 0x58, 0x00, 0x00, 0x03,
+ 0xe0, 0x00, 0x16, 0xab, 0x55, 0x4a, 0xd6, 0xa5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x5c, 0x00, 0x1f, 0x54, 0xc9, 0x2a, 0xb7, 0xa6,
+ 0xd8, 0x0f, 0x00, 0x07, 0xf8, 0x00, 0x15, 0x6a, 0x55, 0x5a, 0xa4, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x08, 0x74, 0xca,
+ 0x9c, 0x5a, 0xa8, 0xc5, 0x30, 0x1f, 0x80, 0x0f, 0xfc, 0x00, 0x0d, 0x55,
+ 0xaa, 0xa5, 0x55, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+ 0xf0, 0x20, 0xea, 0x5a, 0xb0, 0xe7, 0x95, 0x7d, 0x10, 0x3f, 0xc0, 0x1f,
+ 0xfe, 0x00, 0x0a, 0xaa, 0xaa, 0xad, 0x4a, 0x95, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0xf4, 0x02, 0x7d, 0xb5, 0x8f, 0x9c, 0xaa, 0xe9,
+ 0xa0, 0x3f, 0xc0, 0x1f, 0xfe, 0x00, 0x06, 0xb5, 0x54, 0xa9, 0x2a, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x40, 0x47, 0xeb, 0xab,
+ 0x75, 0x51, 0x55, 0x4d, 0xd8, 0x3f, 0xe0, 0x3f, 0x3f, 0x00, 0x0a, 0xaa,
+ 0xa9, 0x4a, 0xaa, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23,
+ 0xc0, 0x06, 0xff, 0xe5, 0xb6, 0xbb, 0xa9, 0x34, 0x48, 0x30, 0xe0, 0x3c,
+ 0x0f, 0x00, 0x0a, 0xaa, 0x8a, 0xaa, 0xaa, 0xa4, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xaf, 0x80, 0x9f, 0xa1, 0x8d, 0xb5, 0xd6, 0x93, 0xcd,
+ 0x90, 0x62, 0x60, 0x3c, 0x27, 0x00, 0x06, 0xaa, 0xb5, 0xaa, 0xaa, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0x92, 0x3f, 0xb5, 0x36,
+ 0x56, 0xb5, 0x9d, 0x55, 0x90, 0x62, 0x60, 0x38, 0x17, 0x80, 0x0d, 0x54,
+ 0xaa, 0x54, 0xaa, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x7a,
+ 0x00, 0xba, 0xab, 0x73, 0xe7, 0xa2, 0xb6, 0x55, 0x2c, 0x61, 0x60, 0x38,
+ 0x17, 0x80, 0x09, 0x6b, 0x4a, 0xd5, 0x4a, 0x92, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0d, 0xfe, 0x03, 0x6d, 0xea, 0x08, 0x51, 0x1d, 0x2b, 0x35,
+ 0x08, 0x61, 0x60, 0x38, 0x07, 0x80, 0x06, 0xda, 0xaa, 0xa9, 0x55, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0xf8, 0x4e, 0xfb, 0x89, 0xde,
+ 0x35, 0xea, 0x6b, 0x72, 0x28, 0x60, 0x70, 0x38, 0x07, 0x80, 0x05, 0x52,
+ 0xaa, 0xaa, 0x95, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf8,
+ 0x0d, 0x63, 0xbd, 0xe3, 0x57, 0x55, 0xb6, 0x49, 0xcc, 0x20, 0x7f, 0xf8,
+ 0x07, 0x80, 0x0a, 0xaa, 0xaa, 0xaa, 0xaa, 0xad, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x5d, 0xe1, 0x34, 0xc6, 0xab, 0xa7, 0x91, 0x77, 0x37, 0xcc,
+ 0x48, 0x30, 0x9f, 0x3c, 0x07, 0x80, 0x0a, 0xaa, 0xaa, 0xaa, 0xaa, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3b, 0xc0, 0x34, 0xdb, 0x4e, 0xf9,
+ 0xae, 0xd5, 0x54, 0xe1, 0xb4, 0x19, 0x3f, 0xfe, 0x0f, 0x00, 0x05, 0x55,
+ 0x55, 0x55, 0x52, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x7f, 0x89,
+ 0xf1, 0xf7, 0xe5, 0x57, 0xea, 0x8d, 0x4c, 0xda, 0xac, 0x13, 0xff, 0xff,
+ 0x80, 0x00, 0x0a, 0xd5, 0xaa, 0xa4, 0x95, 0x54, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf7, 0x02, 0x53, 0x6c, 0x72, 0x10, 0xa3, 0x6a, 0x74, 0xea,
+ 0x34, 0x07, 0xff, 0xff, 0xe0, 0x00, 0x05, 0x4e, 0x54, 0x95, 0x55, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xfc, 0x97, 0xd6, 0x55, 0xb6, 0xab,
+ 0xb7, 0x45, 0x46, 0xad, 0xf4, 0x1f, 0xff, 0xff, 0xfe, 0x00, 0x05, 0x2a,
+ 0xd5, 0x54, 0x95, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xcf, 0x0e,
+ 0xfb, 0x2d, 0x12, 0xd4, 0xb8, 0xb6, 0xeb, 0x6a, 0x10, 0x3f, 0xff, 0xff,
+ 0xff, 0x00, 0x02, 0xda, 0x8a, 0xab, 0x6a, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x57, 0xbc, 0x3a, 0xc6, 0x0d, 0x76, 0xb1, 0x77, 0x15, 0x2a, 0x56,
+ 0x34, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x02, 0xa5, 0x75, 0x2a, 0x52, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x0f, 0xfc, 0xdf, 0x75, 0x9d, 0x5a, 0x67,
+ 0x15, 0x59, 0xb3, 0x6c, 0x4c, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x05, 0x55,
+ 0x8a, 0xaa, 0xaa, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf9, 0xdb,
+ 0xed, 0x36, 0x5a, 0xeb, 0xad, 0x6b, 0x57, 0x5d, 0xa4, 0x7f, 0xff, 0xff,
+ 0xf3, 0x80, 0x05, 0x54, 0xb2, 0xaa, 0xaa, 0xa9, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x7f, 0xf4, 0x5a, 0xaa, 0xdb, 0x40, 0x20, 0xea, 0xaa, 0xa8, 0x19,
+ 0xe8, 0x1f, 0xff, 0xff, 0xe7, 0x00, 0x02, 0x55, 0x4b, 0xd5, 0x55, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0xff, 0xf2, 0xb5, 0x7d, 0x56, 0xaa, 0x76,
+ 0xa5, 0xce, 0xb4, 0xd1, 0x2c, 0x0f, 0xff, 0xff, 0x0f, 0x00, 0x02, 0xa5,
+ 0x5a, 0x2a, 0x55, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xc1, 0x31,
+ 0x9a, 0x8d, 0x61, 0x2e, 0xb2, 0xb5, 0x57, 0x59, 0x88, 0x07, 0xff, 0xf8,
+ 0x7e, 0x06, 0x00, 0xaa, 0x65, 0xd2, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0x7e, 0x02, 0x55, 0x67, 0xb5, 0x5b, 0x05, 0xfa, 0xab, 0x0a, 0x8d,
+ 0xe4, 0x03, 0xff, 0xc3, 0xfe, 0x07, 0x01, 0xaa, 0xaa, 0x26, 0xaa, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x16, 0x7d, 0x10, 0x57, 0x9d, 0x4a, 0x8a, 0xd7,
+ 0x95, 0x5a, 0xd5, 0x4d, 0x58, 0x00, 0xfc, 0x1f, 0xe6, 0x03, 0xc1, 0x55,
+ 0x52, 0xd5, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xe1, 0x42, 0xa4,
+ 0x7d, 0xd7, 0xba, 0xb0, 0x24, 0xd5, 0x97, 0xc5, 0xd2, 0x00, 0x00, 0x7f,
+ 0x87, 0x03, 0xe0, 0x42, 0xaa, 0x95, 0x55, 0x4b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0d, 0x42, 0x17, 0xfb, 0x8c, 0xac, 0x46, 0xae, 0xdd, 0xb3, 0x68, 0x92,
+ 0x6c, 0x03, 0x03, 0xfe, 0x0f, 0x01, 0xe0, 0x5a, 0x55, 0x62, 0xaa, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x3d, 0x0a, 0x32, 0xd5, 0x7b, 0xc9, 0xde, 0xad,
+ 0x91, 0x4a, 0xaa, 0xc6, 0x68, 0x21, 0xff, 0xf8, 0x7f, 0x00, 0x60, 0x2a,
+ 0xa9, 0x2e, 0xa9, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x64, 0xb3, 0x18,
+ 0x8e, 0xae, 0xb4, 0x46, 0x9a, 0xbb, 0x54, 0xd5, 0xb4, 0x30, 0xff, 0xe0,
+ 0xff, 0x80, 0x00, 0x52, 0xa5, 0x51, 0x55, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x82, 0xc1, 0x52, 0xb3, 0xcb, 0xc5, 0xd4, 0xda, 0xd3, 0x4a, 0x68, 0x87,
+ 0x68, 0x30, 0x3f, 0x03, 0xff, 0x80, 0x00, 0x2a, 0xaa, 0x55, 0x45, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x5b, 0xc6, 0xa9, 0x19, 0x74, 0x3d, 0xa9, 0x92,
+ 0xab, 0x5b, 0x95, 0xb0, 0x28, 0x78, 0x00, 0x07, 0xff, 0xc0, 0x00, 0x15,
+ 0x2a, 0x95, 0x2a, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x10, 0x0f, 0x6b, 0xa2,
+ 0xb7, 0x22, 0x61, 0x2b, 0x52, 0xaa, 0x33, 0x1d, 0xa0, 0x7c, 0x00, 0x0f,
+ 0xff, 0xe0, 0x00, 0x14, 0xa4, 0xaa, 0xa9, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+ 0x00, 0x23, 0x27, 0x64, 0x8d, 0xfb, 0x64, 0xa7, 0x5a, 0xaa, 0xd3, 0x5a,
+ 0xa0, 0x7c, 0x00, 0x3f, 0xff, 0xe0, 0x00, 0x05, 0x29, 0x52, 0x45, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x4a, 0x5f, 0x66, 0x6a, 0xab, 0x22, 0xaf, 0xec,
+ 0xa9, 0x55, 0x17, 0x17, 0xa0, 0xfe, 0x00, 0x7f, 0xff, 0xf0, 0x00, 0x02,
+ 0xaa, 0x4a, 0xaa, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x8d, 0x83, 0x49, 0xad,
+ 0x9a, 0x57, 0x50, 0x6b, 0x6b, 0x5a, 0xd1, 0x1a, 0x80, 0xff, 0x00, 0xff,
+ 0xff, 0xf0, 0x00, 0x05, 0x52, 0x95, 0x29, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c,
+ 0x1b, 0x4b, 0xcb, 0x45, 0x7b, 0x4e, 0x4b, 0x5d, 0x2a, 0xd5, 0x16, 0x29,
+ 0x41, 0xff, 0x87, 0xff, 0xff, 0xf8, 0x00, 0x01, 0x2a, 0xa5, 0x4a, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20, 0x52, 0x98, 0x9d, 0x0e, 0x95, 0x65, 0x38, 0x95,
+ 0x55, 0x6a, 0xab, 0x35, 0x81, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x00, 0x01,
+ 0xa9, 0x14, 0xa9, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x80, 0xe3, 0x2b, 0x74, 0x4d,
+ 0xb5, 0x55, 0xd5, 0x95, 0x69, 0x4a, 0xac, 0x6e, 0x03, 0xff, 0xff, 0xff,
+ 0xff, 0xfc, 0x00, 0x01, 0x25, 0x65, 0x2a, 0x95, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x9a, 0xb7, 0x0b, 0xb0, 0x6f, 0xfc, 0xfd, 0x6a, 0x8b, 0xd5, 0x4e, 0xa8,
+ 0x03, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00, 0x01, 0x49, 0x14, 0xa5, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x02, 0x94, 0xab, 0x3d, 0xc9, 0x6b, 0xb5, 0x43, 0xee,
+ 0xb6, 0x95, 0x54, 0x9a, 0x07, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00, 0x00,
+ 0xaa, 0x54, 0xaa, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x13, 0x9f, 0x68, 0x5b,
+ 0xff, 0xfa, 0xd7, 0xfa, 0xa5, 0x6a, 0xac, 0xd4, 0x07, 0xff, 0xff, 0xff,
+ 0xff, 0xfe, 0x00, 0x00, 0x55, 0x53, 0x2a, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x04,
+ 0x65, 0x3c, 0x8b, 0x93, 0x87, 0x76, 0x75, 0xf5, 0x57, 0x55, 0x5c, 0xa8,
+ 0x0f, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x0b, 0x54, 0x95, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x2a, 0x7b, 0xfd, 0x75, 0xba, 0xae, 0xfe, 0x44, 0x5a,
+ 0x8a, 0xca, 0x9c, 0xf0, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00,
+ 0x14, 0x4a, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0xd8, 0xa2, 0xbd, 0x59, 0x52,
+ 0x8e, 0xec, 0x48, 0xf1, 0x2d, 0x52, 0x9c, 0xa0, 0x0f, 0xff, 0xff, 0xff,
+ 0xff, 0xfe, 0x02, 0x00, 0x02, 0x95, 0x54, 0x92, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x35,
+ 0x5f, 0xed, 0x6a, 0xf2, 0x95, 0x19, 0x09, 0xaa, 0x6a, 0x5d, 0x48, 0xc0,
+ 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x02, 0xaa, 0x52, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x22, 0xa6, 0xc3, 0xdf, 0xd0, 0x66, 0x29, 0xf0, 0x01, 0x6a,
+ 0xca, 0xd5, 0x59, 0x80, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80,
+ 0x05, 0x2a, 0xaa, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xa4, 0x03, 0xf2, 0x61, 0xed,
+ 0x17, 0xe2, 0x02, 0xaa, 0x99, 0x2a, 0x98, 0x80, 0x07, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x80, 0x40, 0x02, 0x55, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x09, 0x05,
+ 0x88, 0xa7, 0x47, 0xdc, 0xc0, 0x00, 0x2a, 0x6b, 0x4a, 0x6a, 0xb0, 0x00,
+ 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x20, 0x01, 0x55, 0x55, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x2e, 0x89, 0x11, 0xfb, 0x87, 0x9c, 0x90, 0x64, 0xaa, 0x9a,
+ 0x4a, 0xa9, 0x1a, 0x00, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x30,
+ 0x00, 0xa8, 0x42, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x2b, 0x16, 0x2b, 0x85, 0x09, 0xf8,
+ 0x83, 0x45, 0x5d, 0xad, 0x95, 0x49, 0x5c, 0x08, 0x1f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xe0, 0x10, 0x00, 0xae, 0xea, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x49, 0x28,
+ 0x3b, 0xf5, 0xbd, 0x30, 0x14, 0x96, 0x95, 0x6a, 0xd2, 0xb5, 0xaa, 0x10,
+ 0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x18, 0x00, 0xa9, 0x15, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x19, 0x26, 0x58, 0x76, 0x4a, 0x1e, 0xe1, 0x7a, 0xba, 0xb1, 0x5b,
+ 0x4a, 0xab, 0x2c, 0x10, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x08,
+ 0x00, 0x2b, 0x6a, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x11, 0x21, 0x4d, 0x78, 0x6d, 0x20,
+ 0x0a, 0x84, 0xda, 0xaf, 0x55, 0x56, 0x52, 0x20, 0x3f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf0, 0x0c, 0x00, 0x54, 0xaa, 0xa5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0xe8, 0x62,
+ 0xcd, 0x49, 0xd8, 0x42, 0xb4, 0xb8, 0x96, 0x3a, 0xa5, 0x54, 0xd4, 0x00,
+ 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x04, 0x00, 0x14, 0xaa, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x2d, 0x18, 0x4e, 0x0e, 0x61, 0xb0, 0x0a, 0xab, 0x6b, 0xaa, 0x37,
+ 0x0d, 0x5a, 0xa8, 0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x06,
+ 0x00, 0x2b, 0x4a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0xb1, 0xd8, 0x5f, 0xc1, 0x90, 0x55,
+ 0x55, 0x4a, 0x32, 0xed, 0x79, 0x49, 0x50, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf8, 0x06, 0x00, 0x12, 0xaa, 0xa9, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x16, 0x00, 0xd1,
+ 0xac, 0x89, 0x91, 0x4a, 0xaa, 0xaa, 0xa9, 0xb2, 0xc5, 0x65, 0x68, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x03, 0x00, 0x15, 0x2a, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x64, 0x54, 0x81, 0xcf, 0x89, 0x00, 0x1f, 0xf2, 0xaa, 0xd5, 0x6b, 0x35,
+ 0x5a, 0xad, 0xa8, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x03,
+ 0x00, 0x05, 0x6a, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x23, 0xf6, 0x52, 0x28, 0x19, 0xd5,
+ 0x45, 0x55, 0x49, 0x54, 0xa5, 0x6a, 0xa0, 0x01, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfc, 0x03, 0x00, 0x15, 0x24, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x09, 0x18, 0x04, 0x6c,
+ 0x64, 0x61, 0x21, 0x95, 0xad, 0x56, 0xaa, 0xd7, 0xab, 0x55, 0x50, 0x03,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x01, 0x80, 0x04, 0xaa, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x12, 0x46, 0x20, 0xdc, 0xc4, 0x01, 0x01, 0xaa, 0xaa, 0xa9, 0x5b, 0x49,
+ 0x2a, 0xad, 0xa0, 0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x01,
+ 0x80, 0x02, 0xa4, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x44, 0x08, 0x00, 0xd1, 0x84, 0x20, 0x05, 0xaa,
+ 0xaa, 0x95, 0x66, 0xaa, 0xaa, 0xaa, 0xa0, 0x03, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfe, 0x01, 0x80, 0x05, 0x55, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0c, 0xa8, 0x89, 0x99,
+ 0x00, 0x22, 0x03, 0xd5, 0x4a, 0xaa, 0xa7, 0x4d, 0x5a, 0xd5, 0x40, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x01, 0x80, 0x02, 0x49, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48,
+ 0x18, 0x02, 0x03, 0x24, 0x10, 0xa8, 0x2f, 0x6a, 0xaa, 0xaa, 0x45, 0x51,
+ 0x4a, 0xb5, 0x40, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
+ 0x80, 0x04, 0x95, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x01, 0x05, 0x48, 0x07, 0xae, 0x83, 0x02, 0x8c, 0x2a,
+ 0x95, 0x52, 0xcd, 0x56, 0xad, 0x56, 0xc0, 0x07, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfe, 0x00, 0xc0, 0x01, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x58, 0x98, 0x2f, 0x30,
+ 0x02, 0x82, 0x01, 0x6a, 0xaa, 0xaa, 0x8d, 0x29, 0x35, 0xb5, 0x04, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0xc0, 0x02, 0x94, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x45, 0x30, 0x1f, 0xa0, 0x25, 0x1d, 0x15, 0x4a, 0xa5, 0x51, 0xad, 0x55,
+ 0x6d, 0x6a, 0x84, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
+ 0xc0, 0x01, 0x29, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0xa1, 0x2d, 0x21, 0x7f, 0x40, 0x4d, 0xb0, 0x35, 0x55,
+ 0x15, 0x15, 0x79, 0x25, 0x4a, 0xca, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xc0, 0x02, 0x52, 0x96, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x40, 0x7f, 0x24,
+ 0x8a, 0x58, 0xad, 0x55, 0x55, 0x52, 0x0a, 0x5a, 0xaa, 0xbb, 0x04, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xc0, 0x00, 0xa5, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x09,
+ 0x50, 0x42, 0xfd, 0xc0, 0x59, 0x53, 0x53, 0x4c, 0xaa, 0x54, 0x5a, 0xa2,
+ 0xad, 0xaa, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0xc0, 0x01, 0x4d, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x02, 0x15, 0x90, 0x7d, 0x15, 0x76, 0xac, 0xad, 0x52,
+ 0x95, 0x55, 0xaa, 0x4c, 0xa5, 0x54, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xc0, 0x00, 0xa5, 0x12, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0c, 0x83, 0x01, 0xff, 0xf9,
+ 0x15, 0x29, 0x51, 0x55, 0x52, 0x2c, 0xa2, 0x54, 0xab, 0x6c, 0x04, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x01, 0x28, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98,
+ 0x06, 0x01, 0xff, 0xe9, 0x55, 0xd5, 0x56, 0xa5, 0x55, 0xaa, 0xad, 0x51,
+ 0x35, 0x54, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x4a, 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x08, 0x31, 0x1c, 0x12, 0xf4, 0xc5, 0x44, 0x5a, 0xaa, 0x49,
+ 0xaa, 0xa9, 0xa8, 0x95, 0x55, 0x50, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x01, 0x80, 0x01, 0x52, 0x82, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x40, 0x48, 0x1c, 0x41, 0x3e, 0x2e,
+ 0x8b, 0x54, 0xaa, 0xab, 0x52, 0xa5, 0x45, 0x24, 0x95, 0x50, 0x06, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x01, 0x24, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00,
+ 0x38, 0x01, 0x1d, 0x64, 0x9a, 0xa9, 0x2a, 0x94, 0xaa, 0xab, 0x55, 0x55,
+ 0xaa, 0xd8, 0x02, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x01, 0x4a, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x13, 0xc8, 0x00, 0xb0, 0x02, 0x3e, 0x96, 0x35, 0x6a, 0xaa, 0x55,
+ 0x4a, 0x95, 0x4a, 0x54, 0x55, 0x28, 0x03, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8f, 0xc0, 0x01, 0x29, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x83, 0x49, 0x20, 0x8c, 0x78, 0xb5,
+ 0x65, 0x4a, 0xaa, 0xa6, 0xaa, 0xa5, 0x29, 0x45, 0xaa, 0xa8, 0x03, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x70, 0x01, 0x45, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x03, 0x20,
+ 0x00, 0x30, 0x75, 0x4c, 0x95, 0x55, 0x54, 0xa9, 0x52, 0x95, 0x52, 0x95,
+ 0x2a, 0xa8, 0x01, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80,
+ 0x18, 0x00, 0x94, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0d, 0x0c, 0x0c, 0x44, 0x18, 0xe5, 0xb9, 0xb5, 0x54, 0x92, 0x96,
+ 0xaa, 0x55, 0x4a, 0x69, 0x55, 0x20, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x01, 0x29, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x2e, 0x08, 0x1c, 0x01, 0x7b, 0xdc, 0x96,
+ 0x4a, 0xaa, 0xaa, 0xa9, 0x4a, 0xaa, 0x95, 0x0a, 0x73, 0x50, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x01, 0x52, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x20, 0x30,
+ 0x44, 0x79, 0x88, 0xd4, 0x95, 0x49, 0x54, 0xab, 0x52, 0x94, 0xa4, 0xaa,
+ 0xc6, 0x51, 0xe0, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
+ 0x00, 0x00, 0x8a, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x38, 0x2a, 0x21, 0x01, 0xfb, 0xa0, 0x7d, 0x54, 0xb5, 0x55, 0x54,
+ 0x95, 0x55, 0x29, 0x55, 0x29, 0x57, 0xf8, 0x3f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfe, 0x00, 0x00, 0x02, 0x50, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x90, 0x00, 0x0d, 0xfb, 0xca, 0xd2,
+ 0xaa, 0x92, 0x49, 0x55, 0x55, 0x2a, 0x4a, 0x4a, 0xd3, 0x57, 0xfc, 0x1f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x00, 0x00, 0x02, 0x95, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0x20, 0xa4,
+ 0x13, 0xfd, 0xab, 0x2a, 0x52, 0xa5, 0x52, 0x55, 0x52, 0xa9, 0x55, 0x55,
+ 0x2a, 0x57, 0xff, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x00,
+ 0x00, 0x0d, 0x24, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x30, 0xc0, 0x00, 0x77, 0xb5, 0x55, 0x69, 0x4a, 0xaa, 0x96, 0xaa,
+ 0x94, 0xaa, 0x91, 0x45, 0x55, 0x4f, 0xff, 0x03, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xcf, 0x00, 0x00, 0x1e, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x30, 0x81, 0x00, 0x67, 0xb4, 0x74, 0x52,
+ 0x55, 0x55, 0x2a, 0xaa, 0xaa, 0x52, 0x55, 0x35, 0x52, 0xaf, 0xff, 0x81,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x1e, 0xa0, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xe3, 0x00, 0x08,
+ 0x86, 0x4a, 0xd4, 0xd5, 0x54, 0x91, 0x49, 0x55, 0x51, 0x54, 0x95, 0x49,
+ 0x56, 0x4f, 0xff, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x80,
+ 0x00, 0x3e, 0x0b, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0xc4, 0x02, 0x00, 0x7f, 0xbd, 0x8a, 0x25, 0x49, 0x55, 0x2a, 0xb5,
+ 0x55, 0x52, 0xa4, 0x96, 0xac, 0x8f, 0xff, 0xe0, 0x7f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0x80, 0x00, 0x7e, 0x54, 0xa4, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x43, 0x80, 0x20, 0x21, 0x4d, 0x3f, 0x52, 0xb4,
+ 0x55, 0x2a, 0x55, 0x55, 0x4a, 0xa4, 0x95, 0x32, 0xa5, 0x4f, 0xff, 0xe0,
+ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xc0, 0x00, 0xfe, 0x49, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x04, 0x08,
+ 0x1a, 0x46, 0xd4, 0x49, 0xaa, 0xa4, 0xaa, 0xaa, 0x54, 0x95, 0x2a, 0xc4,
+ 0x94, 0x8f, 0xff, 0xf0, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xe0,
+ 0x01, 0xfe, 0x92, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8e, 0x40, 0x80, 0x00, 0x74, 0x44, 0x51, 0x55, 0x64, 0xa9, 0x49, 0x55,
+ 0x49, 0x51, 0x52, 0x5a, 0x2a, 0x1f, 0xff, 0xf8, 0x07, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0xf0, 0x07, 0xfe, 0x24, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0d, 0x90, 0x00, 0x41, 0xeb, 0x12, 0xab, 0x52,
+ 0xa9, 0x4a, 0xaa, 0xa5, 0x52, 0x95, 0x2a, 0xa2, 0xa8, 0x1f, 0xff, 0xf8,
+ 0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xf8, 0x1f, 0xfe, 0x52, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x3b, 0x81, 0x08, 0x05,
+ 0xe3, 0x15, 0xa5, 0x36, 0x95, 0x4a, 0x4a, 0xaa, 0x45, 0x2a, 0xa2, 0xac,
+ 0x00, 0x3f, 0xff, 0xfc, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff,
+ 0xff, 0xfe, 0x04, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xb2, 0x00, 0x00, 0x80, 0xe8, 0x52, 0xac, 0xa8, 0xa4, 0x92, 0xa9, 0x4a,
+ 0x95, 0x51, 0x2d, 0x50, 0xe1, 0xff, 0xff, 0xfe, 0x00, 0x7f, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0xff, 0xff, 0xfe, 0x29, 0x05, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x35, 0x10, 0x09, 0xa9, 0x4a, 0xaa, 0xaa,
+ 0xa9, 0x54, 0x95, 0x2a, 0xa4, 0x8a, 0xa9, 0x43, 0xff, 0xff, 0xff, 0xfe,
+ 0x00, 0x3f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff, 0xff, 0xfe, 0x12, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x01, 0x03,
+ 0x34, 0xa5, 0x19, 0x25, 0x55, 0x55, 0x55, 0x55, 0x15, 0x55, 0x45, 0x53,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x1f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff,
+ 0xff, 0xfe, 0x04, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+ 0x49, 0x3c, 0x00, 0x34, 0x75, 0x4a, 0xa2, 0xaa, 0x92, 0x92, 0x92, 0x54,
+ 0x52, 0x54, 0x99, 0x53, 0xff, 0xff, 0xff, 0xff, 0x80, 0x0f, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0xff, 0xff, 0xff, 0x14, 0x94, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x78, 0x12, 0xfc, 0xaa, 0x92, 0x2d, 0x54,
+ 0xa4, 0xa5, 0x2a, 0x92, 0x94, 0x92, 0xa2, 0xa3, 0xff, 0xff, 0xff, 0xff,
+ 0x80, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff, 0xff, 0xff, 0x01, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x16, 0xf0, 0x80, 0x71,
+ 0xc5, 0x2a, 0x65, 0x25, 0x55, 0x4a, 0x52, 0xaa, 0x51, 0x52, 0x95, 0x21,
+ 0xff, 0xff, 0xff, 0xff, 0xc0, 0x07, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff,
+ 0xff, 0xff, 0x81, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x8e, 0xa0, 0x00, 0xb3, 0x94, 0xa4, 0xa9, 0x55, 0x45, 0x54, 0xa5, 0x25,
+ 0x25, 0x2a, 0x65, 0x41, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x07, 0xff, 0xff,
+ 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0x80, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xa0, 0x0f, 0x42, 0x13, 0x46, 0x94, 0x94, 0xa5, 0x4a,
+ 0x54, 0x92, 0x88, 0xa8, 0x49, 0x55, 0x4a, 0x51, 0xff, 0xff, 0xff, 0xff,
+ 0xe0, 0x07, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xe0, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x2d, 0x00, 0x87, 0x56,
+ 0x4b, 0x2a, 0x99, 0x55, 0x4a, 0xaa, 0x55, 0x4a, 0x92, 0xa2, 0x54, 0xa8,
+ 0xff, 0xff, 0xff, 0xff, 0xf0, 0x07, 0xff, 0xff, 0xff, 0xfd, 0x8f, 0xff,
+ 0xff, 0xff, 0xfe, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x88,
+ 0x18, 0x04, 0x1f, 0x11, 0x51, 0x51, 0x0b, 0x54, 0x94, 0xa5, 0x52, 0x92,
+ 0x55, 0x54, 0x95, 0x20, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x0f, 0xff, 0xff,
+ 0xff, 0xf9, 0x8f, 0xff, 0xff, 0xff, 0xff, 0x89, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x11, 0xe0, 0x38, 0x01, 0x3e, 0x4d, 0x0a, 0x55, 0x5a, 0x55,
+ 0x55, 0x4a, 0x25, 0x24, 0x81, 0x25, 0x29, 0x48, 0xff, 0xff, 0xff, 0xff,
+ 0xf8, 0x1f, 0xff, 0xff, 0xff, 0xf0, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x80, 0xf0, 0x90, 0x7e, 0xd4,
+ 0x68, 0x92, 0xaa, 0x54, 0x92, 0x92, 0xa9, 0x55, 0x2e, 0xaa, 0xa2, 0x58,
+ 0x7f, 0xff, 0xff, 0xff, 0xfc, 0x3f, 0xff, 0xff, 0xff, 0xe0, 0x8f, 0xff,
+ 0xff, 0xff, 0xff, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x04,
+ 0x40, 0x00, 0x49, 0xe2, 0x8a, 0xa9, 0x54, 0x95, 0x2a, 0x54, 0x4a, 0x48,
+ 0x91, 0x51, 0x2a, 0xc8, 0x7f, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
+ 0xff, 0xc0, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x2e, 0x00, 0x00, 0x03, 0x79, 0x4a, 0xa9, 0x45, 0x49, 0x54,
+ 0xa4, 0x89, 0x49, 0x52, 0x8a, 0x85, 0x4a, 0x98, 0x7f, 0xff, 0xff, 0xff,
+ 0xfe, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x48, 0x44, 0x91, 0x65, 0x55,
+ 0x4a, 0x54, 0x96, 0x85, 0x55, 0x52, 0xaa, 0x8a, 0x1a, 0xaa, 0x59, 0x30,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xfe, 0x01, 0x0f, 0xff,
+ 0xff, 0xff, 0xff, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20, 0x00,
+ 0x00, 0x02, 0xc3, 0x50, 0x29, 0x49, 0x2d, 0x35, 0x24, 0x54, 0x89, 0x32,
+ 0x52, 0x91, 0x49, 0x40, 0xff, 0xff, 0xff, 0xff, 0xff, 0x30, 0xff, 0xff,
+ 0xfc, 0x01, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x08, 0x80, 0x00, 0x87, 0x4b, 0x42, 0x95, 0x2a, 0x4a,
+ 0x4a, 0x89, 0x52, 0x44, 0x45, 0x25, 0x55, 0x10, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x98, 0x7f, 0xff, 0xf0, 0x01, 0x0f, 0xff, 0xff, 0xff, 0xfc, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x40, 0x40, 0x11, 0x15, 0x6a, 0x92,
+ 0x4d, 0x29, 0x4a, 0xaa, 0xa4, 0xaa, 0x55, 0x54, 0x9a, 0x74, 0x44, 0xb1,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x8c, 0x1f, 0xff, 0x00, 0x01, 0x0f, 0xff,
+ 0xff, 0xff, 0xf0, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x80, 0x01, 0x48, 0x54, 0x94, 0xa6, 0xa9, 0x52, 0x45, 0x24, 0x84, 0x92,
+ 0xa4, 0x85, 0x2a, 0x43, 0xff, 0xff, 0xff, 0xff, 0xff, 0x84, 0x00, 0x00,
+ 0x00, 0x01, 0x1f, 0xff, 0xff, 0xff, 0xc0, 0x92, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x09, 0x02, 0x03, 0x53, 0x15, 0x2a, 0x49, 0x4a, 0xa5,
+ 0x49, 0x29, 0x29, 0x24, 0x2a, 0xd5, 0x54, 0xa3, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xc6, 0x00, 0x00, 0x00, 0x03, 0x1f, 0xff, 0xff, 0xfe, 0x01, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x20, 0x26, 0xd4, 0xa2,
+ 0x49, 0xb5, 0x15, 0x4a, 0x52, 0xa9, 0x4a, 0x92, 0xa5, 0x12, 0x52, 0xa3,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x00, 0x00, 0x00, 0x03, 0x1f, 0xff,
+ 0xff, 0xf0, 0x14, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x00, 0x44,
+ 0x00, 0x0c, 0xc4, 0xa4, 0xa8, 0x25, 0x2a, 0xaa, 0x84, 0x45, 0x28, 0xa4,
+ 0xa8, 0xa5, 0x4a, 0x43, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x00, 0x00,
+ 0x00, 0x06, 0x1f, 0xff, 0xff, 0xc0, 0x25, 0x24, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x80, 0x12, 0x01, 0x11, 0x12, 0x8a, 0x95, 0x49, 0x55, 0x49,
+ 0x2a, 0xaa, 0x45, 0x0a, 0x4a, 0x94, 0xaa, 0x90, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xc2, 0x00, 0x00, 0x00, 0x06, 0x1f, 0xff, 0xff, 0x02, 0x48, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x08, 0x00, 0x22, 0x01, 0x24, 0x95,
+ 0x21, 0x55, 0x55, 0x2a, 0x54, 0x92, 0x94, 0xa8, 0xa9, 0x2a, 0x92, 0xa8,
+ 0x1f, 0xff, 0xff, 0xff, 0xff, 0x82, 0x00, 0x00, 0x00, 0x06, 0x0f, 0xff,
+ 0xfe, 0x02, 0x49, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x01, 0x40,
+ 0x00, 0xc4, 0x89, 0x11, 0x54, 0x95, 0x29, 0x49, 0x49, 0x24, 0x29, 0x12,
+ 0x92, 0xa5, 0x55, 0x28, 0x01, 0xff, 0xff, 0xff, 0xff, 0x82, 0x00, 0x00,
+ 0x00, 0x02, 0x0f, 0xff, 0xfc, 0x14, 0x92, 0x49, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x42, 0x06, 0x44, 0x45, 0x08, 0x92, 0x24, 0x95, 0xaa, 0xa5, 0x52,
+ 0x92, 0x55, 0x42, 0x52, 0xaa, 0x4a, 0x49, 0x4a, 0x80, 0x0f, 0xff, 0xff,
+ 0xff, 0x84, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0xf0, 0x04, 0x92, 0x49,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x2a, 0x80, 0x01, 0x95, 0x2a, 0x8a,
+ 0xa0, 0x95, 0x2a, 0x95, 0x54, 0xa4, 0x54, 0x84, 0x44, 0x95, 0x55, 0x29,
+ 0x54, 0x00, 0x7f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xff,
+ 0xe0, 0x29, 0x24, 0x92, 0x00, 0x00, 0x00, 0x00, 0x22, 0x04, 0x03, 0x03,
+ 0x42, 0x12, 0x52, 0x2a, 0x15, 0x55, 0x54, 0xa8, 0x92, 0xa9, 0x49, 0x29,
+ 0x54, 0xaa, 0x92, 0x52, 0xa5, 0x40, 0x07, 0xff, 0xff, 0x00, 0x15, 0x50,
+ 0x92, 0x44, 0x07, 0xff, 0xc0, 0x52, 0x54, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x84, 0x02, 0x86, 0x03, 0x08, 0x41, 0x48, 0x94, 0xa5, 0x52, 0xaa, 0x8a,
+ 0x49, 0x52, 0x92, 0x45, 0x49, 0x55, 0x2a, 0xaa, 0x94, 0x96, 0x00, 0x7f,
+ 0xfe, 0x01, 0x55, 0x2b, 0x25, 0x50, 0x03, 0xff, 0x00, 0xa4, 0x91, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x88, 0x2c, 0x18, 0x14, 0x0a, 0xaa, 0x92, 0x55,
+ 0x49, 0x55, 0x4a, 0xa9, 0x52, 0x44, 0x92, 0x94, 0x92, 0x92, 0xca, 0x4a,
+ 0x55, 0x27, 0xa0, 0x0f, 0xf8, 0x05, 0x35, 0x49, 0x68, 0x91, 0x00, 0xf8,
+ 0x04, 0x95, 0x25, 0x29, 0x00, 0x00, 0x00, 0x01, 0x08, 0x04, 0x18, 0x00,
+ 0x08, 0x04, 0x49, 0x08, 0x55, 0x4a, 0xa9, 0x12, 0x95, 0x55, 0x49, 0x22,
+ 0x65, 0x2a, 0x5a, 0x92, 0xa9, 0x49, 0x54, 0x00, 0xe0, 0x24, 0xb2, 0x55,
+ 0x15, 0x4c, 0x00, 0x00, 0x02, 0x48, 0x52, 0x52, 0x00, 0x00, 0x00, 0x02,
+ 0x21, 0x09, 0x50, 0x81, 0x51, 0x69, 0x48, 0x52, 0x92, 0x94, 0xaa, 0x54,
+ 0x25, 0x54, 0x92, 0x4a, 0x89, 0x54, 0x82, 0xaa, 0x4a, 0x53, 0x55, 0x00,
+ 0x00, 0x49, 0x65, 0x52, 0x51, 0x12, 0x80, 0x00, 0x14, 0x92, 0x92, 0x42,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x20, 0x50, 0x24, 0x2a, 0x95, 0x25, 0x29,
+ 0x4a, 0xb5, 0x48, 0xa5, 0x52, 0x49, 0x24, 0x94, 0xaa, 0xab, 0x15, 0x24,
+ 0x92, 0xa5, 0x54, 0x40, 0x00, 0xaa, 0xb5, 0x25, 0x4a, 0xa4, 0x80, 0x00,
+ 0x21, 0x24, 0xa8, 0x94, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x02, 0xb1, 0x01,
+ 0x24, 0x80, 0x48, 0xa2, 0x55, 0x55, 0x52, 0x8a, 0x94, 0xaa, 0x49, 0x22,
+ 0x8a, 0x49, 0x49, 0x55, 0x4a, 0x51, 0x25, 0x48, 0x02, 0xa8, 0x6a, 0x69,
+ 0x12, 0x29, 0x50, 0x00, 0x55, 0x24, 0x81, 0x24, 0x00, 0x00, 0x00, 0x23,
+ 0x80, 0x90, 0x70, 0x14, 0x82, 0x55, 0x22, 0x8a, 0xa9, 0x14, 0xaa, 0xa9,
+ 0x25, 0x51, 0x2a, 0x55, 0x21, 0x55, 0x55, 0x12, 0x55, 0x32, 0xaa, 0x94,
+ 0xa5, 0x55, 0x5a, 0x92, 0x54, 0xa5, 0x52, 0x24, 0xa2, 0x49, 0x2a, 0x92,
+ 0x00, 0x00, 0x00, 0x0e, 0x08, 0x00, 0xe2, 0xa0, 0x04, 0x54, 0x84, 0x52,
+ 0x4a, 0xa4, 0xaa, 0x2a, 0xa9, 0x25, 0x45, 0x42, 0xae, 0x92, 0x44, 0xa4,
+ 0x91, 0x24, 0xaa, 0xaa, 0xaa, 0x91, 0x55, 0x55, 0x51, 0x55, 0x54, 0xa9,
+ 0x14, 0x92, 0x49, 0x24, 0x00, 0x00, 0x00, 0x24, 0x01, 0x82, 0x70, 0x09,
+ 0x2a, 0x41, 0x2a, 0x89, 0x14, 0x49, 0x29, 0x52, 0x4a, 0xaa, 0x2a, 0x15,
+ 0x12, 0xaa, 0xa9, 0x2a, 0xaa, 0xba, 0xa4, 0x94, 0x95, 0x55, 0x5a, 0x49,
+ 0x25, 0x20, 0x49, 0x22, 0xa5, 0x24, 0x92, 0x92, 0x00, 0x00, 0x01, 0x0c,
+ 0x93, 0x10, 0xe2, 0x11, 0x00, 0x94, 0x22, 0x52, 0x69, 0x53, 0x52, 0x45,
+ 0x49, 0x22, 0xa4, 0x4a, 0x55, 0x29, 0x2a, 0xa4, 0x52, 0x42, 0xaa, 0xa5,
+ 0x52, 0xa8, 0xaa, 0x55, 0x4a, 0xab, 0xa9, 0x4a, 0x54, 0x49, 0x32, 0x24 };
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 372e4e69c43a..96989ad46f66 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -21,6 +21,7 @@ config M68K
select HAVE_AOUT if MMU
select HAVE_ASM_MODVERSIONS
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
@@ -34,6 +35,7 @@ config M68K
select SET_FS
select UACCESS_MEMCPY if !MMU
select VIRT_TO_BUS
+ select ZONE_DMA
config CPU_BIG_ENDIAN
def_bool y
@@ -62,10 +64,6 @@ config TIME_LOW_RES
config NO_IOPORT_MAP
def_bool y
-config ZONE_DMA
- bool
- default y
-
config HZ
int
default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index f4d23977d2a5..29e946394fdb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -408,10 +408,6 @@ config SINGLE_MEMORY_CHUNK
order" to save memory that could be wasted for unused memory map.
Say N if not sure.
-config ARCH_DISCONTIGMEM_ENABLE
- depends on BROKEN
- def_bool MMU && !SINGLE_MEMORY_CHUNK
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ADVANCED
depends on !SINGLE_MEMORY_CHUNK
@@ -451,11 +447,6 @@ config M68K_L2_CACHE
depends on MAC
default y
-config NODES_SHIFT
- int
- default "3"
- depends on DISCONTIGMEM
-
config CPU_HAS_NO_BITFIELDS
bool
@@ -553,4 +544,3 @@ config CACHE_COPYBACK
The ColdFire CPU cache is set into Copy-back mode.
endchoice
endif
-
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 4d59ec2f5b8d..d964c1f27399 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -25,6 +25,9 @@ config ATARI
this kernel on an Atari, say Y here and browse the material
available in <file:Documentation/m68k>; otherwise say N.
+config ATARI_KBD_CORE
+ bool
+
config MAC
bool "Macintosh support"
depends on MMU
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 82620f14124d..c54055a3d284 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -66,8 +66,7 @@ KBUILD_CFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += -pipe -ffreestanding
ifdef CONFIG_MMU
-# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
-KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
+KBUILD_CFLAGS += -ffixed-a2
else
# we can use a m68k-linux-gcc toolchain with these in place
KBUILD_CPPFLAGS += -DUTS_SYSNAME=\"uClinux\"
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 44f9b5216ac9..261a0f57cc9a 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -875,16 +875,8 @@ static const struct resource atari_scsi_tt_rsrc[] __initconst = {
#define FALCON_IDE_BASE 0xfff00000
static const struct resource atari_falconide_rsrc[] __initconst = {
- {
- .flags = IORESOURCE_MEM,
- .start = FALCON_IDE_BASE,
- .end = FALCON_IDE_BASE + 0x39,
- },
- {
- .flags = IORESOURCE_IRQ,
- .start = IRQ_MFP_FSCSI,
- .end = IRQ_MFP_FSCSI,
- },
+ DEFINE_RES_MEM(FALCON_IDE_BASE, 0x38),
+ DEFINE_RES_MEM(FALCON_IDE_BASE + 0x38, 2),
};
int __init atari_platform_init(void)
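For reference, DEFINE_RES_MEM(start, size) from <linux/ioport.h> is shorthand for the open-coded struct resource initializer it replaces here, roughly:

/* Rough expansion of DEFINE_RES_MEM(FALCON_IDE_BASE, 0x38);
 * see DEFINE_RES_NAMED() in include/linux/ioport.h for the exact form. */
{
	.start = FALCON_IDE_BASE,
	.end   = FALCON_IDE_BASE + 0x38 - 1,
	.flags = IORESOURCE_MEM,
},

The split into two memory resources (the register block plus a separate 2-byte control window) presumably matches how the Falcon IDE platform driver now maps those regions; the explicit IRQ_MFP_FSCSI entry is dropped.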
diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
index 076a9caa9557..2ed841e94111 100644
--- a/arch/m68k/coldfire/clk.c
+++ b/arch/m68k/coldfire/clk.c
@@ -73,20 +73,6 @@ struct clk_ops clk_ops1 = {
#endif /* MCFPM_PPMCR1 */
#endif /* MCFPM_PPMCR0 */
-struct clk *clk_get(struct device *dev, const char *id)
-{
- const char *clk_name = dev ? dev_name(dev) : id ? id : NULL;
- struct clk *clk;
- unsigned i;
-
- for (i = 0; (clk = mcf_clks[i]) != NULL; ++i)
- if (!strcmp(clk->name, clk_name))
- return clk;
- pr_warn("clk_get: didn't find clock %s\n", clk_name);
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
int clk_enable(struct clk *clk)
{
unsigned long flags;
@@ -117,13 +103,6 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-void clk_put(struct clk *clk)
-{
- if (clk->enabled != 0)
- pr_warn("clk_put %s still enabled\n", clk->name);
-}
-EXPORT_SYMBOL(clk_put);
-
unsigned long clk_get_rate(struct clk *clk)
{
if (!clk)
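The hunks above drop the coldfire-private clk_get()/clk_put(); the per-SoC files that follow register clkdev lookup tables so the generic clkdev code resolves clocks instead. A minimal sketch of the pattern, with an invented "mydev.0" device name and clk_sys standing in for a clock defined via DEFINE_CLK() as in the files below:

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* One entry per consumer: (dev_id, con_id) maps to a struct clk.
 * A NULL dev_id means the entry matches on the con_id name alone. */
static struct clk_lookup example_clk_lookup[] = {
	CLKDEV_INIT("mydev.0", NULL, &clk_sys),	/* clk_sys from DEFINE_CLK() */
};

static void __init example_clk_init(void)
{
	/* Board/SoC init hands the table to the clkdev core once. */
	clkdev_add_table(example_clk_lookup, ARRAY_SIZE(example_clk_lookup));
}

static void example_driver_probe(struct device *dev)
{
	/* dev_name(dev) == "mydev.0": resolved via the table above,
	 * replacing the removed mcf_clks[] name scan. */
	struct clk *clk = clk_get(dev, NULL);

	if (!IS_ERR(clk))
		pr_info("bus clock: %lu Hz\n", clk_get_rate(clk));
}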
diff --git a/arch/m68k/coldfire/m5206.c b/arch/m68k/coldfire/m5206.c
index 2f14ea95c391..5e726e94b5ab 100644
--- a/arch/m68k/coldfire/m5206.c
+++ b/arch/m68k/coldfire/m5206.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,21 +24,15 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfi2c0,
- NULL
+static struct clk_lookup m5206_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -66,6 +61,8 @@ void __init config_BSP(char *commandp, int size)
mcf_mapirq2imr(28, MCFINTC_EINT4);
mcf_mapirq2imr(31, MCFINTC_EINT7);
m5206_i2c_init();
+
+ clkdev_add_table(m5206_clk_lookup, ARRAY_SIZE(m5206_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m520x.c b/arch/m68k/coldfire/m520x.c
index b5b2a267dada..d2f96b40aee1 100644
--- a/arch/m68k/coldfire/m520x.c
+++ b/arch/m68k/coldfire/m520x.c
@@ -12,6 +12,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -48,31 +49,29 @@ DEFINE_CLK(0, "sys.0", 40, MCF_BUSCLK);
DEFINE_CLK(0, "gpio.0", 41, MCF_BUSCLK);
DEFINE_CLK(0, "sdram.0", 42, MCF_CLK);
-struct clk *mcf_clks[] = {
- &__clk_0_2, /* flexbus */
- &__clk_0_12, /* fec.0 */
- &__clk_0_17, /* edma */
- &__clk_0_18, /* intc.0 */
- &__clk_0_21, /* iack.0 */
- &__clk_0_22, /* imx1-i2c.0 */
- &__clk_0_23, /* mcfqspi.0 */
- &__clk_0_24, /* mcfuart.0 */
- &__clk_0_25, /* mcfuart.1 */
- &__clk_0_26, /* mcfuart.2 */
- &__clk_0_28, /* mcftmr.0 */
- &__clk_0_29, /* mcftmr.1 */
- &__clk_0_30, /* mcftmr.2 */
- &__clk_0_31, /* mcftmr.3 */
-
- &__clk_0_32, /* mcfpit.0 */
- &__clk_0_33, /* mcfpit.1 */
- &__clk_0_34, /* mcfeport.0 */
- &__clk_0_35, /* mcfwdt.0 */
- &__clk_0_36, /* pll.0 */
- &__clk_0_40, /* sys.0 */
- &__clk_0_41, /* gpio.0 */
- &__clk_0_42, /* sdram.0 */
- NULL,
+static struct clk_lookup m520x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "flexbus", &__clk_0_2),
+ CLKDEV_INIT("fec.0", NULL, &__clk_0_12),
+ CLKDEV_INIT("edma", NULL, &__clk_0_17),
+ CLKDEV_INIT("intc.0", NULL, &__clk_0_18),
+ CLKDEV_INIT("iack.0", NULL, &__clk_0_21),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &__clk_0_22),
+ CLKDEV_INIT("mcfqspi.0", NULL, &__clk_0_23),
+ CLKDEV_INIT("mcfuart.0", NULL, &__clk_0_24),
+ CLKDEV_INIT("mcfuart.1", NULL, &__clk_0_25),
+ CLKDEV_INIT("mcfuart.2", NULL, &__clk_0_26),
+ CLKDEV_INIT("mcftmr.0", NULL, &__clk_0_28),
+ CLKDEV_INIT("mcftmr.1", NULL, &__clk_0_29),
+ CLKDEV_INIT("mcftmr.2", NULL, &__clk_0_30),
+ CLKDEV_INIT("mcftmr.3", NULL, &__clk_0_31),
+ CLKDEV_INIT("mcfpit.0", NULL, &__clk_0_32),
+ CLKDEV_INIT("mcfpit.1", NULL, &__clk_0_33),
+ CLKDEV_INIT("mcfeport.0", NULL, &__clk_0_34),
+ CLKDEV_INIT("mcfwdt.0", NULL, &__clk_0_35),
+ CLKDEV_INIT(NULL, "pll.0", &__clk_0_36),
+ CLKDEV_INIT(NULL, "sys.0", &__clk_0_40),
+ CLKDEV_INIT("gpio.0", NULL, &__clk_0_41),
+ CLKDEV_INIT("sdram.0", NULL, &__clk_0_42),
};
static struct clk * const enable_clks[] __initconst = {
@@ -115,6 +114,8 @@ static void __init m520x_clk_init(void)
/* make sure these clocks are disabled */
for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
__clk_init_disabled(disable_clks[i]);
+
+ clkdev_add_table(m520x_clk_lookup, ARRAY_SIZE(m520x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m523x.c b/arch/m68k/coldfire/m523x.c
index ddf2496ed117..193c178162c1 100644
--- a/arch/m68k/coldfire/m523x.c
+++ b/arch/m68k/coldfire/m523x.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -26,31 +27,20 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
-DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
-DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
-DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfpit0,
- &clk_mcfpit1,
- &clk_mcfpit2,
- &clk_mcfpit3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfqspi0,
- &clk_fec0,
- &clk_mcfi2c0,
- NULL
+
+struct clk_lookup m523x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfpit.0", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.1", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.2", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.3", NULL, &clk_pll),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -100,6 +90,8 @@ void __init config_BSP(char *commandp, int size)
m523x_fec_init();
m523x_qspi_init();
m523x_i2c_init();
+
+ clkdev_add_table(m523x_clk_lookup, ARRAY_SIZE(m523x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5249.c b/arch/m68k/coldfire/m5249.c
index 0590f8c421f1..6d66972de214 100644
--- a/arch/m68k/coldfire/m5249.c
+++ b/arch/m68k/coldfire/m5249.c
@@ -9,6 +9,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,25 +24,17 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c1, "imx1-i2c.1", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfqspi0,
- &clk_mcfi2c0,
- &clk_mcfi2c1,
- NULL
+
+struct clk_lookup m5249_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.1", NULL, &clk_sys),
};
/***************************************************************************/
@@ -137,6 +130,8 @@ void __init config_BSP(char *commandp, int size)
#endif
m5249_qspi_init();
m5249_i2c_init();
+
+ clkdev_add_table(m5249_clk_lookup, ARRAY_SIZE(m5249_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m525x.c b/arch/m68k/coldfire/m525x.c
index 1772359c416c..2c4d2ca2f20d 100644
--- a/arch/m68k/coldfire/m525x.c
+++ b/arch/m68k/coldfire/m525x.c
@@ -9,6 +9,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,25 +24,17 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c1, "imx1-i2c.1", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfqspi0,
- &clk_mcfi2c0,
- &clk_mcfi2c1,
- NULL
+
+static struct clk_lookup m525x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.1", NULL, &clk_sys),
};
/***************************************************************************/
@@ -88,6 +81,8 @@ void __init config_BSP(char *commandp, int size)
m525x_qspi_init();
m525x_i2c_init();
+
+ clkdev_add_table(m525x_clk_lookup, ARRAY_SIZE(m525x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index 6b3ab583c698..734dab657fe3 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -34,27 +35,18 @@ unsigned char ledbank = 0xff;
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcftmr2, "mcftmr.2", MCF_BUSCLK);
-DEFINE_CLK(mcftmr3, "mcftmr.3", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcftmr2,
- &clk_mcftmr3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfqspi0,
- &clk_fec0,
- NULL
+
+static struct clk_lookup m5272_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.3", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -128,6 +120,7 @@ static int __init init_BSP(void)
{
m5272_uarts_init();
fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
+ clkdev_add_table(m5272_clk_lookup, ARRAY_SIZE(m5272_clk_lookup));
return 0;
}
diff --git a/arch/m68k/coldfire/m527x.c b/arch/m68k/coldfire/m527x.c
index cad462df6861..037f3e520acc 100644
--- a/arch/m68k/coldfire/m527x.c
+++ b/arch/m68k/coldfire/m527x.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -27,33 +28,21 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
-DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
-DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
-DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-DEFINE_CLK(fec1, "fec.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfpit0,
- &clk_mcfpit1,
- &clk_mcfpit2,
- &clk_mcfpit3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfqspi0,
- &clk_fec0,
- &clk_fec1,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m527x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfpit.0", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.1", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.2", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.3", NULL, &clk_pll),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -151,6 +140,7 @@ void __init config_BSP(char *commandp, int size)
m527x_fec_init();
m527x_qspi_init();
m527x_i2c_init();
+ clkdev_add_table(m527x_clk_lookup, ARRAY_SIZE(m527x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m528x.c b/arch/m68k/coldfire/m528x.c
index 7ad3193887e8..51a6a6236e12 100644
--- a/arch/m68k/coldfire/m528x.c
+++ b/arch/m68k/coldfire/m528x.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -28,31 +29,20 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
-DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
-DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
-DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfpit0,
- &clk_mcfpit1,
- &clk_mcfpit2,
- &clk_mcfpit3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfqspi0,
- &clk_fec0,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m528x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfpit.0", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.1", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.2", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.3", NULL, &clk_pll),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -146,6 +136,8 @@ void __init config_BSP(char *commandp, int size)
m528x_fec_init();
m528x_qspi_init();
m528x_i2c_init();
+
+ clkdev_add_table(m528x_clk_lookup, ARRAY_SIZE(m528x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5307.c b/arch/m68k/coldfire/m5307.c
index 64b4b1fd34ff..4ed2e43ab3ad 100644
--- a/arch/m68k/coldfire/m5307.c
+++ b/arch/m68k/coldfire/m5307.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -32,21 +33,15 @@ unsigned char ledbank = 0xff;
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m5307_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -88,6 +83,8 @@ void __init config_BSP(char *commandp, int size)
wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
#endif
m5307_i2c_init();
+
+ clkdev_add_table(m5307_clk_lookup, ARRAY_SIZE(m5307_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m53xx.c b/arch/m68k/coldfire/m53xx.c
index 075722c0c4f0..bd033e1975bf 100644
--- a/arch/m68k/coldfire/m53xx.c
+++ b/arch/m68k/coldfire/m53xx.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -65,45 +66,42 @@ DEFINE_CLK(1, "mdha.0", 32, MCF_CLK);
DEFINE_CLK(1, "skha.0", 33, MCF_CLK);
DEFINE_CLK(1, "rng.0", 34, MCF_CLK);
-struct clk *mcf_clks[] = {
- &__clk_0_2, /* flexbus */
- &__clk_0_8, /* mcfcan.0 */
- &__clk_0_12, /* fec.0 */
- &__clk_0_17, /* edma */
- &__clk_0_18, /* intc.0 */
- &__clk_0_19, /* intc.1 */
- &__clk_0_21, /* iack.0 */
- &__clk_0_22, /* imx1-i2c.0 */
- &__clk_0_23, /* mcfqspi.0 */
- &__clk_0_24, /* mcfuart.0 */
- &__clk_0_25, /* mcfuart.1 */
- &__clk_0_26, /* mcfuart.2 */
- &__clk_0_28, /* mcftmr.0 */
- &__clk_0_29, /* mcftmr.1 */
- &__clk_0_30, /* mcftmr.2 */
- &__clk_0_31, /* mcftmr.3 */
-
- &__clk_0_32, /* mcfpit.0 */
- &__clk_0_33, /* mcfpit.1 */
- &__clk_0_34, /* mcfpit.2 */
- &__clk_0_35, /* mcfpit.3 */
- &__clk_0_36, /* mcfpwm.0 */
- &__clk_0_37, /* mcfeport.0 */
- &__clk_0_38, /* mcfwdt.0 */
- &__clk_0_40, /* sys.0 */
- &__clk_0_41, /* gpio.0 */
- &__clk_0_42, /* mcfrtc.0 */
- &__clk_0_43, /* mcflcd.0 */
- &__clk_0_44, /* mcfusb-otg.0 */
- &__clk_0_45, /* mcfusb-host.0 */
- &__clk_0_46, /* sdram.0 */
- &__clk_0_47, /* ssi.0 */
- &__clk_0_48, /* pll.0 */
-
- &__clk_1_32, /* mdha.0 */
- &__clk_1_33, /* skha.0 */
- &__clk_1_34, /* rng.0 */
- NULL,
+static struct clk_lookup m53xx_clk_lookup[] = {
+ CLKDEV_INIT("flexbus", NULL, &__clk_0_2),
+ CLKDEV_INIT("mcfcan.0", NULL, &__clk_0_8),
+ CLKDEV_INIT("fec.0", NULL, &__clk_0_12),
+ CLKDEV_INIT("edma", NULL, &__clk_0_17),
+ CLKDEV_INIT("intc.0", NULL, &__clk_0_18),
+ CLKDEV_INIT("intc.1", NULL, &__clk_0_19),
+ CLKDEV_INIT("iack.0", NULL, &__clk_0_21),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &__clk_0_22),
+ CLKDEV_INIT("mcfqspi.0", NULL, &__clk_0_23),
+ CLKDEV_INIT("mcfuart.0", NULL, &__clk_0_24),
+ CLKDEV_INIT("mcfuart.1", NULL, &__clk_0_25),
+ CLKDEV_INIT("mcfuart.2", NULL, &__clk_0_26),
+ CLKDEV_INIT("mcftmr.0", NULL, &__clk_0_28),
+ CLKDEV_INIT("mcftmr.1", NULL, &__clk_0_29),
+ CLKDEV_INIT("mcftmr.2", NULL, &__clk_0_30),
+ CLKDEV_INIT("mcftmr.3", NULL, &__clk_0_31),
+ CLKDEV_INIT("mcfpit.0", NULL, &__clk_0_32),
+ CLKDEV_INIT("mcfpit.1", NULL, &__clk_0_33),
+ CLKDEV_INIT("mcfpit.2", NULL, &__clk_0_34),
+ CLKDEV_INIT("mcfpit.3", NULL, &__clk_0_35),
+ CLKDEV_INIT("mcfpwm.0", NULL, &__clk_0_36),
+ CLKDEV_INIT("mcfeport.0", NULL, &__clk_0_37),
+ CLKDEV_INIT("mcfwdt.0", NULL, &__clk_0_38),
+ CLKDEV_INIT(NULL, "sys.0", &__clk_0_40),
+ CLKDEV_INIT("gpio.0", NULL, &__clk_0_41),
+ CLKDEV_INIT("mcfrtc.0", NULL, &__clk_0_42),
+ CLKDEV_INIT("mcflcd.0", NULL, &__clk_0_43),
+ CLKDEV_INIT("mcfusb-otg.0", NULL, &__clk_0_44),
+ CLKDEV_INIT("mcfusb-host.0", NULL, &__clk_0_45),
+ CLKDEV_INIT("sdram.0", NULL, &__clk_0_46),
+ CLKDEV_INIT("ssi.0", NULL, &__clk_0_47),
+ CLKDEV_INIT(NULL, "pll.0", &__clk_0_48),
+ CLKDEV_INIT("mdha.0", NULL, &__clk_1_32),
+ CLKDEV_INIT("skha.0", NULL, &__clk_1_33),
+ CLKDEV_INIT("rng.0", NULL, &__clk_1_34),
};
static struct clk * const enable_clks[] __initconst = {
@@ -158,6 +156,8 @@ static void __init m53xx_clk_init(void)
/* make sure these clocks are disabled */
for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
__clk_init_disabled(disable_clks[i]);
+
+ clkdev_add_table(m53xx_clk_lookup, ARRAY_SIZE(m53xx_clk_lookup));
}
/***************************************************************************/
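The m53xx (and later m5441x) parts use the gated, bank/slot form of DEFINE_CLK(). With the .name member dropped by the mcfclk.h hunk further down, the name argument becomes documentation only; a sketch of what the macro roughly expands to, based on that hunk:

    /* DEFINE_CLK(0, "mcfuart.0", 24, MCF_CLK) becomes approximately: */
    static struct clk __clk_0_24 = {
        .clk_ops = &clk_ops0,
        .rate    = MCF_CLK,
        .slot    = 24,
    };
    /* published in the table above as CLKDEV_INIT("mcfuart.0", NULL, &__clk_0_24) */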
diff --git a/arch/m68k/coldfire/m5407.c b/arch/m68k/coldfire/m5407.c
index 0400d76115a1..b32efb3042a2 100644
--- a/arch/m68k/coldfire/m5407.c
+++ b/arch/m68k/coldfire/m5407.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,21 +24,15 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfi2c0,
- NULL
+static struct clk_lookup m5407_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -63,6 +58,8 @@ void __init config_BSP(char *commandp, int size)
mcf_mapirq2imr(29, MCFINTC_EINT5);
mcf_mapirq2imr(31, MCFINTC_EINT7);
m5407_i2c_init();
+
+ clkdev_add_table(m5407_clk_lookup, ARRAY_SIZE(m5407_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c
index 1e5259a652d1..ce14693d18b6 100644
--- a/arch/m68k/coldfire/m5441x.c
+++ b/arch/m68k/coldfire/m5441x.c
@@ -5,6 +5,7 @@
* (C) Copyright Steven King <sfking@fdwdc.com>
*/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -78,72 +79,67 @@ DEFINE_CLK(2, "ipg.0", 0, MCF_CLK);
DEFINE_CLK(2, "ahb.0", 1, MCF_CLK);
DEFINE_CLK(2, "per.0", 2, MCF_CLK);
-struct clk *mcf_clks[] = {
- &__clk_0_2,
- &__clk_0_8,
- &__clk_0_9,
- &__clk_0_14,
- &__clk_0_15,
- &__clk_0_17,
- &__clk_0_18,
- &__clk_0_19,
- &__clk_0_20,
- &__clk_0_22,
- &__clk_0_23,
- &__clk_0_24,
- &__clk_0_25,
- &__clk_0_26,
- &__clk_0_27,
- &__clk_0_28,
- &__clk_0_29,
- &__clk_0_30,
- &__clk_0_31,
- &__clk_0_32,
- &__clk_0_33,
- &__clk_0_34,
- &__clk_0_35,
- &__clk_0_37,
- &__clk_0_38,
- &__clk_0_39,
- &__clk_0_42,
- &__clk_0_43,
- &__clk_0_44,
- &__clk_0_45,
- &__clk_0_46,
- &__clk_0_47,
- &__clk_0_48,
- &__clk_0_49,
- &__clk_0_50,
- &__clk_0_51,
- &__clk_0_53,
- &__clk_0_54,
- &__clk_0_55,
- &__clk_0_56,
- &__clk_0_63,
-
- &__clk_1_2,
- &__clk_1_4,
- &__clk_1_5,
- &__clk_1_6,
- &__clk_1_7,
- &__clk_1_24,
- &__clk_1_25,
- &__clk_1_26,
- &__clk_1_27,
- &__clk_1_28,
- &__clk_1_29,
- &__clk_1_34,
- &__clk_1_36,
- &__clk_1_37,
-
- &__clk_2_0,
- &__clk_2_1,
- &__clk_2_2,
-
- NULL,
+static struct clk_lookup m5441x_clk_lookup[] = {
+ CLKDEV_INIT("flexbus", NULL, &__clk_0_2),
+ CLKDEV_INIT("mcfcan.0", NULL, &__clk_0_8),
+ CLKDEV_INIT("mcfcan.1", NULL, &__clk_0_9),
+ CLKDEV_INIT("imx1-i2c.1", NULL, &__clk_0_14),
+ CLKDEV_INIT("mcfdspi.1", NULL, &__clk_0_15),
+ CLKDEV_INIT("edma", NULL, &__clk_0_17),
+ CLKDEV_INIT("intc.0", NULL, &__clk_0_18),
+ CLKDEV_INIT("intc.1", NULL, &__clk_0_19),
+ CLKDEV_INIT("intc.2", NULL, &__clk_0_20),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &__clk_0_22),
+ CLKDEV_INIT("fsl-dspi.0", NULL, &__clk_0_23),
+ CLKDEV_INIT("mcfuart.0", NULL, &__clk_0_24),
+ CLKDEV_INIT("mcfuart.1", NULL, &__clk_0_25),
+ CLKDEV_INIT("mcfuart.2", NULL, &__clk_0_26),
+ CLKDEV_INIT("mcfuart.3", NULL, &__clk_0_27),
+ CLKDEV_INIT("mcftmr.0", NULL, &__clk_0_28),
+ CLKDEV_INIT("mcftmr.1", NULL, &__clk_0_29),
+ CLKDEV_INIT("mcftmr.2", NULL, &__clk_0_30),
+ CLKDEV_INIT("mcftmr.3", NULL, &__clk_0_31),
+ CLKDEV_INIT("mcfpit.0", NULL, &__clk_0_32),
+ CLKDEV_INIT("mcfpit.1", NULL, &__clk_0_33),
+ CLKDEV_INIT("mcfpit.2", NULL, &__clk_0_34),
+ CLKDEV_INIT("mcfpit.3", NULL, &__clk_0_35),
+ CLKDEV_INIT("mcfeport.0", NULL, &__clk_0_37),
+ CLKDEV_INIT("mcfadc.0", NULL, &__clk_0_38),
+ CLKDEV_INIT("mcfdac.0", NULL, &__clk_0_39),
+ CLKDEV_INIT("mcfrtc.0", NULL, &__clk_0_42),
+ CLKDEV_INIT("mcfsim.0", NULL, &__clk_0_43),
+ CLKDEV_INIT("mcfusb-otg.0", NULL, &__clk_0_44),
+ CLKDEV_INIT("mcfusb-host.0", NULL, &__clk_0_45),
+ CLKDEV_INIT("mcfddr-sram.0", NULL, &__clk_0_46),
+ CLKDEV_INIT("mcfssi.0", NULL, &__clk_0_47),
+ CLKDEV_INIT(NULL, "pll.0", &__clk_0_48),
+ CLKDEV_INIT("mcfrng.0", NULL, &__clk_0_49),
+ CLKDEV_INIT("mcfssi.1", NULL, &__clk_0_50),
+ CLKDEV_INIT("sdhci-esdhc-mcf.0", NULL, &__clk_0_51),
+ CLKDEV_INIT("enet-fec.0", NULL, &__clk_0_53),
+ CLKDEV_INIT("enet-fec.1", NULL, &__clk_0_54),
+ CLKDEV_INIT("switch.0", NULL, &__clk_0_55),
+ CLKDEV_INIT("switch.1", NULL, &__clk_0_56),
+ CLKDEV_INIT("nand.0", NULL, &__clk_0_63),
+ CLKDEV_INIT("mcfow.0", NULL, &__clk_1_2),
+ CLKDEV_INIT("imx1-i2c.2", NULL, &__clk_1_4),
+ CLKDEV_INIT("imx1-i2c.3", NULL, &__clk_1_5),
+ CLKDEV_INIT("imx1-i2c.4", NULL, &__clk_1_6),
+ CLKDEV_INIT("imx1-i2c.5", NULL, &__clk_1_7),
+ CLKDEV_INIT("mcfuart.4", NULL, &__clk_1_24),
+ CLKDEV_INIT("mcfuart.5", NULL, &__clk_1_25),
+ CLKDEV_INIT("mcfuart.6", NULL, &__clk_1_26),
+ CLKDEV_INIT("mcfuart.7", NULL, &__clk_1_27),
+ CLKDEV_INIT("mcfuart.8", NULL, &__clk_1_28),
+ CLKDEV_INIT("mcfuart.9", NULL, &__clk_1_29),
+ CLKDEV_INIT("mcfpwm.0", NULL, &__clk_1_34),
+ CLKDEV_INIT(NULL, "sys.0", &__clk_1_36),
+ CLKDEV_INIT("gpio.0", NULL, &__clk_1_37),
+ CLKDEV_INIT("ipg.0", NULL, &__clk_2_0),
+ CLKDEV_INIT("ahb.0", NULL, &__clk_2_1),
+ CLKDEV_INIT("per.0", NULL, &__clk_2_2),
};
-
static struct clk * const enable_clks[] __initconst = {
/* make sure these clocks are enabled */
&__clk_0_15, /* dspi.1 */
@@ -228,6 +224,8 @@ static void __init m5441x_clk_init(void)
/* make sure these clocks are disabled */
for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
__clk_init_disabled(disable_clks[i]);
+
+ clkdev_add_table(m5441x_clk_lookup, ARRAY_SIZE(m5441x_clk_lookup));
}
static void __init m5441x_uarts_init(void)
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index 360c723c0ae6..8e3c8fee8327 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -9,6 +9,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -32,25 +33,17 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfslt0, "mcfslt.0", MCF_BUSCLK);
-DEFINE_CLK(mcfslt1, "mcfslt.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfuart3, "mcfuart.3", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfslt0,
- &clk_mcfslt1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfuart3,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m54xx_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfslt.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfslt.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.3", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -100,6 +93,8 @@ void __init config_BSP(char *commandp, int size)
mach_sched_init = hw_timer_init;
m54xx_uarts_init();
m54xx_i2c_init();
+
+ clkdev_add_table(m54xx_clk_lookup, ARRAY_SIZE(m54xx_clk_lookup));
}
/***************************************************************************/
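For reference, CLKDEV_INIT() only fills in a struct clk_lookup initializer; its definition in <linux/clkdev.h> is approximately:

    #define CLKDEV_INIT(d, n, c)   \
        {                          \
            .dev_id = d,           \
            .con_id = n,           \
            .clk    = c,           \
        }

so clkdev_add_table() simply links these static entries onto the global clkdev list at boot.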
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 59b727b69357..0a2cacf7be08 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -85,7 +85,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -207,6 +206,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -253,7 +253,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -323,11 +322,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -344,6 +338,11 @@ CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
CONFIG_SCSI_ZORRO_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -563,6 +562,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -627,6 +627,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 8d4ddcebe7b8..4dc6dcfaf28a 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -81,7 +81,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -203,6 +202,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -249,7 +249,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -519,6 +518,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -583,6 +583,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 9cc9f1a06516..23d910a692ab 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -88,7 +88,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -210,6 +209,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -256,7 +256,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -324,10 +323,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_FALCON_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -339,6 +334,10 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_ATARI_SCSI=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -541,6 +540,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -605,6 +605,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index c3f3f462e6ce..2c3f42833846 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -78,7 +78,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -200,6 +199,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -246,7 +246,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -512,6 +511,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -576,6 +576,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 8c908fc5c191..5b1898d4b249 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -80,7 +80,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -202,6 +201,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -248,7 +248,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -521,6 +520,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -585,6 +585,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 4e68b72d9c50..9606ccd8dafa 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -79,7 +79,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -201,6 +200,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -247,7 +247,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -315,11 +314,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -332,6 +326,10 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -544,6 +542,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -608,6 +607,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index d31896293c39..3175ba5007e1 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -99,7 +99,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -221,6 +220,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -267,7 +267,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -344,15 +343,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
-CONFIG_BLK_DEV_FALCON_IDE=y
-CONFIG_BLK_DEV_MAC_IDE=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -376,6 +366,13 @@ CONFIG_MVME147_SCSI=y
CONFIG_MVME16x_SCSI=y
CONFIG_BVME6000_SCSI=y
CONFIG_SUN3X_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -630,6 +627,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -694,6 +692,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c7442f9dd469..793085f00c99 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -77,7 +77,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -199,6 +198,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -245,7 +245,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -511,6 +510,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -575,6 +575,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 233b82ea103a..56fbac7943b2 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -78,7 +78,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -200,6 +199,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -246,7 +246,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -512,6 +511,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -576,6 +576,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 664025a0f6a4..0e15431b65e2 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -79,7 +79,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -201,6 +200,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -247,7 +247,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -314,10 +313,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -328,6 +323,10 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -530,6 +529,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -594,6 +594,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 73293a0b3dc8..3490a05f29b8 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -75,7 +75,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -197,6 +196,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -243,7 +243,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -514,6 +513,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -577,6 +577,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index bca8a6f3e92f..4e92c8c332fc 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -75,7 +75,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -197,6 +196,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -243,7 +243,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -513,6 +512,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -577,6 +577,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index ba808543161a..9a8394e96388 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -55,7 +55,6 @@ struct nfhd_device {
int id;
u32 blocks, bsize;
int bshift;
- struct request_queue *queue;
struct gendisk *disk;
};
@@ -119,32 +118,24 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
dev->bsize = bsize;
dev->bshift = ffs(bsize) - 10;
- dev->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (dev->queue == NULL)
- goto free_dev;
-
- blk_queue_logical_block_size(dev->queue, bsize);
-
- dev->disk = alloc_disk(16);
+ dev->disk = blk_alloc_disk(NUMA_NO_NODE);
if (!dev->disk)
- goto free_queue;
+ goto free_dev;
dev->disk->major = major_num;
dev->disk->first_minor = dev_id * 16;
+ dev->disk->minors = 16;
dev->disk->fops = &nfhd_ops;
dev->disk->private_data = dev;
sprintf(dev->disk->disk_name, "nfhd%u", dev_id);
set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));
- dev->disk->queue = dev->queue;
-
+ blk_queue_logical_block_size(dev->disk->queue, bsize);
add_disk(dev->disk);
list_add_tail(&dev->list, &nfhd_list);
return 0;
-free_queue:
- blk_cleanup_queue(dev->queue);
free_dev:
kfree(dev);
out:
@@ -186,8 +177,7 @@ static void __exit nfhd_exit(void)
list_for_each_entry_safe(dev, next, &nfhd_list, list) {
list_del(&dev->list);
del_gendisk(dev->disk);
- put_disk(dev->disk);
- blk_cleanup_queue(dev->queue);
+ blk_cleanup_disk(dev->disk);
kfree(dev);
}
unregister_blkdev(major_num, "nfhd");
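The nfblock conversion follows the combined gendisk/queue API introduced in this cycle: blk_alloc_disk() hands back a gendisk with its request_queue already attached, and blk_cleanup_disk() tears both down, which is why the separate queue pointer and the free_queue error label disappear. A condensed sketch of the resulting pattern, assuming the 5.14-era block API:

    struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
    if (!disk)
        return -ENOMEM;
    disk->major       = major_num;
    disk->first_minor = dev_id * 16;
    disk->minors      = 16;
    disk->fops        = &nfhd_ops;
    blk_queue_logical_block_size(disk->queue, bsize);  /* queue comes embedded in the disk */
    add_disk(disk);
    /* ... */
    del_gendisk(disk);
    blk_cleanup_disk(disk);   /* frees disk and queue together */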
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c
index 57e8c8fb5eba..92636c89d65b 100644
--- a/arch/m68k/emu/nfcon.c
+++ b/arch/m68k/emu/nfcon.c
@@ -85,7 +85,7 @@ static int nfcon_tty_put_char(struct tty_struct *tty, unsigned char ch)
return 1;
}
-static int nfcon_tty_write_room(struct tty_struct *tty)
+static unsigned int nfcon_tty_write_room(struct tty_struct *tty)
{
return 64;
}
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 756c5cc58f94..8637bf8a2f65 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,8 @@
* We do not have SMP m68k systems, so we don't have to deal with that.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
/*
* The ColdFire parts cannot do some immediate to memory operations,
@@ -30,7 +30,7 @@
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \
@@ -38,7 +38,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#ifdef CONFIG_RMW_INSNS
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -48,12 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -63,14 +63,14 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return tmp; \
}
#else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -83,7 +83,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -120,27 +120,27 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_inc(atomic_t *v)
+static inline void arch_atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static inline void atomic_dec(atomic_t *v)
+static inline void arch_atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static inline int atomic_dec_and_test(atomic_t *v)
+static inline int arch_atomic_dec_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_dec_and_test atomic_dec_and_test
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-static inline int atomic_dec_and_test_lt(atomic_t *v)
+static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
char c;
__asm__ __volatile__(
@@ -150,49 +150,49 @@ static inline int atomic_dec_and_test_lt(atomic_t *v)
return c != 0;
}
-static inline int atomic_inc_and_test(atomic_t *v)
+static inline int arch_atomic_inc_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_inc_and_test atomic_inc_and_test
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
+ prev = arch_atomic_read(v);
if (prev == old)
- atomic_set(v, new);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
- atomic_set(v, new);
+ prev = arch_atomic_read(v);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
#endif /* !CONFIG_RMW_INSNS */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
+static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("subl %2,%1; seq %0"
@@ -200,9 +200,9 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_sub_and_test atomic_sub_and_test
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-static inline int atomic_add_negative(int i, atomic_t *v)
+static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
@@ -210,6 +210,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_add_negative atomic_add_negative
+#define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */
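The atomic.h rename is mechanical: the architecture now provides only the arch_atomic_*() primitives, and the unprefixed atomic_*() API is supplied by the generic instrumentation wrappers, which add KASAN/KCSAN annotations before delegating. Roughly, paraphrasing the generated wrapper in asm-generic/atomic-instrumented.h:

    static __always_inline int
    atomic_read(const atomic_t *v)
    {
        instrument_atomic_read(v, sizeof(*v));
        return arch_atomic_read(v);
    }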
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index a4aa82021d3b..e8ca4b0ccefa 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -76,11 +76,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
extern unsigned long __invalid_cmpxchg_size(volatile void *,
unsigned long, unsigned long, int);
@@ -118,14 +118,14 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#else
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index bc1228e00518..5c2c0a864524 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -32,8 +32,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
#define pmd_populate_kernel pmd_populate
-#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
-
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
unsigned long address)
{
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 8d4ec05996c5..6f2b87d7a50d 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -150,6 +150,8 @@
#ifndef __ASSEMBLY__
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/m68k/include/asm/mcfclk.h b/arch/m68k/include/asm/mcfclk.h
index 722627e06d66..4e9a6b827a14 100644
--- a/arch/m68k/include/asm/mcfclk.h
+++ b/arch/m68k/include/asm/mcfclk.h
@@ -15,15 +15,12 @@ struct clk_ops {
};
struct clk {
- const char *name;
struct clk_ops *clk_ops;
unsigned long rate;
unsigned long enabled;
u8 slot;
};
-extern struct clk *mcf_clks[];
-
#ifdef MCFPM_PPMCR0
extern struct clk_ops clk_ops0;
#ifdef MCFPM_PPMCR1
@@ -34,7 +31,6 @@ extern struct clk_ops clk_ops2;
#define DEFINE_CLK(clk_bank, clk_name, clk_slot, clk_rate) \
static struct clk __clk_##clk_bank##_##clk_slot = { \
- .name = clk_name, \
.clk_ops = &clk_ops##clk_bank, \
.rate = clk_rate, \
.slot = clk_slot, \
@@ -45,7 +41,6 @@ void __clk_init_disabled(struct clk *);
#else
#define DEFINE_CLK(clk_ref, clk_name, clk_rate) \
static struct clk clk_##clk_ref = { \
- .name = clk_name, \
.rate = clk_rate, \
}
#endif /* MCFPM_PPMCR0 */
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index a5d358855878..8ed6ac14d99f 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -31,7 +31,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
if (mm->context != NO_CONTEXT)
return;
- while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+ while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
atomic_inc(&nr_free_contexts);
steal_context();
}
diff --git a/arch/m68k/include/asm/mmzone.h b/arch/m68k/include/asm/mmzone.h
deleted file mode 100644
index 64573fe8e60d..000000000000
--- a/arch/m68k/include/asm/mmzone.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_MMZONE_H_
-#define _ASM_M68K_MMZONE_H_
-
-extern pg_data_t pg_data_map[];
-
-#define NODE_DATA(nid) (&pg_data_map[nid])
-#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
-
-#endif /* _ASM_M68K_MMZONE_H_ */
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index b4fc3b4f6bb3..74a817d9387f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -88,7 +88,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_set(pmd, page);
}
-#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 8076467eff4b..a2908164ee6f 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -105,6 +105,8 @@ extern unsigned long mm_cachebits;
#define __S110 PAGE_SHARED_C
#define __S111 PAGE_SHARED_C
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 97087dd3ca6d..2f1c54e4725d 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -62,7 +62,7 @@ extern unsigned long _ramend;
#include <asm/page_no.h>
#endif
-#if !defined(CONFIG_MMU) || defined(CONFIG_DISCONTIGMEM)
+#ifndef CONFIG_MMU
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
#endif
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 2411ea9ef578..a5b459bcb7d8 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -126,26 +126,6 @@ static inline void *__va(unsigned long x)
extern int m68k_virt_to_node_shift;
-#ifndef CONFIG_DISCONTIGMEM
-#define __virt_to_node(addr) (&pg_data_map[0])
-#else
-extern struct pglist_data *pg_data_table[];
-
-static inline __attribute_const__ int __virt_to_node_shift(void)
-{
- int shift;
-
- asm (
- "1: moveq #0,%0\n"
- m68k_fixup(%c1, 1b)
- : "=d" (shift)
- : "i" (m68k_fixup_vnode_shift));
- return shift;
-}
-
-#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
-#endif
-
#define virt_to_page(addr) ({ \
pfn_to_page(virt_to_pfn(addr)); \
})
@@ -153,23 +133,8 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
pfn_to_virt(page_to_pfn(page)); \
})
-#ifdef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn) ({ \
- unsigned long __pfn = (pfn); \
- struct pglist_data *pgdat; \
- pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
- pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
-})
-#define page_to_pfn(_page) ({ \
- const struct page *__p = (_page); \
- struct pglist_data *pgdat; \
- pgdat = &pg_data_map[page_to_nid(__p)]; \
- ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
-})
-#else
#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
-#endif
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
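With DISCONTIGMEM gone, pfn/page conversion falls back to the FLATMEM model pulled in via <asm-generic/memory_model.h>, using the ARCH_PFN_OFFSET defined just above; roughly:

    #define __pfn_to_page(pfn)   (mem_map + ((pfn) - ARCH_PFN_OFFSET))
    #define __page_to_pfn(page)  ((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)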
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 8d0f862ee9d7..c9d0d84158a4 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -13,9 +13,9 @@ extern unsigned long memory_end;
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#define __pa(vaddr) ((unsigned long)(vaddr))
#define __va(paddr) ((void *)((unsigned long)(paddr)))
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index aca22c2c1ee2..143ba7de9bda 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -72,7 +72,6 @@
#define PTRS_PER_PGD 128
#endif
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 000f64869b91..198036aff519 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -32,7 +32,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_val(*pmd) = __pa((unsigned long)page_address(page));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index 5337bc2c262f..a6318ccd308f 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -263,7 +263,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr
BUG();
}
-static inline void flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
BUG();
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
deleted file mode 100644
index 98c8930d3d35..000000000000
--- a/arch/m68k/include/asm/unaligned.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_UNALIGNED_H
-#define _ASM_M68K_UNALIGNED_H
-
-
-#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#else
-/*
- * The m68k can do unaligned accesses itself.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif
-
-#endif /* _ASM_M68K_UNALIGNED_H */
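Deleting asm/unaligned.h means m68k now picks up the consolidated generic implementation. The old header hard-wired get_unaligned() to big-endian loads; the generic one derives the access from the pointer's type, which is equivalent on big-endian m68k, but new code generally spells out the byte order. A usage sketch with the explicit helpers:

    #include <asm/unaligned.h>

    u32 hi = get_unaligned_be32(buf);      /* explicit big-endian load */
    u16 lo = get_unaligned_le16(buf + 4);  /* explicit little-endian load */
    put_unaligned_be32(hi, out);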
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 1c1b875fadc1..2e192a5df949 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -34,9 +34,6 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot)
return prot;
}
#else
-
-#include <asm/cacheflush.h>
-
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index da83cc83e791..db49f9091711 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -268,7 +268,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index a4b7ee1df211..8f215e79e70e 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -623,7 +623,8 @@ static inline void siginfo_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
/* _sigfault._perf */
- BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c);
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index f55bdcb8e4f1..bd0274c7592e 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -402,8 +402,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
* to this process.
*/
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, addr);
- if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
+ vma = vma_lookup(current->mm, addr);
+ if (!vma || addr + len > vma->vm_end)
goto out_unlock;
}
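The sys_cacheflush() change uses the vma_lookup() helper added this cycle: unlike find_vma(), it returns a VMA only if it actually contains addr, which is why the explicit addr < vma->vm_start check can be dropped. Roughly, from <linux/mm.h>:

    static inline
    struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
    {
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (vma && addr < vma->vm_start)
            vma = NULL;

        return vma;
    }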
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 0dd019dc2136..541bc1b3a8f9 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -442,7 +442,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 1cdac959bd91..5d16f9b47aa9 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -933,13 +933,15 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
},
};
-static const struct resource mac_ide_quadra_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_quadra_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_F),
};
-static const struct resource mac_ide_pb_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_pb_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_C),
};
@@ -949,7 +951,7 @@ static const struct resource mac_pata_baboon_rsrc[] __initconst = {
DEFINE_RES_IRQ(IRQ_BABOON_1),
};
-static const struct pata_platform_info mac_pata_baboon_data __initconst = {
+static const struct pata_platform_info mac_pata_data __initconst = {
.ioport_shift = 2,
};
@@ -1067,17 +1069,19 @@ int __init mac_platform_init(void)
switch (macintosh_config->ide_type) {
case MAC_IDE_QUADRA:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_quadra_rsrc, ARRAY_SIZE(mac_ide_quadra_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_quadra_rsrc, ARRAY_SIZE(mac_pata_quadra_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_PB:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_pb_rsrc, ARRAY_SIZE(mac_ide_pb_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_pb_rsrc, ARRAY_SIZE(mac_pata_pb_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_BABOON:
platform_device_register_resndata(NULL, "pata_platform", -1,
mac_pata_baboon_rsrc, ARRAY_SIZE(mac_pata_baboon_rsrc),
- &mac_pata_baboon_data, sizeof(mac_pata_baboon_data));
+ &mac_pata_data, sizeof(mac_pata_data));
break;
}
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 1759ab875d47..5d749e188246 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -44,28 +44,8 @@ EXPORT_SYMBOL(empty_zero_page);
int m68k_virt_to_node_shift;
-#ifdef CONFIG_DISCONTIGMEM
-pg_data_t pg_data_map[MAX_NUMNODES];
-EXPORT_SYMBOL(pg_data_map);
-
-pg_data_t *pg_data_table[65];
-EXPORT_SYMBOL(pg_data_table);
-#endif
-
void __init m68k_setup_node(int node)
{
-#ifdef CONFIG_DISCONTIGMEM
- struct m68k_mem_info *info = m68k_memory + node;
- int i, end;
-
- i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
- end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
- for (; i <= end; i++) {
- if (pg_data_table[i])
- pr_warn("overlap at %u for chunk %u\n", i, node);
- pg_data_table[i] = pg_data_map + node;
- }
-#endif
node_set_online(node);
}
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index d6a423875231..5caf1e5be1c2 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -286,14 +286,39 @@ static int q40_set_rtc_pll(struct rtc_pll_info *pll)
return -EINVAL;
}
-static __init int q40_add_kbd_device(void)
-{
- struct platform_device *pdev;
+#define PCIDE_BASE1 0x1f0
+#define PCIDE_BASE2 0x170
+#define PCIDE_CTL 0x206
+
+static const struct resource q40_pata_rsrc_0[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE1 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE1 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE1, 8),
+ DEFINE_RES_IO(PCIDE_BASE1 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(14),
+};
+static const struct resource q40_pata_rsrc_1[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE2 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE2 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE2, 8),
+ DEFINE_RES_IO(PCIDE_BASE2 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(15),
+};
+
+static __init int q40_platform_init(void)
+{
if (!MACH_IS_Q40)
return -ENODEV;
- pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
- return PTR_ERR_OR_ZERO(pdev);
+ platform_device_register_simple("q40kbd", -1, NULL, 0);
+
+ platform_device_register_simple("atari-falcon-ide", 0, q40_pata_rsrc_0,
+ ARRAY_SIZE(q40_pata_rsrc_0));
+
+ platform_device_register_simple("atari-falcon-ide", 1, q40_pata_rsrc_1,
+ ARRAY_SIZE(q40_pata_rsrc_1));
+
+ return 0;
}
-arch_initcall(q40_add_kbd_device);
+arch_initcall(q40_platform_init);
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 0660f47012bc..14a67a42fcae 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -43,6 +43,7 @@ config MICROBLAZE
select MMU_GATHER_NO_RANGE
select SPARSE_IRQ
select SET_FS
+ select ZONE_DMA
# Endianness selection
choice
@@ -60,9 +61,6 @@ config CPU_LITTLE_ENDIAN
endchoice
-config ZONE_DMA
- def_bool y
-
config ARCH_HAS_ILOG2_U32
def_bool n
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index b41f323e1fde..6d4af39e3890 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -3,7 +3,7 @@ KBUILD_DEFCONFIG := mmu_defconfig
UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
-# What CPU vesion are we building for, and crack it open
+# What CPU version are we building for, and crack it open
# as major.minor.rev
CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 29b0e557aa7c..a055f5dbe00a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
deleted file mode 100644
index 41e9aff23a62..000000000000
--- a/arch/microblaze/include/asm/atomic.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_ATOMIC_H
-#define _ASM_MICROBLAZE_ATOMIC_H
-
-#include <asm/cmpxchg.h>
-#include <asm-generic/atomic.h>
-#include <asm-generic/atomic64.h>
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- unsigned long flags;
- int res;
-
- local_irq_save(flags);
- res = v->counter - 1;
- if (res >= 0)
- v->counter = res;
- local_irq_restore(flags);
-
- return res;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-
-#endif /* _ASM_MICROBLAZE_ATOMIC_H */
diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h
deleted file mode 100644
index 3523b51aab36..000000000000
--- a/arch/microblaze/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_CMPXCHG_H
-#define _ASM_MICROBLAZE_CMPXCHG_H
-
-#ifndef CONFIG_SMP
-# include <asm-generic/cmpxchg.h>
-#endif
-
-#endif /* _ASM_MICROBLAZE_CMPXCHG_H */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index bf681f272f72..ce550978f4fc 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -35,9 +35,6 @@
#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
-#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
-#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
-
/*
* PAGE_OFFSET -- the first address of the first page of memory. With MMU
* it is set to the kernel start address (aligned on a page boundary).
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index d56b9f670ad1..6c33b05f730f 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -28,8 +28,6 @@ static inline pgd_t *get_pgd(void)
#define pgd_alloc(mm) get_pgd()
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 9ae8d2c17dd5..71cd547655d9 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -25,8 +25,6 @@ extern int mem_init_done;
#include <asm/mmu.h>
#include <asm/page.h>
-#define FIRST_USER_ADDRESS 0UL
-
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h
deleted file mode 100644
index 448299beab69..000000000000
--- a/arch/microblaze/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2006 Atmark Techno, Inc.
- */
-
-#ifndef _ASM_MICROBLAZE_UNALIGNED_H
-#define _ASM_MICROBLAZE_UNALIGNED_H
-
-# ifdef __KERNEL__
-
-# ifdef __MICROBLAZEEL__
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-# else
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-# endif
-
-# include <linux/unaligned/generic.h>
-
-# endif /* __KERNEL__ */
-#endif /* _ASM_MICROBLAZE_UNALIGNED_H */
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 6c69ce7be2e8..b77dd188dec4 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -70,7 +70,6 @@ int main(int argc, char *argv[])
/* struct task_struct */
DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 2ac716984ca2..a176faca2927 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -448,7 +448,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ed51970c08e7..cee6087cd686 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -112,6 +112,7 @@ config MACH_INGENIC
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_ZBOOT
select DMA_NONCOHERENT
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select IRQ_MIPS_CPU
select PINCTRL
select GPIOLIB
@@ -201,6 +202,7 @@ config MIPS_ALCHEMY
config AR7
bool "Texas Instruments AR7"
select BOOT_ELF32
+ select COMMON_CLK
select DMA_NONCOHERENT
select CEVT_R4K
select CSRC_R4K
@@ -215,7 +217,6 @@ config AR7
select SYS_SUPPORTS_ZBOOT_UART16550
select GPIOLIB
select VLYNQ
- select HAVE_LEGACY_CLK
help
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
@@ -332,7 +333,6 @@ config BCM63XX
select SWAP_IO_SPACE
select GPIOLIB
select MIPS_L1_CACHE_SHIFT_4
- select CLKDEV_LOOKUP
select HAVE_LEGACY_CLK
help
Support for BCM63XX based boards
@@ -428,6 +428,8 @@ config MACH_INGENIC_SOC
select MIPS_GENERIC
select MACH_INGENIC
select SYS_SUPPORTS_ZBOOT_UART16550
+ select CPU_SUPPORTS_CPUFREQ
+ select MIPS_EXTERNAL_TIMER
config LANTIQ
bool "Lantiq based platforms"
@@ -446,7 +448,6 @@ config LANTIQ
select GPIOLIB
select SWAP_IO_SPACE
select BOOT_RAW
- select CLKDEV_LOOKUP
select HAVE_LEGACY_CLK
select USE_OF
select PINCTRL
@@ -630,6 +631,7 @@ config MACH_NINTENDO64
config RALINK
bool "Ralink based machines"
select CEVT_R4K
+ select COMMON_CLK
select CSRC_R4K
select BOOT_RAW
select DMA_NONCOHERENT
@@ -642,7 +644,6 @@ config RALINK
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_ZBOOT
select SYS_HAS_EARLY_PRINTK
- select CLKDEV_LOOKUP
select ARCH_HAS_RESET_CONTROLLER
select RESET_CONTROLLER
@@ -996,7 +997,6 @@ config CAVIUM_OCTEON_SOC
select HAVE_PLAT_FW_INIT_CMDLINE
select HAVE_PLAT_MEMCPY
select ZONE_DMA32
- select HOLES_IN_ZONE
select GPIOLIB
select USE_OF
select ARCH_SPARSEMEM_ENABLE
@@ -1233,9 +1233,6 @@ config HAVE_PLAT_MEMCPY
config ISA_DMA_API
bool
-config HOLES_IN_ZONE
- bool
-
config SYS_SUPPORTS_RELOCATABLE
bool
help
@@ -2867,7 +2864,7 @@ config RANDOMIZE_BASE_MAX_OFFSET
config NODES_SHIFT
int
default "6"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
@@ -3277,13 +3274,6 @@ config I8253
select CLKSRC_I8253
select CLKEVT_I8253
select MIPS_EXTERNAL_TIMER
-
-config ZONE_DMA
- bool
-
-config ZONE_DMA32
- bool
-
endmenu
config TRAD_SIGNALS
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index f5832a49a881..43dbf5930796 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -102,6 +102,14 @@ config DEBUG_ZBOOT
to reduce the kernel image size and speed up the booting procedure a
little.
+config ZBOOT_INGENIC_UART
+ int "UART to use for compressed kernel debugging"
+ depends on DEBUG_ZBOOT && MACH_INGENIC_SOC
+ default 0
+ range 0 4
+ help
+ Specify the UART that should be used for compressed kernel debugging.
+
config SPINLOCK_TEST
bool "Enable spinlock timing tests in debugfs"
depends on DEBUG_FS
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index b184baa4e56a..f175bce2987f 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -18,6 +18,7 @@
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
index 95def949c971..c717acbc5506 100644
--- a/arch/mips/ar7/clock.c
+++ b/arch/mips/ar7/clock.c
@@ -13,7 +13,9 @@
#include <linux/gcd.h>
#include <linux/io.h>
#include <linux/err.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <asm/addrspace.h>
#include <asm/mach-ar7/ar7.h>
@@ -84,17 +86,17 @@ struct tnetd7200_clocks {
struct tnetd7200_clock usb;
};
-static struct clk bus_clk = {
+struct clk_rate {
+ u32 rate;
+};
+static struct clk_rate bus_clk = {
.rate = 125000000,
};
-static struct clk cpu_clk = {
+static struct clk_rate cpu_clk = {
.rate = 150000000,
};
-static struct clk dsp_clk;
-static struct clk vbus_clk;
-
static void approximate(int base, int target, int *prediv,
int *postdiv, int *mul)
{
@@ -240,6 +242,8 @@ static void __init tnetd7300_init_clocks(void)
struct tnetd7300_clocks *clocks =
ioremap(UR8_REGS_CLOCKS,
sizeof(struct tnetd7300_clocks));
+ u32 dsp_clk;
+ struct clk *clk;
bus_clk.rate = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
&clocks->bus, bootcr, AR7_AFE_CLOCK);
@@ -250,12 +254,18 @@ static void __init tnetd7300_init_clocks(void)
else
cpu_clk.rate = bus_clk.rate;
- if (dsp_clk.rate == 250000000)
+ dsp_clk = tnetd7300_dsp_clock();
+ if (dsp_clk == 250000000)
tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp,
- bootcr, dsp_clk.rate);
+ bootcr, dsp_clk);
iounmap(clocks);
iounmap(bootcr);
+
+ clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
+ clkdev_create(clk, "cpu", NULL);
+ clk = clk_register_fixed_rate(NULL, "dsp", NULL, 0, dsp_clk);
+ clkdev_create(clk, "dsp", NULL);
}
static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock,
@@ -327,6 +337,7 @@ static void __init tnetd7200_init_clocks(void)
int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
int usb_base, usb_mul, usb_prediv, usb_postdiv;
+ struct clk *clk;
cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr);
@@ -395,100 +406,34 @@ static void __init tnetd7200_init_clocks(void)
usb_prediv, usb_postdiv, -1, usb_mul,
TNETD7200_DEF_USB_CLK);
- dsp_clk.rate = cpu_clk.rate;
-
iounmap(clocks);
iounmap(bootcr);
-}
-
-/*
- * Linux clock API
- */
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (!clk)
- return 0;
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- if (!strcmp(id, "bus"))
- return &bus_clk;
- /* cpmac and vbus share the same rate */
- if (!strcmp(id, "cpmac"))
- return &vbus_clk;
- if (!strcmp(id, "cpu"))
- return &cpu_clk;
- if (!strcmp(id, "dsp"))
- return &dsp_clk;
- if (!strcmp(id, "vbus"))
- return &vbus_clk;
- return ERR_PTR(-ENOENT);
+ clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
+ clkdev_create(clk, "cpu", NULL);
+ clkdev_create(clk, "dsp", NULL);
}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
void __init ar7_init_clocks(void)
{
+ struct clk *clk;
+
switch (ar7_chip_id()) {
case AR7_CHIP_7100:
case AR7_CHIP_7200:
tnetd7200_init_clocks();
break;
case AR7_CHIP_7300:
- dsp_clk.rate = tnetd7300_dsp_clock();
tnetd7300_init_clocks();
break;
default:
break;
}
+ clk = clk_register_fixed_rate(NULL, "bus", NULL, 0, bus_clk.rate);
+ clkdev_create(clk, "bus", NULL);
/* adjust vbus clock rate */
- vbus_clk.rate = bus_clk.rate / 2;
-}
-
-/* dummy functions, should not be called */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- WARN_ON(clk);
- return 0;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- WARN_ON(clk);
- return 0;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- WARN_ON(clk);
- return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
- WARN_ON(clk);
- return NULL;
+ clk = clk_register_fixed_factor(NULL, "vbus", "bus", 0, 1, 2);
+ clkdev_create(clk, "vbus", NULL);
+ clkdev_create(clk, "cpmac", "cpmac.1");
+ clkdev_create(clk, "cpmac", "cpmac.1");
}
-EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
index aee8d7b8f091..c18d7f72d9d9 100644
--- a/arch/mips/boot/compressed/uart-16550.c
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -19,8 +19,8 @@
#endif
#ifdef CONFIG_MACH_INGENIC
-#define INGENIC_UART0_BASE_ADDR 0x10030000
-#define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset))
+#define INGENIC_UART_BASE_ADDR (0x10030000 + 0x1000 * CONFIG_ZBOOT_INGENIC_UART)
+#define PORT(offset) (CKSEG1ADDR(INGENIC_UART_BASE_ADDR) + (4 * offset))
#endif
#ifdef CONFIG_CPU_XLR
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 8877c62609de..a688809beebc 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -118,6 +118,20 @@
assigned-clock-rates = <48000000>;
};
+&tcu {
+ /*
+ * 750 kHz for the system timers and clocksource,
+ * use channel #0 and #1 for the per cpu system timers,
+ * and use channel #2 for the clocksource.
+ *
+ * 3000 kHz for the OST timer to provide a higher
+ * precision clocksource.
+ */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_OST>;
+ assigned-clock-rates = <750000>, <750000>, <750000>, <3000000>;
+};
+
&mmc0 {
status = "okay";
@@ -522,13 +536,3 @@
bias-disable;
};
};
-
-&tcu {
- /*
- * 750 kHz for the system timer and 3 MHz for the clocksource,
- * use channel #0 for the system timer, #1 for the clocksource.
- */
- assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
- <&tcu TCU_CLK_OST>;
- assigned-clock-rates = <750000>, <3000000>, <3000000>;
-};
diff --git a/arch/mips/boot/dts/ingenic/gcw0.dts b/arch/mips/boot/dts/ingenic/gcw0.dts
index f4c04f2263ea..4abb0318416c 100644
--- a/arch/mips/boot/dts/ingenic/gcw0.dts
+++ b/arch/mips/boot/dts/ingenic/gcw0.dts
@@ -74,7 +74,6 @@
simple-audio-card,widgets =
"Speaker", "Speaker",
"Headphone", "Headphones",
- "Line", "FM Radio",
"Microphone", "Built-in Mic";
simple-audio-card,routing =
"Headphones Amp INL", "LHPOUT",
@@ -85,8 +84,8 @@
"Speaker Amp INR", "ROUT",
"Speaker", "Speaker Amp OUTL",
"Speaker", "Speaker Amp OUTR",
- "LLINEIN", "FM Radio",
- "RLINEIN", "FM Radio",
+ "LLINEIN", "Cap-less",
+ "RLINEIN", "Cap-less",
"Built-in Mic", "MICBIAS",
"MIC1P", "Built-in Mic",
"MIC1N", "Built-in Mic";
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index 8d01feef7ff5..9e34f433b9b5 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -339,7 +339,7 @@
};
i2c0: i2c@10050000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
@@ -357,7 +357,7 @@
};
i2c1: i2c@10051000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10051000 0x1000>;
@@ -374,7 +374,7 @@
};
i2c2: i2c@10052000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10052000 0x1000>;
@@ -391,7 +391,7 @@
};
i2c3: i2c@10053000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10053000 0x1000>;
@@ -408,7 +408,7 @@
};
i2c4: i2c@10054000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10054000 0x1000>;
diff --git a/arch/mips/boot/dts/ingenic/rs90.dts b/arch/mips/boot/dts/ingenic/rs90.dts
index 4eb1edbfc155..74fee7f01352 100644
--- a/arch/mips/boot/dts/ingenic/rs90.dts
+++ b/arch/mips/boot/dts/ingenic/rs90.dts
@@ -16,6 +16,18 @@
reg = <0x0 0x2000000>;
};
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vmem: video-memory@1f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x1f00000 0x100000>;
+ reusable;
+ };
+ };
+
vcc: regulator {
compatible = "regulator-fixed";
@@ -300,6 +312,8 @@
};
&lcd {
+ memory-region = <&vmem>;
+
pinctrl-names = "default";
pinctrl-0 = <&pins_lcd>;
};
diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi
index aac9dedaf334..dec7909d4baa 100644
--- a/arch/mips/boot/dts/ingenic/x1000.dtsi
+++ b/arch/mips/boot/dts/ingenic/x1000.dtsi
@@ -80,6 +80,11 @@
status = "disabled";
};
+
+ mac_phy_ctrl: mac-phy-ctrl@e8 {
+ compatible = "syscon";
+ reg = <0xe8 0x4>;
+ };
};
ost: timer@12000000 {
@@ -347,6 +352,8 @@
clocks = <&cgu X1000_CLK_MAC>;
clock-names = "stmmaceth";
+ mode-reg = <&mac_phy_ctrl>;
+
status = "disabled";
mdio: mdio {
diff --git a/arch/mips/boot/dts/ingenic/x1830.dtsi b/arch/mips/boot/dts/ingenic/x1830.dtsi
index b21c93057356..215257f8bb1a 100644
--- a/arch/mips/boot/dts/ingenic/x1830.dtsi
+++ b/arch/mips/boot/dts/ingenic/x1830.dtsi
@@ -73,6 +73,11 @@
status = "disabled";
};
+
+ mac_phy_ctrl: mac-phy-ctrl@e8 {
+ compatible = "syscon";
+ reg = <0xe8 0x4>;
+ };
};
ost: timer@12000000 {
@@ -97,9 +102,9 @@
#clock-cells = <1>;
- clocks = <&cgu X1830_CLK_RTCLK
- &cgu X1830_CLK_EXCLK
- &cgu X1830_CLK_PCLK>;
+ clocks = <&cgu X1830_CLK_RTCLK>,
+ <&cgu X1830_CLK_EXCLK>,
+ <&cgu X1830_CLK_PCLK>;
clock-names = "rtc", "ext", "pclk";
interrupt-controller;
@@ -274,8 +279,7 @@
pdma: dma-controller@13420000 {
compatible = "ingenic,x1830-dma";
- reg = <0x13420000 0x400
- 0x13421000 0x40>;
+ reg = <0x13420000 0x400>, <0x13421000 0x40>;
#dma-cells = <2>;
interrupt-parent = <&intc>;
@@ -337,6 +341,8 @@
clocks = <&cgu X1830_CLK_MAC>;
clock-names = "stmmaceth";
+ mode-reg = <&mac_phy_ctrl>;
+
status = "disabled";
mdio: mdio {
diff --git a/arch/mips/boot/dts/loongson/Makefile b/arch/mips/boot/dts/loongson/Makefile
index 72267bfda9b4..5c6433e441ee 100644
--- a/arch/mips/boot/dts/loongson/Makefile
+++ b/arch/mips/boot/dts/loongson/Makefile
@@ -1,4 +1,4 @@
-# SPDX_License_Identifier: GPL_2.0
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64_2core_2k1000.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_ls7a.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_rs780e.dtb
diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
index 569e814def83..bfc3d3243ee7 100644
--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
@@ -23,7 +23,7 @@
};
};
- memory {
+ memory@200000 {
compatible = "memory";
device_type = "memory";
reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */
@@ -114,6 +114,52 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x18000000 0x0 0x00010000>,
<0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>;
+ gmac@3,0 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+ "pciclass0c03",
+ "loongson, pci-gmac";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+ <13 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
+
+ gmac@3,1 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+ "pciclass0c03",
+ "loongson, pci-gmac";
+
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
+ <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: ethernet-phy@1 {
+ reg = <0>;
+ };
+ };
+ };
+
ehci@4,1 {
compatible = "pci0014,7a14.0",
"pci0014,7a14",
@@ -163,8 +209,8 @@
};
pci_bridge@a,0 {
- compatible = "pci0014,7a19.0",
- "pci0014,7a19",
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
"pciclass060400",
"pciclass0604";
@@ -178,8 +224,8 @@
};
pci_bridge@b,0 {
- compatible = "pci0014,7a19.0",
- "pci0014,7a19",
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
"pciclass060400",
"pciclass0604";
@@ -193,8 +239,8 @@
};
pci_bridge@c,0 {
- compatible = "pci0014,7a19.0",
- "pci0014,7a19",
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
"pciclass060400",
"pciclass0604";
@@ -223,8 +269,8 @@
};
pci_bridge@e,0 {
- compatible = "pci0014,7a19.0",
- "pci0014,7a19",
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
"pciclass060400",
"pciclass0604";
diff --git a/arch/mips/boot/dts/loongson/loongson64g-package.dtsi b/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
index 38abc570cd82..d4314f62ccc2 100644
--- a/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
+++ b/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
@@ -39,7 +39,7 @@
};
- cpu_uart0: serial@1fe001e0 {
+ cpu_uart0: serial@1fe00100 {
compatible = "ns16550a";
reg = <0 0x1fe00100 0x10>;
clock-frequency = <100000000>;
@@ -48,7 +48,7 @@
no-loopback-test;
};
- cpu_uart1: serial@1fe001e8 {
+ cpu_uart1: serial@1fe00110 {
status = "disabled";
compatible = "ns16550a";
reg = <0 0x1fe00110 0x10>;
diff --git a/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts b/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
index 41f0b110d455..d0588d81e0c2 100644
--- a/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
+++ b/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
@@ -88,7 +88,7 @@
interrupt-map-mask = <0x1800 0x0 0x0 0x7>;
};
- isa {
+ isa@18000000 {
compatible = "isa";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
index f99a7a11fded..2f45fce2cdc4 100644
--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
@@ -186,7 +186,8 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
- "pciclass0200";
+ "pciclass0200",
+ "loongson, pci-gmac";
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
@@ -208,7 +209,8 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
- "pciclass0200";
+ "pciclass0200",
+ "loongson, pci-gmac";
reg = <0x1900 0x0 0x0 0x0 0x0>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
@@ -407,7 +409,7 @@
};
};
- isa {
+ isa@18000000 {
compatible = "isa";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/mips/boot/dts/loongson/rs780e-pch.dtsi b/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
index 871c866e0423..6f459511e6c9 100644
--- a/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
+++ b/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
@@ -21,7 +21,7 @@
<0x02000000 0 0x40000000 0 0x40000000 0 0x40000000>;
};
- isa {
+ isa@18000000 {
compatible = "isa";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/mips/boot/dts/mti/sead3.dts b/arch/mips/boot/dts/mti/sead3.dts
index 1cf6728af8fe..046c97a29710 100644
--- a/arch/mips/boot/dts/mti/sead3.dts
+++ b/arch/mips/boot/dts/mti/sead3.dts
@@ -244,7 +244,7 @@
no-loopback-test;
};
- eth@1f010000 {
+ ethernet@1f010000 {
compatible = "smsc,lan9115";
reg = <0x1f010000 0x10000>;
reg-io-width = <4>;
diff --git a/arch/mips/boot/dts/qca/Makefile b/arch/mips/boot/dts/qca/Makefile
index 4451cf45b0ad..6749f77068a8 100644
--- a/arch/mips/boot/dts/qca/Makefile
+++ b/arch/mips/boot/dts/qca/Makefile
@@ -4,4 +4,5 @@ dtb-$(CONFIG_ATH79) += ar9132_tl_wr1043nd_v1.dtb
dtb-$(CONFIG_ATH79) += ar9331_dpt_module.dtb
dtb-$(CONFIG_ATH79) += ar9331_dragino_ms14.dtb
dtb-$(CONFIG_ATH79) += ar9331_omega.dtb
+dtb-$(CONFIG_ATH79) += ar9331_openembed_som9331_board.dtb
dtb-$(CONFIG_ATH79) += ar9331_tl_mr3020.dtb
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index 83b3c0ce135a..c4102b280b47 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -148,6 +148,7 @@
fixed-link {
speed = <1000>;
full-duplex;
+ pause;
};
mdio {
@@ -183,6 +184,7 @@
fixed-link {
speed = <1000>;
full-duplex;
+ pause;
};
};
diff --git a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
new file mode 100644
index 000000000000..e6622f8e8c2b
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "OpenEmbed SOM9331 Board";
+ compatible = "openembed,som9331";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q64FVSIG SPI flash */
+ spiflash: w25q64@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q64", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
+
+&eth0 {
+ status = "okay";
+};
+
+&eth1 {
+ status = "okay";
+};
+
+&switch_port1 {
+ label = "lan0";
+ status = "okay";
+};
+
+&switch_port3 {
+ label = "lan1";
+ status = "okay";
+};
+
+&phy_port0 {
+ status = "okay";
+};
+
+&phy_port2 {
+ status = "okay";
+};
+
+&phy_port4 {
+ status = "okay";
+};
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 950e6c6e8629..6e4d3619137a 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -516,20 +516,13 @@ static int __init dwc3_octeon_device_init(void)
if (!pdev)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- put_device(&pdev->dev);
- dev_err(&pdev->dev, "No memory resources\n");
- return -ENXIO;
- }
-
/*
* The code below maps in the registers necessary for
* setting up the clocks and reseting PHYs. We must
* release the resources so the dwc3 subsystem doesn't
* know the difference.
*/
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
put_device(&pdev->dev);
return PTR_ERR(base);
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index a18609cf0e5e..f02101ff04b3 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -13,7 +13,6 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
@@ -31,9 +30,8 @@ CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_MACH_LOONGSON64=y
CONFIG_CPU_HAS_MSA=y
-CONFIG_NR_CPUS=16
CONFIG_NUMA=y
-CONFIG_SMP=y
+CONFIG_NR_CPUS=16
CONFIG_HZ_256=y
CONFIG_KEXEC=y
CONFIG_MIPS32_O32=y
@@ -206,7 +204,6 @@ CONFIG_VIRTIO_NET=m
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_I825XX is not set
CONFIG_E1000=y
CONFIG_E1000E=y
@@ -248,7 +245,6 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_ATH9K=m
CONFIG_HOSTAP=m
-CONFIG_INPUT_POLLDEV=m
CONFIG_INPUT_SPARSEKMAP=y
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
@@ -280,7 +276,6 @@ CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83627HF=m
CONFIG_MEDIA_SUPPORT=m
-CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_DRM=y
@@ -391,18 +386,13 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_PRINTK_TIME=y
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
index faa88a6a74c0..0a03529cf317 100644
--- a/arch/mips/crypto/crc32-mips.c
+++ b/arch/mips/crypto/crc32-mips.c
@@ -8,13 +8,13 @@
* Copyright (C) 2018 MIPS Tech, LLC
*/
-#include <linux/unaligned/access_ok.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mipsregs.h>
+#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 27ad76791539..95e1f7f3597f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -25,24 +25,25 @@
#include <asm/war.h>
#define ATOMIC_OPS(pfx, type) \
-static __always_inline type pfx##_read(const pfx##_t *v) \
+static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
{ \
return READ_ONCE(v->counter); \
} \
\
-static __always_inline void pfx##_set(pfx##_t *v, type i) \
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
{ \
WRITE_ONCE(v->counter, i); \
} \
\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+static __always_inline type \
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
{ \
- return cmpxchg(&v->counter, o, n); \
+ return arch_cmpxchg(&v->counter, o, n); \
} \
\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
{ \
- return xchg(&v->counter, n); \
+ return arch_xchg(&v->counter, n); \
}
ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
#endif
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
{ \
type temp; \
\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v) \
}
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
}
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \
int temp, result; \
\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#endif
#undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
* The function returns the old value of @v minus @i.
*/
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
}
ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif
#undef ATOMIC_SIP_OP
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index ed8f3f3c4304..0b983800f48b 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
}
}
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg_local(ptr, old, new) \
+#define arch_cmpxchg_local(ptr, old, new) \
((__typeof__(*(ptr))) \
__cmpxchg((ptr), \
(unsigned long)(__typeof__(*(ptr)))(old), \
(unsigned long)(__typeof__(*(ptr)))(new), \
sizeof(*(ptr))))
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
- __res = cmpxchg_local((ptr), (old), (new)); \
+ __res = arch_cmpxchg_local((ptr), (old), (new)); \
\
/* \
* In the Loongson3 workaround case __cmpxchg_asm() already \
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
})
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#else
# include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
# ifdef CONFIG_SMP
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
return ret;
}
-# define cmpxchg64(ptr, o, n) ({ \
+# define arch_cmpxchg64(ptr, o, n) ({ \
unsigned long long __old = (__typeof__(*(ptr)))(o); \
unsigned long long __new = (__typeof__(*(ptr)))(n); \
__typeof__(*(ptr)) __res; \
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
})
# else /* !CONFIG_SMP */
-# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 336e02b3b3ce..3d71081afc55 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -64,6 +64,8 @@
((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
#define __isa_range_or_flag(ge, lt, flag) \
(__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))
+#define __isa_range_and_ase(ge, lt, ase) \
+ (__isa_range(ge, lt) && __ase(ase))
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
@@ -421,7 +423,7 @@
#endif
#ifndef cpu_has_mipsmt
-#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
+#define cpu_has_mipsmt __isa_range_and_ase(2, 6, MIPS_ASE_MIPSMT)
#endif
#ifndef cpu_has_vp
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 292d0425717f..92a380210017 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -36,7 +36,7 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index 10e3be870df7..c2144409c0c4 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -46,7 +46,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
+ /*
+ * clear the huge pte entry firstly, so that the other smp threads will
+ * not get old pte entry after finishing flush_tlb_page and before
+ * setting new huge pte entry
+ */
+ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_tlb_page(vma, addr);
}
#define __HAVE_ARCH_HUGE_PTE_NONE
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index f021de661c3a..d1477ecb1af9 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -11,7 +11,6 @@
#include <linux/linkage.h>
#include <linux/smp.h>
-#include <linux/irqdomain.h>
#include <asm/mipsmtregs.h>
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index fca4547d580f..696f6b009377 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -109,10 +109,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_generic generic;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 wait_exits;
u64 cache_exits;
u64 signal_exits;
@@ -142,12 +143,6 @@ struct kvm_vcpu_stat {
#ifdef CONFIG_CPU_LOONGSON64
u64 vz_cpucfg_exits;
#endif
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
};
struct kvm_arch_memory_slot {
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index dd09c3bf0601..cbe75ade3277 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -131,10 +131,6 @@ static inline u8 ar7_chip_rev(void)
0x14))) >> 16) & 0xff;
}
-struct clk {
- unsigned int rate;
-};
-
static inline int ar7_has_high_cpmac(void)
{
u16 chip_id = ar7_chip_id();
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
index 757ce53d00e6..d51dfad8f543 100644
--- a/arch/mips/include/asm/mach-ralink/mt7620.h
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -83,52 +83,13 @@
#define MT7620_DDR2_SIZE_MIN 32
#define MT7620_DDR2_SIZE_MAX 256
-#define MT7620_GPIO_MODE_UART0_SHIFT 2
-#define MT7620_GPIO_MODE_UART0_MASK 0x7
-#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
-#define MT7620_GPIO_MODE_UARTF 0x0
-#define MT7620_GPIO_MODE_PCM_UARTF 0x1
-#define MT7620_GPIO_MODE_PCM_I2S 0x2
-#define MT7620_GPIO_MODE_I2S_UARTF 0x3
-#define MT7620_GPIO_MODE_PCM_GPIO 0x4
-#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
-#define MT7620_GPIO_MODE_GPIO_I2S 0x6
-#define MT7620_GPIO_MODE_GPIO 0x7
-
-#define MT7620_GPIO_MODE_NAND 0
-#define MT7620_GPIO_MODE_SD 1
-#define MT7620_GPIO_MODE_ND_SD_GPIO 2
-#define MT7620_GPIO_MODE_ND_SD_MASK 0x3
-#define MT7620_GPIO_MODE_ND_SD_SHIFT 18
-
-#define MT7620_GPIO_MODE_PCIE_RST 0
-#define MT7620_GPIO_MODE_PCIE_REF 1
-#define MT7620_GPIO_MODE_PCIE_GPIO 2
-#define MT7620_GPIO_MODE_PCIE_MASK 0x3
-#define MT7620_GPIO_MODE_PCIE_SHIFT 16
-
-#define MT7620_GPIO_MODE_WDT_RST 0
-#define MT7620_GPIO_MODE_WDT_REF 1
-#define MT7620_GPIO_MODE_WDT_GPIO 2
-#define MT7620_GPIO_MODE_WDT_MASK 0x3
-#define MT7620_GPIO_MODE_WDT_SHIFT 21
-
-#define MT7620_GPIO_MODE_MDIO 0
-#define MT7620_GPIO_MODE_MDIO_REFCLK 1
-#define MT7620_GPIO_MODE_MDIO_GPIO 2
-#define MT7620_GPIO_MODE_MDIO_MASK 0x3
-#define MT7620_GPIO_MODE_MDIO_SHIFT 7
-
-#define MT7620_GPIO_MODE_I2C 0
-#define MT7620_GPIO_MODE_UART1 5
-#define MT7620_GPIO_MODE_RGMII1 9
-#define MT7620_GPIO_MODE_RGMII2 10
-#define MT7620_GPIO_MODE_SPI 11
-#define MT7620_GPIO_MODE_SPI_REF_CLK 12
-#define MT7620_GPIO_MODE_WLED 13
-#define MT7620_GPIO_MODE_JTAG 15
-#define MT7620_GPIO_MODE_EPHY 15
-#define MT7620_GPIO_MODE_PA 20
+extern enum ralink_soc_type ralink_soc;
+
+static inline int is_mt76x8(void)
+{
+ return ralink_soc == MT762X_SOC_MT7628AN ||
+ ralink_soc == MT762X_SOC_MT7688;
+}
static inline int mt7620_get_eco(void)
{
diff --git a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h
deleted file mode 100644
index 048309348be0..000000000000
--- a/arch/mips/include/asm/mach-ralink/pinmux.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 John Crispin <john@phrozen.org>
- */
-
-#ifndef _RT288X_PINMUX_H__
-#define _RT288X_PINMUX_H__
-
-#define FUNC(name, value, pin_first, pin_count) \
- { name, value, pin_first, pin_count }
-
-#define GRP(_name, _func, _mask, _shift) \
- { .name = _name, .mask = _mask, .shift = _shift, \
- .func = _func, .gpio = _mask, \
- .func_count = ARRAY_SIZE(_func) }
-
-#define GRP_G(_name, _func, _mask, _gpio, _shift) \
- { .name = _name, .mask = _mask, .shift = _shift, \
- .func = _func, .gpio = _gpio, \
- .func_count = ARRAY_SIZE(_func) }
-
-struct rt2880_pmx_group;
-
-struct rt2880_pmx_func {
- const char *name;
- const char value;
-
- int pin_first;
- int pin_count;
- int *pins;
-
- int *groups;
- int group_count;
-
- int enabled;
-};
-
-struct rt2880_pmx_group {
- const char *name;
- int enabled;
-
- const u32 shift;
- const char mask;
- const char gpio;
-
- struct rt2880_pmx_func *func;
- int func_count;
-};
-
-extern struct rt2880_pmx_group *rt2880_pinmux_data;
-
-#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
index 5d10178f26af..5f213534f0f5 100644
--- a/arch/mips/include/asm/mach-ralink/rt288x.h
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -33,15 +33,6 @@
#define SYSTEM_CONFIG_CPUCLK_280 0x2
#define SYSTEM_CONFIG_CPUCLK_300 0x3
-#define RT2880_GPIO_MODE_I2C BIT(0)
-#define RT2880_GPIO_MODE_UART0 BIT(1)
-#define RT2880_GPIO_MODE_SPI BIT(2)
-#define RT2880_GPIO_MODE_UART1 BIT(3)
-#define RT2880_GPIO_MODE_JTAG BIT(4)
-#define RT2880_GPIO_MODE_MDIO BIT(5)
-#define RT2880_GPIO_MODE_SDRAM BIT(6)
-#define RT2880_GPIO_MODE_PCI BIT(7)
-
#define CLKCFG_SRAM_CS_N_WDT BIT(9)
#define RT2880_SDRAM_BASE 0x08000000
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index b54619dc4b88..4d8e8c8d83ce 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -114,30 +114,6 @@ static inline int soc_is_rt5350(void)
#define RT305X_GPIO_GE0_TXD0 40
#define RT305X_GPIO_GE0_RXCLK 51
-#define RT305X_GPIO_MODE_UART0_SHIFT 2
-#define RT305X_GPIO_MODE_UART0_MASK 0x7
-#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
-#define RT305X_GPIO_MODE_UARTF 0
-#define RT305X_GPIO_MODE_PCM_UARTF 1
-#define RT305X_GPIO_MODE_PCM_I2S 2
-#define RT305X_GPIO_MODE_I2S_UARTF 3
-#define RT305X_GPIO_MODE_PCM_GPIO 4
-#define RT305X_GPIO_MODE_GPIO_UARTF 5
-#define RT305X_GPIO_MODE_GPIO_I2S 6
-#define RT305X_GPIO_MODE_GPIO 7
-
-#define RT305X_GPIO_MODE_I2C 0
-#define RT305X_GPIO_MODE_SPI 1
-#define RT305X_GPIO_MODE_UART1 5
-#define RT305X_GPIO_MODE_JTAG 6
-#define RT305X_GPIO_MODE_MDIO 7
-#define RT305X_GPIO_MODE_SDRAM 8
-#define RT305X_GPIO_MODE_RGMII 9
-#define RT5350_GPIO_MODE_PHY_LED 14
-#define RT5350_GPIO_MODE_SPI_CS1 21
-#define RT3352_GPIO_MODE_LNA 18
-#define RT3352_GPIO_MODE_PA 20
-
#define RT3352_SYSC_REG_SYSCFG0 0x010
#define RT3352_SYSC_REG_SYSCFG1 0x014
#define RT3352_SYSC_REG_CLKCFG1 0x030
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
index 565f2548496a..f250de9c055b 100644
--- a/arch/mips/include/asm/mach-ralink/rt3883.h
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -109,40 +109,6 @@
#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
-#define RT3883_GPIO_MODE_UART0_SHIFT 2
-#define RT3883_GPIO_MODE_UART0_MASK 0x7
-#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
-#define RT3883_GPIO_MODE_UARTF 0x0
-#define RT3883_GPIO_MODE_PCM_UARTF 0x1
-#define RT3883_GPIO_MODE_PCM_I2S 0x2
-#define RT3883_GPIO_MODE_I2S_UARTF 0x3
-#define RT3883_GPIO_MODE_PCM_GPIO 0x4
-#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
-#define RT3883_GPIO_MODE_GPIO_I2S 0x6
-#define RT3883_GPIO_MODE_GPIO 0x7
-
-#define RT3883_GPIO_MODE_I2C 0
-#define RT3883_GPIO_MODE_SPI 1
-#define RT3883_GPIO_MODE_UART1 5
-#define RT3883_GPIO_MODE_JTAG 6
-#define RT3883_GPIO_MODE_MDIO 7
-#define RT3883_GPIO_MODE_GE1 9
-#define RT3883_GPIO_MODE_GE2 10
-
-#define RT3883_GPIO_MODE_PCI_SHIFT 11
-#define RT3883_GPIO_MODE_PCI_MASK 0x7
-#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
-#define RT3883_GPIO_MODE_LNA_A_SHIFT 16
-#define RT3883_GPIO_MODE_LNA_A_MASK 0x3
-#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
-#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3
-#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
-#define RT3883_GPIO_MODE_LNA_G_SHIFT 18
-#define RT3883_GPIO_MODE_LNA_G_MASK 0x3
-#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
-#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
-#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
-
#define RT3883_GPIO_I2C_SD 1
#define RT3883_GPIO_I2C_SCLK 2
#define RT3883_GPIO_SPI_CS0 3
diff --git a/arch/mips/include/asm/mach-ralink/spaces.h b/arch/mips/include/asm/mach-ralink/spaces.h
new file mode 100644
index 000000000000..87d085c9ad61
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/spaces.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_RALINK_SPACES_H_
+#define __ASM_MACH_RALINK_SPACES_H_
+
+#define PCI_IOBASE _AC(0xa0000000, UL)
+#define PCI_IOSIZE SZ_16M
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
+#include <asm/mach-generic/spaces.h>
+#endif
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
index f93aa5ee2e2e..3481ed4c117b 100644
--- a/arch/mips/include/asm/mips-boards/launch.h
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -3,6 +3,9 @@
*
*/
+#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
+#define _ASM_MIPS_BOARDS_LAUNCH_H
+
#ifndef _ASSEMBLER_
struct cpulaunch {
@@ -34,3 +37,5 @@ struct cpulaunch {
/* Polling period in count cycles for secondary CPU's */
#define LAUNCHPERIOD 10000
+
+#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 9c8099a6ffed..acdf8c69220b 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -2077,7 +2077,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ int __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips32r2\n\t" \
+ ".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2090,7 +2090,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ unsigned long long __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips64r2\n\t" \
+ ".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2103,7 +2103,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips32r2\n\t" \
+ ".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
@@ -2115,7 +2115,7 @@ do { \
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips64r2\n\t" \
+ ".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index b826b8473e95..602a21aee9d4 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -8,7 +8,7 @@
#include <asm/page.h>
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
# include <mmzone.h>
#endif
@@ -20,10 +20,4 @@
#define nid_to_addrbase(nid) 0
#endif
-#ifdef CONFIG_DISCONTIGMEM
-
-#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
-
-#endif /* CONFIG_DISCONTIGMEM */
-
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 195ff4e9771f..96bc798c1ec1 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -239,7 +239,7 @@ static inline int pfn_valid(unsigned long pfn)
/* pfn_valid is defined in linux/mmzone.h */
-#elif defined(CONFIG_NEED_MULTIPLE_NODES)
+#elif defined(CONFIG_NUMA)
#define pfn_valid(pfn) \
({ \
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 8b18424b3120..4b2567d6b2df 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -28,7 +28,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
@@ -59,11 +58,15 @@ do { \
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd;
+ pmd_t *pmd = NULL;
+ struct page *pg;
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
- if (pmd)
+ pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
+ if (pg) {
+ pgtable_pmd_page_ctor(pg);
+ pmd = (pmd_t *)page_address(pg);
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+ }
return pmd;
}
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6c0532d7b211..95df9c293d8d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -93,7 +93,6 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START MAP_BASE
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1e7d6ce9d8d6..046465906c82 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -137,7 +137,6 @@
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
-#define FIRST_USER_ADDRESS 0UL
/*
* TLB refill handlers also map the vmalloc area into xuseg. Avoid
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 5d70babfc9ee..c2196b1b6604 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -17,6 +17,10 @@
#include <asm/unistd_nr_n64.h>
#include <asm/unistd_nr_o32.h>
+#define __NR_N32_Linux 6000
+#define __NR_64_Linux 5000
+#define __NR_O32_Linux 4000
+
#ifdef CONFIG_MIPS32_N32
#define NR_syscalls (__NR_N32_Linux + __NR_N32_Linux_syscalls)
#elif defined(CONFIG_64BIT)
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 57dc2ac4f8bd..40b210c65a5a 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -98,6 +98,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
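[Illustrative aside, not part of the patch] The two new advice values above let user space prefault a mapping's page tables without having to touch every page itself. A minimal user-space sketch, assuming a libc whose headers do not yet carry the constants (so they are repeated here from the hunk above):

	#include <stddef.h>
	#include <sys/mman.h>

	#ifndef MADV_POPULATE_READ
	#define MADV_POPULATE_READ	22	/* matches the uapi values added above */
	#define MADV_POPULATE_WRITE	23
	#endif

	/* Prefault [addr, addr + len) so later accesses do not take page faults. */
	static int prefault(void *addr, size_t len, int writable)
	{
		return madvise(addr, len,
			       writable ? MADV_POPULATE_WRITE : MADV_POPULATE_READ);
	}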
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 2d949969313b..cdf404a831b2 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -138,6 +138,8 @@
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70
+#define SO_NETNS_COOKIE 71
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
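[Illustrative aside, not part of the patch] SO_NETNS_COOKIE is read-only and reports the cookie of the network namespace the socket belongs to. A hedged user-space sketch, again assuming the constant is missing from the libc headers:

	#include <stdint.h>
	#include <sys/socket.h>

	#ifndef SO_NETNS_COOKIE
	#define SO_NETNS_COOKIE 71	/* matches the uapi value added above */
	#endif

	/* Returns 0 on success and stores the namespace cookie in *cookie. */
	static int get_netns_cookie(int fd, uint64_t *cookie)
	{
		socklen_t len = sizeof(*cookie);

		return getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, cookie, &len);
	}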
diff --git a/arch/mips/ingenic/Kconfig b/arch/mips/ingenic/Kconfig
index 3238e16febd5..f595b339a4b8 100644
--- a/arch/mips/ingenic/Kconfig
+++ b/arch/mips/ingenic/Kconfig
@@ -4,9 +4,11 @@ config MACH_INGENIC_GENERIC
bool
select MACH_INGENIC
select MACH_JZ4740
+ select MACH_JZ4725B
select MACH_JZ4770
select MACH_JZ4780
select MACH_X1000
+ select MACH_X1830
choice
prompt "Machine type"
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 5735b2cd6f2a..04ca75278f02 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -78,7 +78,6 @@ void output_ptreg_defines(void)
void output_task_defines(void)
{
COMMENT("MIPS task_struct offsets.");
- OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
do {
old32 = load32;
new32 = (load32 & ~mask) | (val << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
} while (load32 != old32);
return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
*/
old32 = (load32 & ~mask) | (old << shift);
new32 = (load32 & ~mask) | (new << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
if (load32 == old32)
return old;
}
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 0ef240adefb5..630fcb4cb30e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1840,6 +1840,11 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+
+ /* FPU is not properly detected on JZ4760(B). */
+ if (c->processor_id == 0x2ed0024f)
+ c->options |= MIPS_CPU_FPU;
+
fallthrough;
/*
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 54dfba8fa77c..75bff0f77319 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -403,9 +403,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
if (kcb->kprobe_status & KPROBE_HIT_SS) {
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_old_SR;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index bff080db0294..73c8e7990a97 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -662,7 +662,7 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ra = 0;
#endif
- if (!task || task == current || task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
if (!task_stack_page(task))
goto out;
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 499a5357c09f..56b51de2dc51 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
+#include <linux/panic_notifier.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ef86fbad8546..d542fb7af3ba 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
*/
calibrate_delay();
- preempt_disable();
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
index 904452992992..10bf90dc02c0 100644
--- a/arch/mips/kernel/syscalls/Makefile
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -5,9 +5,6 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscalln32 := $(src)/syscall_n32.tbl
-syscalln64 := $(src)/syscall_n64.tbl
-syscallo32 := $(src)/syscall_o32.tbl
syshdr := $(srctree)/scripts/syscallhdr.sh
sysnr := $(srctree)/$(src)/syscallnr.sh
systbl := $(srctree)/scripts/syscalltbl.sh
@@ -18,43 +15,22 @@ quiet_cmd_syshdr = SYSHDR $@
quiet_cmd_sysnr = SYSNR $@
cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
'$(sysnr_abis_$(basetarget))' \
- '$(sysnr_pfx_$(basetarget))' \
- '$(sysnr_offset_$(basetarget))'
+ '$(sysnr_pfx_$(basetarget))'
quiet_cmd_systbl = SYSTBL $@
cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
-$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE
+$(uapi)/unistd_%.h: $(src)/syscall_%.tbl $(syshdr) FORCE
$(call if_changed,syshdr)
sysnr_pfx_unistd_nr_n32 := N32
-sysnr_offset_unistd_nr_n32 := 6000
-$(kapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) FORCE
- $(call if_changed,sysnr)
-
sysnr_pfx_unistd_nr_n64 := 64
-sysnr_offset_unistd_nr_n64 := 5000
-$(kapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) FORCE
- $(call if_changed,sysnr)
-
sysnr_pfx_unistd_nr_o32 := O32
-sysnr_offset_unistd_nr_o32 := 4000
-$(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE
- $(call if_changed,sysnr)
-
-$(kapi)/syscall_table_n32.h: $(syscalln32) $(systbl) FORCE
- $(call if_changed,systbl)
-$(kapi)/syscall_table_n64.h: $(syscalln64) $(systbl) FORCE
- $(call if_changed,systbl)
+$(kapi)/unistd_nr_%.h: $(src)/syscall_%.tbl $(sysnr) FORCE
+ $(call if_changed,sysnr)
-$(kapi)/syscall_table_o32.h: $(syscallo32) $(systbl) FORCE
+$(kapi)/syscall_table_%.h: $(src)/syscall_%.tbl $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_n32.h \
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 5e0096657251..c2d2e19abea8 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -381,7 +381,7 @@
440 n32 process_madvise sys_process_madvise
441 n32 epoll_pwait2 compat_sys_epoll_pwait2
442 n32 mount_setattr sys_mount_setattr
-443 n32 quotactl_path sys_quotactl_path
+443 n32 quotactl_fd sys_quotactl_fd
444 n32 landlock_create_ruleset sys_landlock_create_ruleset
445 n32 landlock_add_rule sys_landlock_add_rule
446 n32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 9974f5f8e49b..ac653d08b1ea 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -357,7 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
-443 n64 quotactl_path sys_quotactl_path
+443 n64 quotactl_fd sys_quotactl_fd
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 39d6e71e57b6..253f2cd70b6b 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -430,7 +430,7 @@
440 o32 process_madvise sys_process_madvise
441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 o32 mount_setattr sys_mount_setattr
-443 o32 quotactl_path sys_quotactl_path
+443 o32 quotactl_fd sys_quotactl_fd
444 o32 landlock_create_ruleset sys_landlock_create_ruleset
445 o32 landlock_add_rule sys_landlock_add_rule
446 o32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscallnr.sh b/arch/mips/kernel/syscalls/syscallnr.sh
index 60bbdb3fe03a..c190bbefbfc2 100644
--- a/arch/mips/kernel/syscalls/syscallnr.sh
+++ b/arch/mips/kernel/syscalls/syscallnr.sh
@@ -5,7 +5,6 @@ in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
prefix="$4"
-offset="$5"
fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
@@ -20,7 +19,6 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
nxt=$((nr+1))
done
- printf "#define __NR_%s_Linux\t%s\n" "${prefix}" "${offset}"
printf "#define __NR_%s_Linux_syscalls\t%s\n" "${prefix}" "${nxt}"
printf "\n"
printf "#endif /* %s */" "${fileguard}"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0b4e06303c55..6f07362de5ce 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -784,7 +784,6 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
int si_code;
- struct vm_area_struct *vma;
switch (sig) {
case 0:
@@ -800,8 +799,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
case SIGSEGV:
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, (unsigned long)fault_addr);
- if (vma && (vma->vm_start <= (unsigned long)fault_addr))
+ if (vma_lookup(current->mm, (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
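[Illustrative aside, not part of the patch] vma_lookup() only returns a VMA that actually contains the address, which is exactly what the removed find_vma() plus vm_start test open-coded. A minimal in-kernel sketch of the equivalence (the helper names are made up):

	#include <linux/mm.h>

	/* Both return true iff addr lies inside an existing mapping of mm. */
	static bool mapped_old(struct mm_struct *mm, unsigned long addr)
	{
		/* find_vma() may also return a VMA that starts above addr */
		struct vm_area_struct *vma = find_vma(mm, addr);

		return vma && vma->vm_start <= addr;
	}

	static bool mapped_new(struct mm_struct *mm, unsigned long addr)
	{
		return vma_lookup(mm, addr) != NULL;
	}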
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 30cc060857c7..c67250a956b8 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for KVM support for MIPS
#
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 4d4af97dcc88..af9dd029a4e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -38,43 +38,63 @@
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("wait", wait_exits),
- VCPU_STAT("cache", cache_exits),
- VCPU_STAT("signal", signal_exits),
- VCPU_STAT("interrupt", int_exits),
- VCPU_STAT("cop_unusable", cop_unusable_exits),
- VCPU_STAT("tlbmod", tlbmod_exits),
- VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
- VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
- VCPU_STAT("addrerr_st", addrerr_st_exits),
- VCPU_STAT("addrerr_ld", addrerr_ld_exits),
- VCPU_STAT("syscall", syscall_exits),
- VCPU_STAT("resvd_inst", resvd_inst_exits),
- VCPU_STAT("break_inst", break_inst_exits),
- VCPU_STAT("trap_inst", trap_inst_exits),
- VCPU_STAT("msa_fpe", msa_fpe_exits),
- VCPU_STAT("fpe", fpe_exits),
- VCPU_STAT("msa_disabled", msa_disabled_exits),
- VCPU_STAT("flush_dcache", flush_dcache_exits),
- VCPU_STAT("vz_gpsi", vz_gpsi_exits),
- VCPU_STAT("vz_gsfc", vz_gsfc_exits),
- VCPU_STAT("vz_hc", vz_hc_exits),
- VCPU_STAT("vz_grr", vz_grr_exits),
- VCPU_STAT("vz_gva", vz_gva_exits),
- VCPU_STAT("vz_ghfc", vz_ghfc_exits),
- VCPU_STAT("vz_gpa", vz_gpa_exits),
- VCPU_STAT("vz_resvd", vz_resvd_exits),
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, wait_exits),
+ STATS_DESC_COUNTER(VCPU, cache_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, int_exits),
+ STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmod_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
+ STATS_DESC_COUNTER(VCPU, break_inst_exits),
+ STATS_DESC_COUNTER(VCPU, trap_inst_exits),
+ STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
+ STATS_DESC_COUNTER(VCPU, fpe_exits),
+ STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
+ STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_hc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_grr_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gva_exits),
+ STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
+ STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
- VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+ STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- {NULL}
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
bool kvm_trace_guest_mode_change;
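[Illustrative aside, not part of the patch] The named VCPU_STAT() debugfs entries are replaced by descriptor tables for KVM's binary statistics interface; the static_asserts keep each table in step with the corresponding stat structure. A hedged user-space sketch of reading the header that kvm_vcpu_stats_header above describes, assuming a uapi <linux/kvm.h> new enough to define KVM_GET_STATS_FD:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* Fetch the stats header from a vCPU fd; 0 on success, -1 on error. */
	static int read_vcpu_stats_header(int vcpu_fd, struct kvm_stats_header *hdr)
	{
		int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
		int ret = -1;

		if (stats_fd < 0)
			return -1;
		if (pread(stats_fd, hdr, sizeof(*hdr), 0) == (ssize_t)sizeof(*hdr))
			ret = 0;
		close(stats_fd);
		return ret;
	}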
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 1088114e5482..a3b50d5e3b25 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -58,8 +58,7 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
tlb_probe_hazard();
idx = read_c0_index();
- if (idx >= current_cpu_data.tlbsize)
- BUG();
+ BUG_ON(idx >= current_cpu_data.tlbsize);
if (idx >= 0) {
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index aeb1b989cd4e..63dccb2ed08b 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index de03838b343b..a9b72eacfc0b 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -37,7 +37,7 @@
*/
notrace void arch_local_irq_disable(void)
{
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_disable);
@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
return flags;
}
@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_restore);
diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
index c8bb75d58f17..c961e2999f15 100644
--- a/arch/mips/loongson64/env.c
+++ b/arch/mips/loongson64/env.c
@@ -62,7 +62,7 @@ void __init prom_lefi_init_env(void)
struct efi_cpuinfo_loongson *ecpu;
struct irq_source_routing_table *eirq_source;
u32 id;
- u16 vendor, device;
+ u16 vendor;
/* firmware arguments are initialized in head.S */
boot_p = (struct boot_params *)fw_arg2;
@@ -166,7 +166,6 @@ void __init prom_lefi_init_env(void)
/* Read the ID of PCI host bridge to detect bridge type */
id = readl(HOST_BRIDGE_CONFIG_ADDR);
vendor = id & 0xffff;
- device = (id >> 16) & 0xffff;
switch (vendor) {
case PCI_VENDOR_ID_LOONGSON:
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index fa9b4a487a47..e8e3e48c5333 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -129,6 +129,9 @@ static void __init node_mem_init(unsigned int node)
if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
memblock_reserve((node_addrspace_offset | 0xfe000000),
32 << 20);
+
+ /* Reserve pfn range 0~node[0]->node_start_pfn */
+ memblock_reserve(0, PAGE_SIZE * start_pfn);
}
}
diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c
index c97bfdc8c922..758d5d26aaaa 100644
--- a/arch/mips/loongson64/reset.c
+++ b/arch/mips/loongson64/reset.c
@@ -126,11 +126,12 @@ static void loongson_kexec_shutdown(void)
for_each_possible_cpu(cpu)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
+
+ secondary_kexec_args[0] = TO_UNCAC(0x3ff01000);
#endif
kexec_args[0] = kexec_argc;
kexec_args[1] = fw_arg1;
kexec_args[2] = fw_arg2;
- secondary_kexec_args[0] = TO_UNCAC(0x3ff01000);
memcpy((void *)fw_arg1, kexec_argv, KEXEC_ARGV_SIZE);
memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE);
}
@@ -141,7 +142,9 @@ static void loongson_crash_shutdown(struct pt_regs *regs)
kexec_args[0] = kdump_argc;
kexec_args[1] = fw_arg1;
kexec_args[2] = fw_arg2;
+#ifdef CONFIG_SMP
secondary_kexec_args[0] = TO_UNCAC(0x3ff01000);
+#endif
memcpy((void *)fw_arg1, kdump_argv, KEXEC_ARGV_SIZE);
memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE);
}
diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
index 6acde65f601b..09ebe84a17fe 100644
--- a/arch/mips/loongson64/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -51,11 +51,11 @@ static uint32_t core0_c0count[NR_CPUS];
__wbflush(); \
} while (0)
-u32 (*ipi_read_clear)(int cpu);
-void (*ipi_write_action)(int cpu, u32 action);
-void (*ipi_write_enable)(int cpu);
-void (*ipi_clear_buf)(int cpu);
-void (*ipi_write_buf)(int cpu, struct task_struct *idle);
+static u32 (*ipi_read_clear)(int cpu);
+static void (*ipi_write_action)(int cpu, u32 action);
+static void (*ipi_write_enable)(int cpu);
+static void (*ipi_clear_buf)(int cpu);
+static void (*ipi_write_buf)(int cpu, struct task_struct *idle);
/* send mail via Mail_Send register for 3A4000+ CPU */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index a7bf0c80371c..830ab91e574f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = PVA(_PAGE_PRESENT);
- protection_map[5] = PVA(_PAGE_PRESENT);
- protection_map[6] = PVA(_PAGE_PRESENT);
- protection_map[7] = PVA(_PAGE_PRESENT);
+ protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = PM(_PAGE_PRESENT);
+ protection_map[5] = PM(_PAGE_PRESENT);
+ protection_map[6] = PM(_PAGE_PRESENT);
+ protection_map[7] = PM(_PAGE_PRESENT);
protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
_PAGE_NO_READ);
- protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = PVA(_PAGE_PRESENT);
- protection_map[13] = PVA(_PAGE_PRESENT);
- protection_map[14] = PVA(_PAGE_PRESENT);
- protection_map[15] = PVA(_PAGE_PRESENT);
+ protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = PM(_PAGE_PRESENT);
+ protection_map[13] = PM(_PAGE_PRESENT);
+ protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
-#undef _PVA
#undef PM
void cpu_cache_init(void)
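[Illustrative aside, not part of the patch] protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a mapping, which is why only the shared writable entries (10, 11, 14, 15) carry _PAGE_WRITE while the private writable ones stay read-only for copy-on-write. A hedged sketch of the indexing (not how vm_get_page_prot() is literally spelled):

	#include <linux/mm.h>

	/* index = shared<<3 | exec<<2 | write<<1 | read, matching the table above */
	static pgprot_t prot_for(bool read, bool write, bool exec, bool shared)
	{
		unsigned int idx = (shared ? 8 : 0) | (exec ? 4 : 0) |
				   (write ? 2 : 0) | (read ? 1 : 0);

		return protection_map[idx];	/* e.g. idx 11: shared read+write */
	}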
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 212f3ce75a6b..3c4fc97b9f39 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -32,6 +32,7 @@ static inline bool cpu_needs_post_dma_flush(void)
case CPU_R12000:
case CPU_BMIPS5000:
case CPU_LOONGSON2EF:
+ case CPU_XBURST:
return true;
default:
/*
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c36358758969..19347dc6bbf8 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -394,7 +394,7 @@ void maar_init(void)
}
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -454,9 +454,6 @@ void __init mem_init(void)
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
-#endif
max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
max_mapnr = max_low_pfn;
@@ -476,7 +473,7 @@ void __init mem_init(void)
0x80000000 - 4, KCORE_TEXT);
#endif
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 11e9527c6e44..ee7471984fe7 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -33,7 +33,8 @@
.irq = int, \
.uartclk = 1843200, \
.iotype = UPIO_PORT, \
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | \
+ UPF_MAGIC_MULTIPLIER, \
.regshift = 0, \
}
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index aebd4964ea34..c48e23cf5b5e 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
index d2216942af18..ab9bedb82b28 100644
--- a/arch/mips/pci/pci-xtalk-bridge.c
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -13,6 +13,7 @@
#include <linux/platform_data/xtalk-bridge.h>
#include <linux/nvmem-consumer.h>
#include <linux/crc16.h>
+#include <linux/irqdomain.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
index 7acbb50c1dcd..bb6ab1f3e80d 100644
--- a/arch/mips/pic32/Kconfig
+++ b/arch/mips/pic32/Kconfig
@@ -17,7 +17,6 @@ config PIC32MZDA
select SYS_SUPPORTS_LITTLE_ENDIAN
select GPIOLIB
select COMMON_CLK
- select CLKDEV_LOOKUP
select LIBFDT
select USE_OF
select PINCTRL
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index ec4daa63c5e3..c800bf5559b5 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -28,22 +28,18 @@ choice
bool "RT288x"
select MIPS_AUTO_PFN_OFFSET
select MIPS_L1_CACHE_SHIFT_4
- select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_RT305X
bool "RT305x"
- select HAVE_LEGACY_CLK
config SOC_RT3883
bool "RT3883"
- select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7620
bool "MT7620/8"
select CPU_MIPSR2_IRQ_VI
- select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7621
@@ -54,7 +50,6 @@ choice
select SYS_SUPPORTS_MIPS_CPS
select SYS_SUPPORTS_HIGHMEM
select MIPS_GIC
- select COMMON_CLK
select CLKSRC_MIPS_GIC
select HAVE_PCI if PCI_MT7621
select SOC_BUS
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index f0bcb1051c30..5b02bb7e0829 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -10,79 +10,21 @@
#include <linux/export.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <asm/time.h>
#include "common.h"
-struct clk {
- struct clk_lookup cl;
- unsigned long rate;
-};
-
void ralink_clk_add(const char *dev, unsigned long rate)
{
- struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk = clk_register_fixed_rate(NULL, dev, NULL, 0, rate);
if (!clk)
panic("failed to add clock");
- clk->cl.dev_id = dev;
- clk->cl.clk = clk;
-
- clk->rate = rate;
-
- clkdev_add(&clk->cl);
-}
-
-/*
- * Linux clock API
- */
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL_GPL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (!clk)
- return 0;
-
- return clk->rate;
-}
-EXPORT_SYMBOL_GPL(clk_get_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return -1;
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- return -1;
-}
-EXPORT_SYMBOL_GPL(clk_round_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- WARN_ON(clk);
- return -1;
-}
-EXPORT_SYMBOL_GPL(clk_set_parent);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
- WARN_ON(clk);
- return NULL;
+ clkdev_create(clk, NULL, "%s", dev);
}
-EXPORT_SYMBOL_GPL(clk_get_parent);
void __init plat_time_init(void)
{
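[Illustrative aside, not part of the patch] With the clock now registered through the common clock framework and a clkdev lookup created with a NULL con_id, consumers keep using the ordinary clk API unchanged. A hedged consumer-side sketch (the device and its name are hypothetical):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Resolve the fixed-rate clock registered by ralink_clk_add() for dev. */
	static unsigned long ralink_dev_clk_rate(struct device *dev)
	{
		struct clk *clk = clk_get(dev, NULL);	/* matches clkdev_create(..., NULL, "%s", dev) */
		unsigned long rate;

		if (IS_ERR(clk))
			return 0;
		rate = clk_get_rate(clk);
		clk_put(clk);
		return rate;
	}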
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 53a5969e61af..ae1fa0391c88 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -15,7 +15,6 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7620.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
@@ -50,320 +49,6 @@
/* does the board have sdram or ddram */
static int dram_type;
-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func mdio_grp[] = {
- FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2),
- FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2),
-};
-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) };
-static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) };
-static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) };
-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) };
-static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) };
-static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) };
-static struct rt2880_pmx_func uartf_grp[] = {
- FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8),
- FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8),
- FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8),
- FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8),
- FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4),
- FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4),
- FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4),
-};
-static struct rt2880_pmx_func wdt_grp[] = {
- FUNC("wdt rst", 0, 17, 1),
- FUNC("wdt refclk", 0, 17, 1),
- };
-static struct rt2880_pmx_func pcie_rst_grp[] = {
- FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1),
- FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1)
-};
-static struct rt2880_pmx_func nd_sd_grp[] = {
- FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
- FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
-};
-
-static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
- GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C),
- GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK,
- MT7620_GPIO_MODE_UART0_SHIFT),
- GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1),
- GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK,
- MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT),
- GRP_G("mdio", mdio_grp, MT7620_GPIO_MODE_MDIO_MASK,
- MT7620_GPIO_MODE_MDIO_GPIO, MT7620_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1),
- GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
- GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK,
- MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT),
- GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK,
- MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT),
- GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2),
- GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED),
- GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY),
- GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA),
- { 0 }
-};
-
-static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
- FUNC("sdxc d6", 3, 19, 1),
- FUNC("utif", 2, 19, 1),
- FUNC("gpio", 1, 19, 1),
- FUNC("pwm1", 0, 19, 1),
-};
-
-static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
- FUNC("sdxc d7", 3, 18, 1),
- FUNC("utif", 2, 18, 1),
- FUNC("gpio", 1, 18, 1),
- FUNC("pwm0", 0, 18, 1),
-};
-
-static struct rt2880_pmx_func uart2_grp_mt7628[] = {
- FUNC("sdxc d5 d4", 3, 20, 2),
- FUNC("pwm", 2, 20, 2),
- FUNC("gpio", 1, 20, 2),
- FUNC("uart2", 0, 20, 2),
-};
-
-static struct rt2880_pmx_func uart1_grp_mt7628[] = {
- FUNC("sw_r", 3, 45, 2),
- FUNC("pwm", 2, 45, 2),
- FUNC("gpio", 1, 45, 2),
- FUNC("uart1", 0, 45, 2),
-};
-
-static struct rt2880_pmx_func i2c_grp_mt7628[] = {
- FUNC("-", 3, 4, 2),
- FUNC("debug", 2, 4, 2),
- FUNC("gpio", 1, 4, 2),
- FUNC("i2c", 0, 4, 2),
-};
-
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
-static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
-
-static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
- FUNC("jtag", 3, 22, 8),
- FUNC("utif", 2, 22, 8),
- FUNC("gpio", 1, 22, 8),
- FUNC("sdxc", 0, 22, 8),
-};
-
-static struct rt2880_pmx_func uart0_grp_mt7628[] = {
- FUNC("-", 3, 12, 2),
- FUNC("-", 2, 12, 2),
- FUNC("gpio", 1, 12, 2),
- FUNC("uart0", 0, 12, 2),
-};
-
-static struct rt2880_pmx_func i2s_grp_mt7628[] = {
- FUNC("antenna", 3, 0, 4),
- FUNC("pcm", 2, 0, 4),
- FUNC("gpio", 1, 0, 4),
- FUNC("i2s", 0, 0, 4),
-};
-
-static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
- FUNC("-", 3, 6, 1),
- FUNC("refclk", 2, 6, 1),
- FUNC("gpio", 1, 6, 1),
- FUNC("spi cs1", 0, 6, 1),
-};
-
-static struct rt2880_pmx_func spis_grp_mt7628[] = {
- FUNC("pwm_uart2", 3, 14, 4),
- FUNC("utif", 2, 14, 4),
- FUNC("gpio", 1, 14, 4),
- FUNC("spis", 0, 14, 4),
-};
-
-static struct rt2880_pmx_func gpio_grp_mt7628[] = {
- FUNC("pcie", 3, 11, 1),
- FUNC("refclk", 2, 11, 1),
- FUNC("gpio", 1, 11, 1),
- FUNC("gpio", 0, 11, 1),
-};
-
-static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 30, 1),
- FUNC("utif", 2, 30, 1),
- FUNC("gpio", 1, 30, 1),
- FUNC("p4led_kn", 0, 30, 1),
-};
-
-static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 31, 1),
- FUNC("utif", 2, 31, 1),
- FUNC("gpio", 1, 31, 1),
- FUNC("p3led_kn", 0, 31, 1),
-};
-
-static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 32, 1),
- FUNC("utif", 2, 32, 1),
- FUNC("gpio", 1, 32, 1),
- FUNC("p2led_kn", 0, 32, 1),
-};
-
-static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 33, 1),
- FUNC("utif", 2, 33, 1),
- FUNC("gpio", 1, 33, 1),
- FUNC("p1led_kn", 0, 33, 1),
-};
-
-static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 34, 1),
- FUNC("rsvd", 2, 34, 1),
- FUNC("gpio", 1, 34, 1),
- FUNC("p0led_kn", 0, 34, 1),
-};
-
-static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
- FUNC("rsvd", 3, 35, 1),
- FUNC("rsvd", 2, 35, 1),
- FUNC("gpio", 1, 35, 1),
- FUNC("wled_kn", 0, 35, 1),
-};
-
-static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 39, 1),
- FUNC("utif", 2, 39, 1),
- FUNC("gpio", 1, 39, 1),
- FUNC("p4led_an", 0, 39, 1),
-};
-
-static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 40, 1),
- FUNC("utif", 2, 40, 1),
- FUNC("gpio", 1, 40, 1),
- FUNC("p3led_an", 0, 40, 1),
-};
-
-static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 41, 1),
- FUNC("utif", 2, 41, 1),
- FUNC("gpio", 1, 41, 1),
- FUNC("p2led_an", 0, 41, 1),
-};
-
-static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 42, 1),
- FUNC("utif", 2, 42, 1),
- FUNC("gpio", 1, 42, 1),
- FUNC("p1led_an", 0, 42, 1),
-};
-
-static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 43, 1),
- FUNC("rsvd", 2, 43, 1),
- FUNC("gpio", 1, 43, 1),
- FUNC("p0led_an", 0, 43, 1),
-};
-
-static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
- FUNC("rsvd", 3, 44, 1),
- FUNC("rsvd", 2, 44, 1),
- FUNC("gpio", 1, 44, 1),
- FUNC("wled_an", 0, 44, 1),
-};
-
-#define MT7628_GPIO_MODE_MASK 0x3
-
-#define MT7628_GPIO_MODE_P4LED_KN 58
-#define MT7628_GPIO_MODE_P3LED_KN 56
-#define MT7628_GPIO_MODE_P2LED_KN 54
-#define MT7628_GPIO_MODE_P1LED_KN 52
-#define MT7628_GPIO_MODE_P0LED_KN 50
-#define MT7628_GPIO_MODE_WLED_KN 48
-#define MT7628_GPIO_MODE_P4LED_AN 42
-#define MT7628_GPIO_MODE_P3LED_AN 40
-#define MT7628_GPIO_MODE_P2LED_AN 38
-#define MT7628_GPIO_MODE_P1LED_AN 36
-#define MT7628_GPIO_MODE_P0LED_AN 34
-#define MT7628_GPIO_MODE_WLED_AN 32
-#define MT7628_GPIO_MODE_PWM1 30
-#define MT7628_GPIO_MODE_PWM0 28
-#define MT7628_GPIO_MODE_UART2 26
-#define MT7628_GPIO_MODE_UART1 24
-#define MT7628_GPIO_MODE_I2C 20
-#define MT7628_GPIO_MODE_REFCLK 18
-#define MT7628_GPIO_MODE_PERST 16
-#define MT7628_GPIO_MODE_WDT 14
-#define MT7628_GPIO_MODE_SPI 12
-#define MT7628_GPIO_MODE_SDMODE 10
-#define MT7628_GPIO_MODE_UART0 8
-#define MT7628_GPIO_MODE_I2S 6
-#define MT7628_GPIO_MODE_CS1 4
-#define MT7628_GPIO_MODE_SPIS 2
-#define MT7628_GPIO_MODE_GPIO 0
-
-static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
- GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_PWM1),
- GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_PWM0),
- GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART2),
- GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART1),
- GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_I2C),
- GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK),
- GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST),
- GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT),
- GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI),
- GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_SDMODE),
- GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART0),
- GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_I2S),
- GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_CS1),
- GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_SPIS),
- GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_GPIO),
- GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_WLED_AN),
- GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P0LED_AN),
- GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P1LED_AN),
- GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P2LED_AN),
- GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P3LED_AN),
- GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P4LED_AN),
- GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_WLED_KN),
- GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P0LED_KN),
- GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P1LED_KN),
- GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P2LED_KN),
- GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P3LED_KN),
- GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P4LED_KN),
- { 0 }
-};
-
-static inline int is_mt76x8(void)
-{
- return ralink_soc == MT762X_SOC_MT7628AN ||
- ralink_soc == MT762X_SOC_MT7688;
-}
-
static __init u32
mt7620_calc_rate(u32 ref_rate, u32 mul, u32 div)
{
@@ -710,9 +395,4 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
(pmu0 & PMU_SW_SET) ? ("sw") : ("hw"));
pr_info("Digital PMU set to %s control\n",
(pmu1 & DIG_SW_SEL) ? ("sw") : ("hw"));
-
- if (is_mt76x8())
- rt2880_pinmux_data = mt7628an_pinmux_data;
- else
- rt2880_pinmux_data = mt7620a_pinmux_data;
}
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
index f82ad2a621f6..bd71f5b14238 100644
--- a/arch/mips/ralink/mt7621.c
+++ b/arch/mips/ralink/mt7621.c
@@ -18,97 +18,10 @@
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7621.h>
-#include <pinmux.h>
-
#include "common.h"
-#define MT7621_GPIO_MODE_UART1 1
-#define MT7621_GPIO_MODE_I2C 2
-#define MT7621_GPIO_MODE_UART3_MASK 0x3
-#define MT7621_GPIO_MODE_UART3_SHIFT 3
-#define MT7621_GPIO_MODE_UART3_GPIO 1
-#define MT7621_GPIO_MODE_UART2_MASK 0x3
-#define MT7621_GPIO_MODE_UART2_SHIFT 5
-#define MT7621_GPIO_MODE_UART2_GPIO 1
-#define MT7621_GPIO_MODE_JTAG 7
-#define MT7621_GPIO_MODE_WDT_MASK 0x3
-#define MT7621_GPIO_MODE_WDT_SHIFT 8
-#define MT7621_GPIO_MODE_WDT_GPIO 1
-#define MT7621_GPIO_MODE_PCIE_RST 0
-#define MT7621_GPIO_MODE_PCIE_REF 2
-#define MT7621_GPIO_MODE_PCIE_MASK 0x3
-#define MT7621_GPIO_MODE_PCIE_SHIFT 10
-#define MT7621_GPIO_MODE_PCIE_GPIO 1
-#define MT7621_GPIO_MODE_MDIO_MASK 0x3
-#define MT7621_GPIO_MODE_MDIO_SHIFT 12
-#define MT7621_GPIO_MODE_MDIO_GPIO 1
-#define MT7621_GPIO_MODE_RGMII1 14
-#define MT7621_GPIO_MODE_RGMII2 15
-#define MT7621_GPIO_MODE_SPI_MASK 0x3
-#define MT7621_GPIO_MODE_SPI_SHIFT 16
-#define MT7621_GPIO_MODE_SPI_GPIO 1
-#define MT7621_GPIO_MODE_SDHCI_MASK 0x3
-#define MT7621_GPIO_MODE_SDHCI_SHIFT 18
-#define MT7621_GPIO_MODE_SDHCI_GPIO 1
-
static void *detect_magic __initdata = detect_memory_region;
-static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) };
-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) };
-static struct rt2880_pmx_func uart3_grp[] = {
- FUNC("uart3", 0, 5, 4),
- FUNC("i2s", 2, 5, 4),
- FUNC("spdif3", 3, 5, 4),
-};
-static struct rt2880_pmx_func uart2_grp[] = {
- FUNC("uart2", 0, 9, 4),
- FUNC("pcm", 2, 9, 4),
- FUNC("spdif2", 3, 9, 4),
-};
-static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
-static struct rt2880_pmx_func wdt_grp[] = {
- FUNC("wdt rst", 0, 18, 1),
- FUNC("wdt refclk", 2, 18, 1),
-};
-static struct rt2880_pmx_func pcie_rst_grp[] = {
- FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1),
- FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1)
-};
-static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
-static struct rt2880_pmx_func spi_grp[] = {
- FUNC("spi", 0, 34, 7),
- FUNC("nand1", 2, 34, 7),
-};
-static struct rt2880_pmx_func sdhci_grp[] = {
- FUNC("sdhci", 0, 41, 8),
- FUNC("nand2", 2, 41, 8),
-};
-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
-
-static struct rt2880_pmx_group mt7621_pinmux_data[] = {
- GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1),
- GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C),
- GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK,
- MT7621_GPIO_MODE_UART3_GPIO, MT7621_GPIO_MODE_UART3_SHIFT),
- GRP_G("uart2", uart2_grp, MT7621_GPIO_MODE_UART2_MASK,
- MT7621_GPIO_MODE_UART2_GPIO, MT7621_GPIO_MODE_UART2_SHIFT),
- GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG),
- GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK,
- MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT),
- GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK,
- MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT),
- GRP_G("mdio", mdio_grp, MT7621_GPIO_MODE_MDIO_MASK,
- MT7621_GPIO_MODE_MDIO_GPIO, MT7621_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2),
- GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK,
- MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT),
- GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK,
- MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT),
- GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1),
- { 0 }
-};
-
phys_addr_t mips_cpc_default_phys_base(void)
{
panic("Cannot detect cpc address");
@@ -219,7 +132,6 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
(rev & CHIP_REV_ECO_MASK));
soc_info->mem_detect = mt7621_memory_detect;
- rt2880_pinmux_data = mt7621_pinmux_data;
soc_dev_init(soc_info, rev);
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 0c5de07da097..0135376c5de5 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
__iomem void *rt_sysc_membase;
__iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
__iomem void *plat_of_remap_node(const char *node)
{
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 25728def3503..aaac1e6ec7d9 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -18,7 +18,6 @@
#include "common.h"
struct ralink_soc_info soc_info;
-struct rt2880_pmx_group *rt2880_pinmux_data = NULL;
enum ralink_soc_type ralink_soc;
EXPORT_SYMBOL_GPL(ralink_soc);
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 34083c70ec68..493335db2fe1 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -14,29 +14,9 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt288x.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
-
-static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
- GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
- GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
- GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
- GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
- { 0 }
-};
-
void __init ralink_clk_init(void)
{
unsigned long cpu_rate, wmac_rate = 40000000;
@@ -106,6 +86,5 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt2880_pinmux_data_act;
ralink_soc = RT2880_SOC;
}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index c5b63c142705..8b095a9dcb15 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -16,83 +16,9 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt305x.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartf_func[] = {
- FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
- FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
- FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
- FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8),
- FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4),
- FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
- FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
-};
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
-static struct rt2880_pmx_func rt5350_cs1_func[] = {
- FUNC("spi_cs1", 0, 27, 1),
- FUNC("wdg_cs1", 1, 27, 1),
-};
-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct rt2880_pmx_func rt3352_rgmii_func[] = {
- FUNC("rgmii", 0, 24, 12)
-};
-static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
-static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
-static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
-static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
-static struct rt2880_pmx_func rt3352_cs1_func[] = {
- FUNC("spi_cs1", 0, 45, 1),
- FUNC("wdg_cs1", 1, 45, 1),
-};
-
-static struct rt2880_pmx_group rt3050_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
- RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
- GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
- GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM),
- { 0 }
-};
-
-static struct rt2880_pmx_group rt3352_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
- RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
- GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
- GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
- GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
- GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
- GRP("spi_cs1", rt3352_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
- { 0 }
-};
-
-static struct rt2880_pmx_group rt5350_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
- RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
- GRP("spi_cs1", rt5350_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
- { 0 }
-};
-
static unsigned long rt5350_get_mem_size(void)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
@@ -265,14 +191,11 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_base = RT305X_SDRAM_BASE;
if (soc_is_rt5350()) {
soc_info->mem_size = rt5350_get_mem_size();
- rt2880_pinmux_data = rt5350_pinmux_data;
} else if (soc_is_rt305x() || soc_is_rt3350()) {
soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt3050_pinmux_data;
} else if (soc_is_rt3352()) {
soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt3352_pinmux_data;
}
}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index ff91f3531ad0..d9875f146d66 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -14,52 +14,9 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt3883.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartf_func[] = {
- FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
- FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
- FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
- FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8),
- FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4),
- FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
- FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
-};
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
-static struct rt2880_pmx_func pci_func[] = {
- FUNC("pci-dev", 0, 40, 32),
- FUNC("pci-host2", 1, 40, 32),
- FUNC("pci-host1", 2, 40, 32),
- FUNC("pci-fnc", 3, 40, 32)
-};
-static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
-
-static struct rt2880_pmx_group rt3883_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
- RT3883_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO),
- GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A),
- GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G),
- GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK,
- RT3883_GPIO_MODE_PCI_SHIFT),
- GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1),
- GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2),
- { 0 }
-};
-
void __init ralink_clk_init(void)
{
unsigned long cpu_rate, sys_rate;
@@ -142,7 +99,5 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt3883_pinmux_data;
-
ralink_soc = RT3883_SOC;
}
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index c374f3ceec38..9028dbbb45dd 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/timer.h>
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 42df9fafa943..95c1bff1ab9f 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
index e8374e4c705b..ba87704073c8 100644
--- a/arch/mips/sgi-ip30/ip30-irq.c
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 20d8637340be..18d1c115cd53 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h
index 940d32842793..62faafbc28e4 100644
--- a/arch/nds32/include/asm/memory.h
+++ b/arch/nds32/include/asm/memory.h
@@ -76,18 +76,12 @@
* virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid
*/
-#ifndef CONFIG_DISCONTIGMEM
-
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
-#else /* CONFIG_DISCONTIGMEM */
-#error CONFIG_DISCONTIGMEM is not supported yet.
-#endif /* !CONFIG_DISCONTIGMEM */
-
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#endif
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
index 85c117347c86..a08e1ebca70e 100644
--- a/arch/nds32/include/asm/pgalloc.h
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -12,11 +12,6 @@
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
-/*
- * Since we have only two-level page tables, these are trivial
- */
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
index c1327e552ec6..391895b54d13 100644
--- a/arch/nds32/kernel/process.c
+++ b/arch/nds32/kernel/process.c
@@ -239,7 +239,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_start, stack_end;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
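Note: the p->state == TASK_RUNNING tests replaced here and in the later get_wchan() implementations follow the scheduler change that hides the task state behind an accessor (the field becomes __state). The helper is roughly equivalent to the sketch below; the authoritative definition lives in <linux/sched.h>:

    /* approximate shape of the helper used above */
    #define task_is_running(task)   (READ_ONCE((task)->__state) == TASK_RUNNING)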
diff --git a/arch/nios2/include/asm/irq.h b/arch/nios2/include/asm/irq.h
index 13ce37272279..c52c94884e93 100644
--- a/arch/nios2/include/asm/irq.h
+++ b/arch/nios2/include/asm/irq.h
@@ -10,6 +10,5 @@
#define NIOS2_CPU_NR_IRQS 32
#include <asm-generic/irq.h>
-#include <linux/irqdomain.h>
#endif
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index e6600d2a5ae0..3c4ae74d5798 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -25,7 +25,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 2600d76c310c..4a995fa628ee 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -24,8 +24,6 @@
#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
-#define FIRST_USER_ADDRESS 0UL
-
#define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
index 5f3555ce4865..c6a1a9f6ac42 100644
--- a/arch/nios2/kernel/irq.c
+++ b/arch/nios2/kernel/irq.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
static u32 ienable;
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index c5f916ca6845..9ff37ba2bb60 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -223,7 +223,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)p;
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index b589fac39b92..326167e4783a 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -13,7 +13,7 @@
/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int tmp, old; \
\
@@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
@@ -83,16 +85,18 @@ ATOMIC_OP(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
-#define atomic_and atomic_and
-#define atomic_or atomic_or
-#define atomic_xor atomic_xor
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_add arch_atomic_add
+#define arch_atomic_sub arch_atomic_sub
+#define arch_atomic_and arch_atomic_and
+#define arch_atomic_or arch_atomic_or
+#define arch_atomic_xor arch_atomic_xor
/*
* Atomically add a to v->counter as long as v is not already u.
@@ -100,7 +104,7 @@ ATOMIC_OP(xor)
*
* This is often used through atomic_inc_not_zero()
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
@@ -119,8 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#include <asm-generic/atomic.h>
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#include <asm/cmpxchg.h>
+
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_OPENRISC_ATOMIC_H */
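Note: the atomic_* to arch_atomic_* renames in this header (and in the parisc and powerpc headers later in the diff) let the architecture plug into the generic instrumented atomics: the kernel-wide atomic_*() entry points become thin wrappers that add KASAN/KCSAN instrumentation and then call the arch_atomic_*() op the architecture provides. A simplified sketch of that wrapper layer, not copied from the generated header:

    #include <linux/instrumented.h>

    static __always_inline void atomic_add(int i, atomic_t *v)
    {
            instrument_atomic_read_write(v, sizeof(*v));
            arch_atomic_add(i, v);          /* the per-arch op defined above */
    }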
diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h
new file mode 100644
index 000000000000..7538294721be
--- /dev/null
+++ b/arch/openrisc/include/asm/barrier.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define mb() asm volatile ("l.msync" ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index f9cd43a39d72..79fd16162ccb 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
(__typeof__(*(ptr))) __cmpxchg((ptr), \
(unsigned long)(o), \
@@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
}
}
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
(__typeof__(*(ptr))) __xchg((ptr), \
(unsigned long)(with), \
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 88820299ecc4..b7b2b8d16fad 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -72,6 +72,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
#endif
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 9425bedab4fc..4ac591c9ca33 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -73,7 +73,6 @@ extern void paging_init(void);
*/
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/*
* Kernels own virtual memory area.
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
index 185dcd3731ed..dbf030365ab4 100644
--- a/arch/openrisc/include/asm/tlbflush.h
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -25,7 +25,7 @@
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
*/
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h
deleted file mode 100644
index 14353f2101f2..000000000000
--- a/arch/openrisc/include/asm/unaligned.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- */
-
-#ifndef __ASM_OPENRISC_UNALIGNED_H
-#define __ASM_OPENRISC_UNALIGNED_H
-
-/*
- * This is copied from the generic implementation and the C-struct
- * variant replaced with the memmove variant. The GCC compiler
- * for the OR32 arch optimizes too aggressively for the C-struct
- * variant to work, so use the memmove variant instead.
- *
- * It may be worth considering implementing the unaligned access
- * exception handler and allowing unaligned accesses (access_ok.h)...
- * not sure if it would be much of a performance win without further
- * investigation.
- */
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_memmove.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_memmove.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_OPENRISC_UNALIGNED_H */
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index 18c703d1d761..710651d5aaae 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -37,7 +37,6 @@
int main(void)
{
/* offsets into the task_struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 2416a9f91533..c6f9e7b9f7cb 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -278,6 +278,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+
+ of_node_put(cpu);
}
void __init setup_arch(char **cmdline_p)
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 48e1092a64de..415e209732a3 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
set_cpu_online(cpu, true);
local_irq_enable();
-
- preempt_disable();
/*
* OK, it's off to the idle thread for us
*/
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d5641198b90c..cfef61a7b6c2 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -75,7 +75,6 @@ static void __init map_ram(void)
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
- struct memblock_region *region;
v = PAGE_OFFSET;
@@ -121,7 +120,7 @@ static void __init map_ram(void)
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
- region->base, region->base + region->size);
+ start, end);
}
}
@@ -129,7 +128,6 @@ void __init paging_init(void)
{
extern void tlb_init(void);
- unsigned long end;
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -145,8 +143,6 @@ void __init paging_init(void)
*/
current_pgd[smp_processor_id()] = init_mm.pgd;
- end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
-
map_ram();
zone_sizes_init();
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 21b375c67e53..dd5a299ada69 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values.
*/
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define ATOMIC_OP(op, c_op) \
-static __inline__ void atomic_##op(int i, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
+static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=)
#define ATOMIC64_INIT(i) { (i) }
#define ATOMIC64_OP(op, c_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
\
@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
}
#define ATOMIC64_OP_RETURN(op, c_op) \
-static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, c_op) \
-static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP
static __inline__ void
-atomic64_set(atomic64_t *v, s64 i)
+arch_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+#define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))
static __inline__ s64
-atomic64_read(const atomic64_t *v)
+arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* !CONFIG_64BIT */
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 84ee232278a6..5f274be10567 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -78,7 +78,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -98,7 +98,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
- return __cmpxchg_local_generic(ptr, old, new_, size);
+ return __generic_cmpxchg_local(ptr, old, new_, size);
}
}
@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
-#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+#define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
#endif /* _ASM_PARISC_CMPXCHG_H_ */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index dda557085311..6a7e98e71f1d 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -69,6 +69,5 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 39017210dbf0..7f33c29764cc 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -171,8 +171,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
* pgd entries used up by user/kernel:
*/
-#define FIRST_USER_ADDRESS 0UL
-
/* NB: The tlb miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (One example, bits 25-31 */
/* are moved together in one instruction). */
diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h
index e9029c7c2a69..3bda16773ba6 100644
--- a/arch/parisc/include/asm/unaligned.h
+++ b/arch/parisc/include/asm/unaligned.h
@@ -2,11 +2,7 @@
#ifndef _ASM_PARISC_UNALIGNED_H
#define _ASM_PARISC_UNALIGNED_H
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
+#include <asm-generic/unaligned.h>
#ifdef __KERNEL__
struct pt_regs;
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index ab78cba446ed..9e3c010c0f61 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -52,6 +52,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
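Note: MADV_POPULATE_READ and MADV_POPULATE_WRITE, added to the per-architecture uapi headers in this series, let userspace ask the kernel to prefault (populate) a mapping's page tables in one call. A hedged userspace sketch; the fallback define is only needed with older libc headers:

    #include <stddef.h>
    #include <sys/mman.h>

    #ifndef MADV_POPULATE_WRITE
    #define MADV_POPULATE_WRITE 23
    #endif

    /* Prefault an anonymous mapping for writing so later stores do not fault. */
    static int prefault_writable(void *addr, size_t len)
    {
            return madvise(addr, len, MADV_POPULATE_WRITE);
    }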
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index f60904329bbc..5b5351cdcb33 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -119,6 +119,8 @@
#define SO_PREFER_BUSY_POLL 0x4043
#define SO_BUSY_POLL_BUDGET 0x4044
+#define SO_NETNS_COOKIE 0x4045
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
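Note: SO_NETNS_COOKIE is a read-only socket option returning the 64-bit cookie of the network namespace the socket belongs to; the numeric value is per-architecture (0x4045 on parisc, as added above). A hedged usage sketch:

    #include <stdint.h>
    #include <sys/socket.h>

    static int get_netns_cookie(int fd, uint64_t *cookie)
    {
            socklen_t len = sizeof(*cookie);

            return getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, cookie, &len);
    }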
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index cd2cc1b1648c..33113ba24054 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -42,7 +42,6 @@
int main(void)
{
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index 75ae88d13909..da154406d368 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/cache.h>
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 7ed404c60a9e..39ccad063533 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -103,22 +103,16 @@ static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *bu
return count;
}
-static int pdc_console_tty_write_room(struct tty_struct *tty)
+static unsigned int pdc_console_tty_write_room(struct tty_struct *tty)
{
return 32768; /* no limit, no buffer used */
}
-static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
-{
- return 0; /* no buffer */
-}
-
static const struct tty_operations pdc_console_tty_ops = {
.open = pdc_console_tty_open,
.close = pdc_console_tty_close,
.write = pdc_console_tty_write,
.write_room = pdc_console_tty_write_room,
- .chars_in_buffer = pdc_console_tty_chars_in_buffer,
};
static void pdc_console_poll(struct timer_list *unused)
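Note: the pdc_cons changes above track the tty core switching the write_room and chars_in_buffer tty_operations callbacks to return unsigned int, with an absent chars_in_buffer meaning nothing is buffered. A sketch of the updated callback shape for such a driver, with illustrative example_* names:

    #include <linux/tty.h>
    #include <linux/tty_driver.h>

    static unsigned int example_tty_write_room(struct tty_struct *tty)
    {
            return 32768;   /* writes are never buffered, so no real limit */
    }

    static const struct tty_operations example_tty_ops = {
            .write_room = example_tty_write_room,
            /* .chars_in_buffer left unset: the core then reports 0 */
    };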
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b144fbe29bc1..184ec3c1eae4 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -249,7 +249,7 @@ get_wchan(struct task_struct *p)
unsigned long ip;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
@@ -260,7 +260,7 @@ get_wchan(struct task_struct *p)
do {
if (unwind_once(&info) < 0)
return 0;
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
return 0;
ip = info.ip;
if (!in_sched_functions(ip))
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 10227f667c8a..1405b603b91b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
#endif
smp_cpu_init(slave_id);
- preempt_disable();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 5ac80b83d745..e26187b9ab87 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -440,7 +440,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
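Note: the syscall.tbl update above replaces the quotactl_path entry with quotactl_fd, which operates on the filesystem backing an open file descriptor rather than a path. A hedged userspace sketch via syscall(2); the __NR_quotactl_fd fallback (443) matches the table entry above:

    #include <sys/quota.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_quotactl_fd
    #define __NR_quotactl_fd 443
    #endif

    /* Query or set quotas on the filesystem that backs fd. */
    static long quotactl_fd_wrapper(int fd, int cmd, int id, void *addr)
    {
            return syscall(__NR_quotactl_fd, fd, cmd, id, addr);
    }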
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 088dd2afcfe4..d01e3401581d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,7 +140,9 @@ config PPC
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
+ select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+ select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -187,7 +189,7 @@ config PPC
select GENERIC_VDSO_TIME_NS
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
- select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
+ select HAVE_ARCH_HUGE_VMAP if PPC_RADIX_MMU || PPC_8xx
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
@@ -266,6 +268,7 @@ config PPC
select PPC_DAWR if PPC64
select RTC_LIB
select SPARSE_IRQ
+ select STRICT_KERNEL_RWX if STRICT_MODULE_RWX
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select VIRT_TO_BUS if !PPC64
@@ -289,6 +292,7 @@ config PANIC_TIMEOUT
config COMPAT
bool "Enable support for 32bit binaries"
depends on PPC64
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
default y if !CPU_LITTLE_ENDIAN
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
@@ -403,10 +407,6 @@ config PPC_ADV_DEBUG_DAC_RANGE
config PPC_DAWR
bool
-config ZONE_DMA
- bool
- default y if PPC_BOOK3E_64
-
config PGTABLE_LEVELS
int
default 2 if !PPC64
@@ -426,7 +426,7 @@ source "kernel/Kconfig.hz"
config MATH_EMULATION
bool "Math emulation"
- depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE
+ depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE || PPC_MICROWATT
select PPC_FPU_REGS
help
Some PowerPC chips designed for embedded applications do not have
@@ -671,7 +671,7 @@ config NODES_SHIFT
int
default "8" if PPC64
default "4"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config USE_PERCPU_NUMA_NODE_ID
def_bool y
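Note: the new ARCH_HAS_SET_MEMORY and ARCH_HAS_STRICT_MODULE_RWX selections mean powerpc now provides the generic set_memory_*() helpers, which the module loader uses to enforce read-only and non-executable permissions on module mappings. A hedged sketch of the helper interface these selections expose:

    #include <linux/set_memory.h>

    /* Make one page starting at addr read-only; the _rw/_x/_nx variants
     * change the other permission bits the same way. */
    static int make_page_ro(unsigned long addr)
    {
            return set_memory_ro(addr, 1);
    }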
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 6342f9da4545..205cd77f321f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -84,6 +84,11 @@ config MSI_BITMAP_SELFTEST
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
+ depends on PPC64
+
+config PPC_RFI_SRR_DEBUG
+ bool "Include extra checks for RFI SRR register validity"
+ depends on PPC_BOOK3S_64
config XMON
bool "Include xmon kernel debugger"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 3212d076ac6a..712c5e8768ce 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -376,6 +376,16 @@ ppc64_book3e_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-64bit.config \
-f $(srctree)/Makefile allmodconfig
+PHONY += ppc32_randconfig
+ppc32_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/32-bit.config \
+ -f $(srctree)/Makefile randconfig
+
+PHONY += ppc64_randconfig
+ppc64_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/64-bit.config \
+ -f $(srctree)/Makefile randconfig
+
define archhelp
@echo '* zImage - Build default images selected by kernel config'
@echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 2b8da923ceca..e312ea802aa6 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -163,6 +163,8 @@ src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
+src-plat-$(CONFIG_PPC_MICROWATT) += fixed-head.S microwatt.c
+
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
src-boot := $(src-wlib) $(src-plat) empty.c
@@ -227,7 +229,7 @@ $(obj)/wrapper.a: $(obj-wlib) FORCE
hostprogs := addnote hack-coff mktree
-targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
+targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
@@ -355,6 +357,8 @@ image-$(CONFIG_MVME5100) += dtbImage.mvme5100
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
+image-$(CONFIG_PPC_MICROWATT) += dtbImage.microwatt
+
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
ifdef CONFIG_PPC32
diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c
index 6098b879ac97..977eb15a6d17 100644
--- a/arch/powerpc/boot/decompress.c
+++ b/arch/powerpc/boot/decompress.c
@@ -99,8 +99,8 @@ static void print_err(char *s)
* partial_decompress - decompresses part or all of a compressed buffer
* @inbuf: input buffer
* @input_size: length of the input buffer
- * @outbuf: input buffer
- * @output_size: length of the input buffer
+ * @outbuf: output buffer
+ * @output_size: length of the output buffer
* @skip number of output bytes to ignore
*
* This function takes compressed data from inbuf, decompresses and write it to
diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
index 5d91036ad626..58fbcfcc98c9 100644
--- a/arch/powerpc/boot/devtree.c
+++ b/arch/powerpc/boot/devtree.c
@@ -13,6 +13,7 @@
#include "string.h"
#include "stdio.h"
#include "ops.h"
+#include "of.h"
void dt_fixup_memory(u64 start, u64 size)
{
@@ -23,21 +24,25 @@ void dt_fixup_memory(u64 start, u64 size)
root = finddevice("/");
if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
naddr = 2;
+ else
+ naddr = be32_to_cpu(naddr);
if (naddr < 1 || naddr > 2)
fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
nsize = 1;
+ else
+ nsize = be32_to_cpu(nsize);
if (nsize < 1 || nsize > 2)
fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
i = 0;
if (naddr == 2)
- memreg[i++] = start >> 32;
- memreg[i++] = start & 0xffffffff;
+ memreg[i++] = cpu_to_be32(start >> 32);
+ memreg[i++] = cpu_to_be32(start & 0xffffffff);
if (nsize == 2)
- memreg[i++] = size >> 32;
- memreg[i++] = size & 0xffffffff;
+ memreg[i++] = cpu_to_be32(size >> 32);
+ memreg[i++] = cpu_to_be32(size & 0xffffffff);
memory = finddevice("/memory");
if (! memory) {
@@ -45,9 +50,9 @@ void dt_fixup_memory(u64 start, u64 size)
setprop_str(memory, "device_type", "memory");
}
- printf("Memory <- <0x%x", memreg[0]);
+ printf("Memory <- <0x%x", be32_to_cpu(memreg[0]));
for (i = 1; i < (naddr + nsize); i++)
- printf(" 0x%x", memreg[i]);
+ printf(" 0x%x", be32_to_cpu(memreg[i]));
printf("> (%ldMB)\n\r", (unsigned long)(size >> 20));
setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32));
@@ -65,10 +70,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus));
while ((devp = find_node_by_devtype(devp, "cpu"))) {
- setprop_val(devp, "clock-frequency", cpu);
- setprop_val(devp, "timebase-frequency", tb);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
+ setprop_val(devp, "timebase-frequency", cpu_to_be32(tb));
if (bus > 0)
- setprop_val(devp, "bus-frequency", bus);
+ setprop_val(devp, "bus-frequency", cpu_to_be32(bus));
}
timebase_period_ns = 1000000000 / tb;
@@ -80,7 +85,7 @@ void dt_fixup_clock(const char *path, u32 freq)
if (devp) {
printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq));
- setprop_val(devp, "clock-frequency", freq);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(freq));
}
}
@@ -133,8 +138,12 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
{
if (getprop(node, "#address-cells", naddr, 4) != 4)
*naddr = 2;
+ else
+ *naddr = be32_to_cpu(*naddr);
if (getprop(node, "#size-cells", nsize, 4) != 4)
*nsize = 1;
+ else
+ *nsize = be32_to_cpu(*nsize);
}
static void copy_val(u32 *dest, u32 *src, int naddr)
@@ -163,9 +172,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr)
int i, carry = 0;
for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) {
- u64 tmp = (u64)reg[i] + add[i] + carry;
+ u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry;
carry = tmp >> 32;
- reg[i] = (u32)tmp;
+ reg[i] = cpu_to_be32((u32)tmp);
}
return !carry;
@@ -180,18 +189,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
u32 end;
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- if (reg[i] < range[i])
+ if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
return 0;
- if (reg[i] > range[i])
+ if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
break;
}
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- end = range[i] + rangesize[i];
+ end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
- if (reg[i] < end)
+ if (be32_to_cpu(reg[i]) < end)
break;
- if (reg[i] > end)
+ if (be32_to_cpu(reg[i]) > end)
return 0;
}
@@ -240,7 +249,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
return 0;
dt_get_reg_format(parent, &naddr, &nsize);
-
if (nsize > 2)
return 0;
@@ -252,10 +260,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
copy_val(last_addr, prop_buf + offset, naddr);
- ret_size = prop_buf[offset + naddr];
+ ret_size = be32_to_cpu(prop_buf[offset + naddr]);
if (nsize == 2) {
ret_size <<= 32;
- ret_size |= prop_buf[offset + naddr + 1];
+ ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]);
}
for (;;) {
@@ -278,7 +286,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
offset = find_range(last_addr, prop_buf, prev_naddr,
naddr, prev_nsize, buflen / 4);
-
if (offset < 0)
return 0;
@@ -296,8 +303,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
if (naddr > 2)
return 0;
- ret_addr = ((u64)last_addr[2] << 32) | last_addr[3];
-
+ ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]);
if (sizeof(void *) == 4 &&
(ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL ||
ret_addr + ret_size > 0x100000000ULL))
@@ -350,11 +356,14 @@ int dt_is_compatible(void *node, const char *compat)
int dt_get_virtual_reg(void *node, void **addr, int nres)
{
unsigned long xaddr;
- int n;
+ int n, i;
n = getprop(node, "virtual-reg", addr, nres * 4);
- if (n > 0)
+ if (n > 0) {
+ for (i = 0; i < n/4; i ++)
+ ((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]);
return n / 4;
+ }
for (n = 0; n < nres; n++) {
if (!dt_xlate_reg(node, n, &xaddr, NULL))
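Note: the devtree.c (and later ns16550.c) changes convert device-tree property cells with be32_to_cpu()/cpu_to_be32(): flattened-device-tree data is always big-endian, so the boot wrapper must byteswap when it is built little-endian (as for Microwatt). A minimal sketch of the pattern using the wrapper's own getprop() helper:

    #include "ops.h"
    #include "of.h"         /* pulls in be32_to_cpu()/cpu_to_be32() as in the patch */

    /* Read one u32 property and convert it from DT (big-endian) byte order. */
    static int get_u32_prop(void *devp, const char *name, u32 *val)
    {
            if (getprop(devp, name, val, sizeof(*val)) != sizeof(*val))
                    return -1;
            *val = be32_to_cpu(*val);
            return 0;
    }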
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index c2717f31925a..ccda0a91abf0 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -122,7 +122,15 @@
};
/include/ "pq3-i2c-0.dtsi"
+ i2c@3000 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-i2c-1.dtsi"
+ i2c@3100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-duart-0.dtsi"
/include/ "pq3-espi-0.dtsi"
spi0: spi@7000 {
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 872e4485dc3f..ddc018d42252 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -371,7 +371,23 @@
};
/include/ "qoriq-i2c-0.dtsi"
+ i2c@118000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@118100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-i2c-1.dtsi"
+ i2c@119000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@119100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-duart-0.dtsi"
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts
new file mode 100644
index 000000000000..974abbdda249
--- /dev/null
+++ b/arch/powerpc/boot/dts/microwatt.dts
@@ -0,0 +1,138 @@
+/dts-v1/;
+
+/ {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ model-name = "microwatt";
+ compatible = "microwatt-soc";
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ reserved-memory {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ ranges;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000 0x10000000>;
+ };
+
+ cpus {
+ #size-cells = <0x00>;
+ #address-cells = <0x01>;
+
+ ibm,powerpc-cpu-features {
+ display-name = "Microwatt";
+ isa = <3000>;
+ device_type = "cpu-features";
+ compatible = "ibm,powerpc-cpu-features";
+
+ mmu-radix {
+ isa = <3000>;
+ usable-privilege = <2>;
+ };
+
+ little-endian {
+ isa = <2050>;
+ usable-privilege = <3>;
+ hwcap-bit-nr = <1>;
+ };
+
+ cache-inhibited-large-page {
+ isa = <2040>;
+ usable-privilege = <2>;
+ };
+
+ fixed-point-v3 {
+ isa = <3000>;
+ usable-privilege = <3>;
+ };
+
+ no-execute {
+ isa = <2010>;
+ usable-privilege = <2>;
+ };
+
+ floating-point {
+ hwcap-bit-nr = <27>;
+ isa = <0>;
+ usable-privilege = <3>;
+ };
+ };
+
+ PowerPC,Microwatt@0 {
+ i-cache-sets = <2>;
+ ibm,dec-bits = <64>;
+ reservation-granule-size = <64>;
+ clock-frequency = <100000000>;
+ timebase-frequency = <100000000>;
+ i-tlb-sets = <1>;
+ ibm,ppc-interrupt-server#s = <0>;
+ i-cache-block-size = <64>;
+ d-cache-block-size = <64>;
+ d-cache-sets = <2>;
+ i-tlb-size = <64>;
+ cpu-version = <0x990000>;
+ status = "okay";
+ i-cache-size = <0x1000>;
+ ibm,processor-radix-AP-encodings = <0x0c 0xa0000010 0x20000015 0x4000001e>;
+ tlb-size = <0>;
+ tlb-sets = <0>;
+ device_type = "cpu";
+ d-tlb-size = <128>;
+ d-tlb-sets = <2>;
+ reg = <0>;
+ general-purpose;
+ 64-bit;
+ d-cache-size = <0x1000>;
+ ibm,chip-id = <0>;
+ };
+ };
+
+ soc@c0000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&ICS>;
+
+ ranges = <0 0 0xc0000000 0x40000000>;
+
+ interrupt-controller@4000 {
+ compatible = "openpower,xics-presentation", "ibm,ppc-xicp";
+ ibm,interrupt-server-ranges = <0x0 0x1>;
+ reg = <0x4000 0x100>;
+ };
+
+ ICS: interrupt-controller@5000 {
+ compatible = "openpower,xics-sources";
+ interrupt-controller;
+ interrupt-ranges = <0x10 0x10>;
+ reg = <0x5000 0x100>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ };
+
+ UART0: serial@2000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x2000 0x8>;
+ clock-frequency = <100000000>;
+ current-speed = <115200>;
+ reg-shift = <2>;
+ fifo-size = <16>;
+ interrupts = <0x10 0x1>;
+ };
+ };
+
+ chosen {
+ bootargs = "";
+ ibm,architecture-vec-5 = [19 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 40 00 40];
+ stdout-path = &UART0;
+ };
+};
diff --git a/arch/powerpc/boot/microwatt.c b/arch/powerpc/boot/microwatt.c
new file mode 100644
index 000000000000..ca9d83617fc1
--- /dev/null
+++ b/arch/powerpc/boot/microwatt.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stddef.h>
+#include "stdio.h"
+#include "types.h"
+#include "io.h"
+#include "ops.h"
+
+BSS_STACK(8192);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ unsigned long heapsize = 16*1024*1024 - (unsigned long)_end;
+
+ /*
+ * Disable interrupts and turn off MSR_RI, since we'll
+ * shortly be overwriting the interrupt vectors.
+ */
+ __asm__ volatile("mtmsrd %0,1" : : "r" (0));
+
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
index b0da4466d419..f16d2be1d0f3 100644
--- a/arch/powerpc/boot/ns16550.c
+++ b/arch/powerpc/boot/ns16550.c
@@ -15,6 +15,7 @@
#include "stdio.h"
#include "io.h"
#include "ops.h"
+#include "of.h"
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
@@ -58,16 +59,20 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp)
int n;
u32 reg_offset;
- if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
+ if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) {
+ printf("virt reg parse fail...\r\n");
return -1;
+ }
n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
if (n == sizeof(reg_offset))
- reg_base += reg_offset;
+ reg_base += be32_to_cpu(reg_offset);
n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
if (n != sizeof(reg_shift))
reg_shift = 0;
+ else
+ reg_shift = be32_to_cpu(reg_shift);
scdp->open = ns16550_open;
scdp->putc = ns16550_putc;
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index cdb796b76e2e..1f4676bab786 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -342,6 +342,11 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
+microwatt)
+ link_address='0x500000'
+ platformo="$object/fixed-head.o $object/$platform.o"
+ binary=y
+ ;;
treeboot-currituck)
link_address='0x1000000'
;;
diff --git a/arch/powerpc/boot/zImage.ps3.lds.S b/arch/powerpc/boot/zImage.ps3.lds.S
index 7b2ff2eaa73a..d0ffb493614d 100644
--- a/arch/powerpc/boot/zImage.ps3.lds.S
+++ b/arch/powerpc/boot/zImage.ps3.lds.S
@@ -8,7 +8,7 @@ SECTIONS
.kernel:vmlinux.bin : { *(.kernel:vmlinux.bin) }
_vmlinux_end = .;
- . = ALIGN(4096);
+ . = ALIGN(8);
_dtb_start = .;
.kernel:dtb : { *(.kernel:dtb) }
_dtb_end = .;
diff --git a/arch/powerpc/configs/32-bit.config b/arch/powerpc/configs/32-bit.config
new file mode 100644
index 000000000000..ad6546850c68
--- /dev/null
+++ b/arch/powerpc/configs/32-bit.config
@@ -0,0 +1 @@
+# CONFIG_PPC64 is not set
diff --git a/arch/powerpc/configs/64-bit.config b/arch/powerpc/configs/64-bit.config
new file mode 100644
index 000000000000..0fe6406929e2
--- /dev/null
+++ b/arch/powerpc/configs/64-bit.config
@@ -0,0 +1 @@
+CONFIG_PPC64=y
diff --git a/arch/powerpc/configs/microwatt_defconfig b/arch/powerpc/configs/microwatt_defconfig
new file mode 100644
index 000000000000..a08b739123da
--- /dev/null
+++ b/arch/powerpc/configs/microwatt_defconfig
@@ -0,0 +1,98 @@
+# CONFIG_SWAP is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_PPC64=y
+# CONFIG_PPC_KUEP is not set
+# CONFIG_PPC_KUAP is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_NR_IRQS=64
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_PPC_POWERNV is not set
+# CONFIG_PPC_PSERIES is not set
+CONFIG_PPC_MICROWATT=y
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+CONFIG_CPU_FREQ=y
+CONFIG_HZ_100=y
+# CONFIG_PPC_MEM_KEYS is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_COREDUMP is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_INET=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_RAW_DIAG=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_NVRAM is not set
+CONFIG_RANDOM_TRUST_CPU=y
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_NVMEM is not set
+CONFIG_EXT4_FS=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MISC is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_PPC_DISABLE_WERROR=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+# CONFIG_XMON_DEFAULT_RO_MODE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 949ff9ccda5e..d21f266cea9a 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -57,3 +57,28 @@ CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PPC_16K_PAGES=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_FS=y
+CONFIG_PPC_PTDUMP=y
+CONFIG_MODULES=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_8xx_GPIO=y
+CONFIG_WATCHDOG=y
+CONFIG_8xxx_WDT=y
+CONFIG_SMC_UCODE_PATCH=y
+CONFIG_ADVANCED_OPTIONS=y
+CONFIG_PIN_TLB=y
+CONFIG_PERF_EVENTS=y
+CONFIG_MATH_EMULATION=y
+CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_IPV6=y
+CONFIG_BPF_JIT=y
+CONFIG_DEBUG_VM_PGTABLE=y
+CONFIG_BDI_SWITCH=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_CPM_ADDR=0xff002008
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 2c87e856d839..8bfeea6c7de7 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -309,6 +309,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PPC_EMULATED_STATS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 701811c91a6f..0ad2291337a7 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -368,7 +368,9 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 1fd9d1260f9e..ee09f7bb6ea9 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -621,7 +621,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_NVRAM=y
CONFIG_DTLK=m
-CONFIG_R3964=m
CONFIG_CARDMAN_4000=m
CONFIG_CARDMAN_4040=m
CONFIG_IPWIRELESS=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 50168dde4ea5..b183629f1bcf 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -289,7 +289,9 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 1c7b75834e04..222823861a67 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -71,8 +71,13 @@ void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
+unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
+unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
+#endif
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
@@ -120,6 +125,7 @@ extern s32 patch__call_flush_branch_caches3;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_branch_caches;
@@ -140,7 +146,7 @@ void kvmhv_load_host_pmu(void);
void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
-int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 61c6e8b200e8..a1732a79e92a 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -23,7 +23,7 @@
#define __atomic_release_fence() \
__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
@@ -32,13 +32,13 @@ static __inline__ int atomic_read(const atomic_t *v)
return t;
}
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int a, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \
int t; \
\
@@ -53,7 +53,7 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
@@ -109,16 +109,16 @@ ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-static __inline__ void atomic_inc(atomic_t *v)
+static __inline__ void arch_atomic_inc(atomic_t *v)
{
int t;
@@ -131,9 +131,9 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{
int t;
@@ -149,7 +149,7 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-static __inline__ void atomic_dec(atomic_t *v)
+static __inline__ void arch_atomic_dec(atomic_t *v)
{
int t;
@@ -162,9 +162,9 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{
int t;
@@ -180,17 +180,20 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
return t;
}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/*
* Don't want to override the generic atomic_try_cmpxchg_acquire, because
@@ -199,7 +202,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* would be a surprise).
*/
static __always_inline bool
-atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
+arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -229,7 +232,7 @@ atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
@@ -250,7 +253,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return t;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -259,7 +262,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic_inc_not_zero(atomic_t *v)
+static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{
int t1, t2;
@@ -280,14 +283,14 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
return t1;
}
-#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
+#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
+static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
int t;
@@ -307,13 +310,13 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) }
-static __inline__ s64 atomic64_read(const atomic64_t *v)
+static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
@@ -322,13 +325,13 @@ static __inline__ s64 atomic64_read(const atomic64_t *v)
return t;
}
-static __inline__ void atomic64_set(atomic64_t *v, s64 i)
+static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -344,7 +347,7 @@ static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -362,7 +365,7 @@ atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \
s64 res, t; \
\
@@ -386,11 +389,11 @@ atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
@@ -401,16 +404,16 @@ ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-static __inline__ void atomic64_inc(atomic64_t *v)
+static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
s64 t;
@@ -423,9 +426,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_inc atomic64_inc
+#define arch_atomic64_inc arch_atomic64_inc
-static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -441,7 +444,7 @@ static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-static __inline__ void atomic64_dec(atomic64_t *v)
+static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
s64 t;
@@ -454,9 +457,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_dec atomic64_dec
+#define arch_atomic64_dec arch_atomic64_dec
-static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -472,14 +475,14 @@ static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
return t;
}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
-static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 t;
@@ -498,16 +501,19 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic64_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic64_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/**
* atomic64_fetch_add_unless - add unless the number is a given value
@@ -518,7 +524,7 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 t;
@@ -539,7 +545,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return t;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/**
 * atomic64_inc_not_zero - increment unless the number is zero
@@ -548,7 +554,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
s64 t1, t2;
@@ -569,7 +575,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
-#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
+#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
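
A note on the rename: moving everything to arch_atomic_* lets the powerpc primitives sit underneath the generic instrumented wrappers, which run the sanitizer hooks and then call down into the arch_ variant. A rough user-space model of that layering, assuming nothing about the real wrapper internals (the arch_model_*/model_* names are illustrative only, and C11 atomics stand in for the larx/stcx. loops):

#include <stdatomic.h>
#include <stdio.h>

/* "Arch" layer: the raw operation (stands in for arch_atomic_fetch_add_unless). */
static int arch_model_fetch_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* Retry the CAS until it succeeds or the excluded value is seen. */
	while (old != u && !atomic_compare_exchange_weak(v, &old, old + a))
		;
	return old;
}

/* "Generic" layer: what callers use (stands in for atomic_fetch_add_unless);
 * the instrumented wrapper would run KASAN/KCSAN hooks before the arch_ call. */
static int model_fetch_add_unless(atomic_int *v, int a, int u)
{
	return arch_model_fetch_add_unless(v, a, u);
}

int main(void)
{
	atomic_int v = 5;

	printf("%d\n", model_fetch_add_unless(&v, 1, 0));	/* 5; v becomes 6 */
	printf("%d\n", model_fetch_add_unless(&v, 1, 6));	/* 6; v unchanged */
	return 0;
}
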
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 7ae29cfb06c0..f0e687236484 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -46,6 +46,8 @@
# define SMPWMB eieio
#endif
+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
deleted file mode 100644
index 2a0a467d2985..000000000000
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
-#define _ASM_POWERPC_BOOK3S_32_HASH_H
-#ifdef __KERNEL__
-
-/*
- * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
- * table containing PTEs, together with a set of 16 segment registers,
- * to define the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much
- * like that used by the i386, for the sake of the Linux memory
- * management code. Low-level assembler code in hash_low_32.S
- * (procedure hash_page) is responsible for extracting ptes from the
- * tree and putting them into the hash table when necessary, and
- * updating the accessed and modified bits in the page table tree.
- */
-
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
-#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
-#define _PAGE_SPECIAL 0x800 /* software: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* We never clear the high word of the pte */
-#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
-#endif
-
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 1670dfe9d4f1..64201125a287 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -7,35 +7,104 @@
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
+
+extern struct static_key_false disable_kuap_key;
+extern struct static_key_false disable_kuep_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
+}
+
+static __always_inline bool kuep_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
+}
+
+static inline void kuep_lock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) | SR_NX);
+}
+
+static inline void kuep_unlock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) & ~SR_NX);
+}
+
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>
-static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
+#define KUAP_NONE (~0UL)
+#define KUAP_ALL (~1UL)
+
+static inline void kuap_lock_one(unsigned long addr)
{
- addr &= 0xf0000000; /* align addr to start of segment */
- barrier(); /* make sure thread.kuap is updated before playing with SRs */
- while (addr < end) {
- mtsr(sr, addr);
- sr += 0x111; /* next VSID */
- sr &= 0xf0ffffff; /* clear VSID overflow */
- addr += 0x10000000; /* address of next segment */
- }
+ mtsr(mfsr(addr) | SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_one(unsigned long addr)
+{
+ mtsr(mfsr(addr) & ~SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_lock_all(void)
+{
+ update_user_segments(mfsr(0) | SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_all(void)
+{
+ update_user_segments(mfsr(0) & ~SR_KS);
isync(); /* Context sync required after mtsr() */
}
+void kuap_lock_all_ool(void);
+void kuap_unlock_all_ool(void);
+
+static inline void kuap_lock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_lock_one(addr);
+ else if (!ool)
+ kuap_lock_all();
+ else
+ kuap_lock_all_ool();
+}
+
+static inline void kuap_unlock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_unlock_one(addr);
+ else if (!ool)
+ kuap_unlock_all();
+ else
+ kuap_unlock_all_ool();
+}
+
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
unsigned long kuap = current->thread.kuap;
- u32 addr = kuap & 0xf0000000;
- u32 end = kuap << 28;
+
+ if (kuap_is_disabled())
+ return;
regs->kuap = kuap;
- if (unlikely(!kuap))
+ if (unlikely(kuap == KUAP_NONE))
return;
- current->thread.kuap = 0;
- kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* Set Ks */
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, false);
}
static inline void kuap_user_restore(struct pt_regs *regs)
@@ -44,22 +113,22 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
- u32 addr = regs->kuap & 0xf0000000;
- u32 end = regs->kuap << 28;
+ if (kuap_is_disabled())
+ return;
current->thread.kuap = regs->kuap;
- if (unlikely(regs->kuap == kuap))
- return;
-
- kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ kuap_unlock(regs->kuap, false);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;
- WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
return kuap;
}
@@ -72,84 +141,78 @@ static inline void kuap_assert_locked(void)
static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
- u32 addr, end;
+ if (kuap_is_disabled())
+ return;
BUILD_BUG_ON(!__builtin_constant_p(dir));
- BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);
if (!(dir & KUAP_WRITE))
return;
- addr = (__force u32)to;
-
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
-
- end = min(addr + size, TASK_SIZE);
-
- current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
- kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ current->thread.kuap = (__force u32)to;
+ kuap_unlock_one((__force u32)to);
}
-static __always_inline void prevent_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static __always_inline void prevent_user_access(unsigned long dir)
{
- u32 addr, end;
-
- BUILD_BUG_ON(!__builtin_constant_p(dir));
+ u32 kuap = current->thread.kuap;
- if (dir & KUAP_CURRENT_WRITE) {
- u32 kuap = current->thread.kuap;
-
- if (unlikely(!kuap))
- return;
+ if (kuap_is_disabled())
+ return;
- addr = kuap & 0xf0000000;
- end = kuap << 28;
- } else if (dir & KUAP_WRITE) {
- addr = (__force u32)to;
- end = min(addr + size, TASK_SIZE);
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
- } else {
+ if (!(dir & KUAP_WRITE))
return;
- }
- current->thread.kuap = 0;
- kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* set Ks */
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, true);
}
static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
- if (flags)
- prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(flags, true);
+ }
return flags;
}
static inline void restore_user_access(unsigned long flags)
{
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
+ if (kuap_is_disabled())
+ return;
- if (flags)
- allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = flags;
+ kuap_unlock(flags, true);
+ }
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- unsigned long begin = regs->kuap & 0xf0000000;
- unsigned long end = regs->kuap << 28;
+ unsigned long kuap = regs->kuap;
+
+ if (kuap_is_disabled())
+ return false;
+
+ if (!is_write || kuap == KUAP_ALL)
+ return false;
+ if (kuap == KUAP_NONE)
+ return true;
+
+ /* If faulting address doesn't match unlocked segment, unlock all */
+ if ((kuap ^ address) & 0xf0000000)
+ regs->kuap = KUAP_ALL;
- return is_write && (address < begin || address >= end);
+ return false;
}
#endif /* CONFIG_PPC_KUAP */
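
The rewritten 32s KUAP state is now a single user address (or the KUAP_NONE / KUAP_ALL sentinels) in current->thread.kuap, rather than an (addr, end) range packed into one word. A small user-space sketch of the fault-side check above, assuming the same sentinel encoding (MODEL_* names are made up, and the real code also returns early when KUAP is disabled):

#include <stdbool.h>
#include <stdio.h>

#define MODEL_KUAP_NONE (~0UL)
#define MODEL_KUAP_ALL  (~1UL)

/* Sketch of bad_kuap_fault(): only a write with no matching unlocked
 * segment counts as a bad access. */
static bool model_bad_kuap_fault(unsigned long *kuap, unsigned long address, bool is_write)
{
	if (!is_write || *kuap == MODEL_KUAP_ALL)
		return false;
	if (*kuap == MODEL_KUAP_NONE)
		return true;

	/* Fault outside the unlocked segment: record that everything must be
	 * treated as unlocked and let the fault be handled normally. */
	if ((*kuap ^ address) & 0xf0000000)
		*kuap = MODEL_KUAP_ALL;

	return false;
}

int main(void)
{
	unsigned long kuap = 0x20001000;	/* one segment unlocked */

	printf("%d\n", model_bad_kuap_fault(&kuap, 0x20002000, true));	/* 0: same segment */
	printf("%d\n", model_bad_kuap_fault(&kuap, 0x30000000, true));	/* 0, but kuap -> ALL */
	kuap = MODEL_KUAP_NONE;
	printf("%d\n", model_bad_kuap_fault(&kuap, 0x30000000, true));	/* 1: nothing unlocked */
	return 0;
}
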
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index b85f8e114a9c..f5be185cbdf8 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -67,6 +67,16 @@ struct ppc_bat {
#ifndef __ASSEMBLY__
/*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then hash functions will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
+
+/*
* Hardware Page Table Entry
* Note that the xpn and x bitfields are used only by processors that
* support extended addressing; otherwise, those bits are reserved.
@@ -102,6 +112,37 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
+#include <asm/reg.h>
+#include <asm/task_size_32.h>
+
+static __always_inline void update_user_segment(u32 n, u32 val)
+{
+ if (n << 28 < TASK_SIZE)
+ mtsr(val + n * 0x111, n << 28);
+}
+
+static __always_inline void update_user_segments(u32 val)
+{
+ val &= 0xf0ffffff;
+
+ update_user_segment(0, val);
+ update_user_segment(1, val);
+ update_user_segment(2, val);
+ update_user_segment(3, val);
+ update_user_segment(4, val);
+ update_user_segment(5, val);
+ update_user_segment(6, val);
+ update_user_segment(7, val);
+ update_user_segment(8, val);
+ update_user_segment(9, val);
+ update_user_segment(10, val);
+ update_user_segment(11, val);
+ update_user_segment(12, val);
+ update_user_segment(13, val);
+ update_user_segment(14, val);
+ update_user_segment(15, val);
+}
+
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
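
CTX_TO_VSID spreads contexts and effective segment IDs across the 24-bit VSID space, and update_user_segments() relies on the same 0x111 stride per segment. A quick user-space check of the skew (MODEL_CTX_TO_VSID mirrors the macro above; the values are only for illustration):

#include <stdio.h>

/* Same skew as CTX_TO_VSID: context * 897*16 plus ESID * 0x111, folded to 24 bits. */
#define MODEL_CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

int main(void)
{
	unsigned int ctx = 42;

	/* VSIDs for user segments 0..3 of one context: consecutive ESIDs land
	 * 0x111 apart, different contexts land 897*16 apart. */
	for (unsigned int esid = 0; esid < 4; esid++)
		printf("ctx %u esid %u -> vsid 0x%06x\n",
		       ctx, esid, MODEL_CTX_TO_VSID(ctx, esid));
	return 0;
}
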
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 83c65845a1a9..609c80f67194 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -4,7 +4,43 @@
#include <asm-generic/pgtable-nopmd.h>
-#include <asm/book3s/32/hash.h>
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: exec allowed */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
/* And here we include common definitions */
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 9700da3a4093..a1cc73a88710 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -398,8 +398,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
#endif /* !CONFIG_PPC_KUAP */
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index eace8c3f7b0a..c02f42d1031e 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -19,6 +19,7 @@ struct mmu_psize_def {
int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
+ unsigned long h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
union {
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
unsigned long ap; /* Ap encoding used by PowerISA 3.0 */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a666d561b44d..4d9941b2fe51 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -232,6 +232,9 @@ extern unsigned long __pmd_frag_size_shift;
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
+ H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
+
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 8b33601cdb9d..a46fd37ad552 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -4,6 +4,10 @@
#include <asm/hvcall.h>
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
struct vm_area_struct;
struct mm_struct;
struct mmu_gather;
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 0e1263455d73..ad130e15a126 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,7 +8,6 @@
#include <asm/book3s/32/pgtable.h>
#endif
-#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
/* Insert a PTE, top-level function is out of line. It uses an inline
* low level function in the respective pgtable-* files
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index d5da7ddbf0fc..350de8f90250 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -91,7 +91,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
}
#define HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index cf091c4c22e5..05f246c0e36e 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -185,14 +185,14 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
return x;
}
-#define xchg_local(ptr,x) \
+#define arch_xchg_local(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_local((ptr), \
(unsigned long)_x_, sizeof(*(ptr))); \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -467,7 +467,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -476,7 +476,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -484,7 +484,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -493,7 +493,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -502,29 +502,29 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
#ifdef CONFIG_PPC64
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed(ptr, o, n) \
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
-#define cmpxchg64_acquire(ptr, o, n) \
+#define arch_cmpxchg64_acquire(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_acquire((ptr), (o), (n)); \
+ arch_cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
#endif /* __KERNEL__ */
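
arch_cmpxchg64*() keeps the old contract: refuse anything that is not 8 bytes wide at build time, then defer to the generic-width macro. A user-space analogue of that guard, under the assumption that a compile-time assert is an acceptable stand-in for BUILD_BUG_ON (model_cmpxchg64 and the __sync builtin are illustrative, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Accept only 8-byte objects, then forward to the word-sized primitive. */
#define model_cmpxchg64(ptr, o, n) \
({ \
	_Static_assert(sizeof(*(ptr)) == 8, "cmpxchg64 needs a 64-bit object"); \
	__typeof__(*(ptr)) _old = (o); \
	__sync_val_compare_and_swap((ptr), _old, (n)); \
})

int main(void)
{
	uint64_t v = 1;

	/* Succeeds: v was 1, becomes 2; the old value is returned. */
	printf("%llu %llu\n",
	       (unsigned long long)model_cmpxchg64(&v, 1ULL, 2ULL),
	       (unsigned long long)v);
	return 0;
}
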
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index f1d029bf906e..a95f63788c6b 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,13 +23,13 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
-int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
-int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
-int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
+int patch_branch(u32 *addr, unsigned long target, int flags);
+int patch_instruction(u32 *addr, struct ppc_inst instr);
+int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
static inline unsigned long patch_site_addr(s32 *site)
{
@@ -38,18 +38,18 @@ static inline unsigned long patch_site_addr(s32 *site)
static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
- return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
+ return patch_instruction((u32 *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
- return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
+ return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
unsigned int set)
{
- return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
+ return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
@@ -59,10 +59,8 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
-int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
-unsigned long branch_target(const struct ppc_inst *instr);
-int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
- const struct ppc_inst *src);
+unsigned long branch_target(const u32 *instr);
+int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
@@ -73,9 +71,9 @@ void __patch_exception(int exc, unsigned long addr);
#endif
#define OP_RT_RA_MASK 0xffff0000UL
-#define LIS_R2 (PPC_INST_ADDIS | __PPC_RT(R2))
-#define ADDIS_R2_R12 (PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
-#define ADDI_R2_R2 (PPC_INST_ADDI | __PPC_RT(R2) | __PPC_RA(R2))
+#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
+#define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
+#define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
static inline unsigned long ppc_function_entry(void *func)
@@ -180,12 +178,10 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
#define R2_STACK_OFFSET 40
#endif
-#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
- ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
/* usually preceded by a mflr r0 */
-#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
- ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 98c8bd155bf9..b167186aaee4 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
+/*
+ * tlb_thread_siblings are siblings which share a TLB. This is not
+ * architected, is not something a hypervisor could emulate and a future
+ * CPU may change behaviour even in compat mode, so this should only be
+ * used on PowerNV, and only with care.
+ */
+static inline int cpu_first_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu & ~0x6; /* Big Core */
+ else
+ return cpu_first_thread_sibling(cpu);
+}
+
+static inline int cpu_last_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu | 0x6; /* Big Core */
+ else
+ return cpu_last_thread_sibling(cpu);
+}
+
+static inline int cpu_tlb_thread_sibling_step(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return 2; /* Big Core */
+ else
+ return 1;
+}
+
static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
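
The new cpu_*_tlb_thread_sibling() helpers group the threads that share a TLB: on CPU_FTR_ARCH_300 big-core parts with 8 threads per core, threads of the same parity pair up, elsewhere they fall back to the ordinary core siblings. A user-space model of the resulting walk (the model_* names and CPU numbers are only for illustration):

#include <stdio.h>

static int model_first_tlb_sibling(int cpu, int threads_per_core, int big_core)
{
	if (big_core && threads_per_core == 8)
		return cpu & ~0x6;			/* same-parity group */
	return cpu & ~(threads_per_core - 1);		/* whole core */
}

static int model_last_tlb_sibling(int cpu, int threads_per_core, int big_core)
{
	if (big_core && threads_per_core == 8)
		return cpu | 0x6;
	return cpu | (threads_per_core - 1);
}

static int model_sibling_step(int threads_per_core, int big_core)
{
	return (big_core && threads_per_core == 8) ? 2 : 1;
}

int main(void)
{
	int cpu = 13, tpc = 8, big = 1;
	int step = model_sibling_step(tpc, big);

	/* cpu 13 (odd thread of a big core) walks 9, 11, 13, 15. */
	for (int s = model_first_tlb_sibling(cpu, tpc, big);
	     s <= model_last_tlb_sibling(cpu, tpc, big); s += step)
		printf("%d\n", s);
	return 0;
}
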
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c1a8aac01cf9..bb6f78fcf981 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -35,6 +35,19 @@
/* PACA save area size in u64 units (exgen, exmc, etc) */
#define EX_SIZE 10
+/* PACA save area offsets */
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#define EX_CTR 72
+
/*
* maximum recursive depth of MCE exceptions
*/
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 4cb9efa2eb21..242204e12993 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -16,7 +16,7 @@
.section ".head.data.\name\()","a",@progbits
.endm
.macro use_ftsec name
- .section ".head.text.\name\()"
+ .section ".head.text.\name\()","ax",@progbits
.endm
/*
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 443050906018..9bcf345cb208 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -294,6 +294,13 @@
#define H_RESIZE_HPT_COMMIT 0x370
#define H_REGISTER_PROC_TBL 0x37C
#define H_SIGNAL_SYS_RESET 0x380
+#define H_ALLOCATE_VAS_WINDOW 0x388
+#define H_MODIFY_VAS_WINDOW 0x38C
+#define H_DEALLOCATE_VAS_WINDOW 0x390
+#define H_QUERY_VAS_WINDOW 0x394
+#define H_QUERY_VAS_CAPABILITIES 0x398
+#define H_QUERY_NX_CAPABILITIES 0x39C
+#define H_GET_NX_FAULT 0x3A0
#define H_INT_GET_SOURCE_INFO 0x3A8
#define H_INT_SET_SOURCE_CONFIG 0x3AC
#define H_INT_GET_SOURCE_CONFIG 0x3B0
@@ -393,6 +400,9 @@
#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
+#define H_CPU_BEHAV_NO_STF_BARRIER (1ull << 54) // IBM bit 9
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
@@ -413,9 +423,9 @@
#define H_RPTI_TYPE_NESTED 0x0001 /* Invalidate nested guest partition-scope */
#define H_RPTI_TYPE_TLB 0x0002 /* Invalidate TLB */
#define H_RPTI_TYPE_PWC 0x0004 /* Invalidate Page Walk Cache */
-/* Invalidate Process Table Entries if H_RPTI_TYPE_NESTED is clear */
+/* Invalidate caching of Process Table Entries if H_RPTI_TYPE_NESTED is clear */
#define H_RPTI_TYPE_PRT 0x0008
-/* Invalidate Partition Table Entries if H_RPTI_TYPE_NESTED is set */
+/* Invalidate caching of Partition Table Entries if H_RPTI_TYPE_NESTED is set */
#define H_RPTI_TYPE_PAT 0x0008
#define H_RPTI_TYPE_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
H_RPTI_TYPE_PRT)
@@ -448,6 +458,9 @@
*/
long plpar_hcall_norets(unsigned long opcode, ...);
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
/**
* plpar_hcall: - Make a pseries hypervisor call
* @opcode: The hypervisor call to make.
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 56a98936a6a9..21cc571ea9c2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -18,8 +18,17 @@
* PACA flags in paca->irq_happened.
*
 * These bits are set when interrupts occur while soft-disabled
- * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
- * is set whenever we manually hard disable.
+ * and allow a proper replay.
+ *
+ * The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
+ * always in synch with the MSR[EE] state, except:
+ * - A window in interrupt entry, where hardware disables MSR[EE] and that
+ * must be "reconciled" with the soft mask state.
+ * - NMI interrupts that hit in awkward places, until they fix the state.
+ * - When local irqs are being enabled and state is being fixed up.
+ * - When returning from an interrupt there are some windows where this
+ * can become out of synch, but gets fixed before the RFI or before
+ * executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
*/
#define PACA_IRQ_HARD_DIS 0x01
#define PACA_IRQ_DBELL 0x02
@@ -389,7 +398,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static inline void may_hard_irq_enable(void) { }
+static inline bool may_hard_irq_enable(void)
+{
+ return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+ BUILD_BUG();
+}
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index 268d3bd073c8..b11c0e2f9639 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -8,17 +8,17 @@
#define ___get_user_instr(gu_op, dest, ptr) \
({ \
- long __gui_ret = 0; \
- unsigned long __gui_ptr = (unsigned long)ptr; \
+ long __gui_ret; \
+ u32 __user *__gui_ptr = (u32 __user *)ptr; \
struct ppc_inst __gui_inst; \
unsigned int __prefix, __suffix; \
- __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
+ \
+ __chk_user_ptr(ptr); \
+ __gui_ret = gu_op(__prefix, __gui_ptr); \
if (__gui_ret == 0) { \
if ((__prefix >> 26) == OP_PREFIX) { \
- __gui_ret = gu_op(__suffix, \
- (unsigned int __user *)__gui_ptr + 1); \
- __gui_inst = ppc_inst_prefix(__prefix, \
- __suffix); \
+ __gui_ret = gu_op(__suffix, __gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, __suffix); \
} else { \
__gui_inst = ppc_inst(__prefix); \
} \
@@ -29,14 +29,15 @@
})
#else /* !CONFIG_PPC64 */
#define ___get_user_instr(gu_op, dest, ptr) \
- gu_op((dest).val, (u32 __user *)(ptr))
+({ \
+ __chk_user_ptr(ptr); \
+ gu_op((dest).val, (u32 __user *)(ptr)); \
+})
#endif /* CONFIG_PPC64 */
-#define get_user_instr(x, ptr) \
- ___get_user_instr(get_user, x, ptr)
+#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
-#define __get_user_instr(x, ptr) \
- ___get_user_instr(__get_user, x, ptr)
+#define __get_user_instr(x, ptr) ___get_user_instr(__get_user, x, ptr)
/*
* Instruction data type for POWER
@@ -59,9 +60,9 @@ static inline int ppc_inst_primary_opcode(struct ppc_inst x)
return ppc_inst_val(x) >> 26;
}
-#ifdef CONFIG_PPC64
-#define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff })
+#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })
+#ifdef CONFIG_PPC64
#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
static inline u32 ppc_inst_suffix(struct ppc_inst x)
@@ -69,68 +70,43 @@ static inline u32 ppc_inst_suffix(struct ppc_inst x)
return x.suffix;
}
-static inline bool ppc_inst_prefixed(struct ppc_inst x)
-{
- return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff;
-}
+#else
+#define ppc_inst_prefix(x, y) ppc_inst(x)
-static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
+static inline u32 ppc_inst_suffix(struct ppc_inst x)
{
- return ppc_inst_prefix(swab32(ppc_inst_val(x)),
- swab32(ppc_inst_suffix(x)));
+ return 0;
}
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- u32 val, suffix;
-
- val = *(u32 *)ptr;
- if ((val >> 26) == OP_PREFIX) {
- suffix = *((u32 *)ptr + 1);
- return ppc_inst_prefix(val, suffix);
- } else {
- return ppc_inst(val);
- }
-}
+#endif /* CONFIG_PPC64 */
-static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
+static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
{
- return *(u64 *)&x == *(u64 *)&y;
+ if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
+ return ppc_inst_prefix(*ptr, *(ptr + 1));
+ else
+ return ppc_inst(*ptr);
}
-#else
-
-#define ppc_inst(x) ((struct ppc_inst){ .val = x })
-
-#define ppc_inst_prefix(x, y) ppc_inst(x)
-
static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
- return false;
-}
-
-static inline u32 ppc_inst_suffix(struct ppc_inst x)
-{
- return 0;
+ return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
}
static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
{
- return ppc_inst(swab32(ppc_inst_val(x)));
-}
-
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- return *ptr;
+ return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
}
static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
{
- return ppc_inst_val(x) == ppc_inst_val(y);
+ if (ppc_inst_val(x) != ppc_inst_val(y))
+ return false;
+ if (!ppc_inst_prefixed(x))
+ return true;
+ return ppc_inst_suffix(x) == ppc_inst_suffix(y);
}
-#endif /* CONFIG_PPC64 */
-
static inline int ppc_inst_len(struct ppc_inst x)
{
return ppc_inst_prefixed(x) ? 8 : 4;
@@ -140,13 +116,13 @@ static inline int ppc_inst_len(struct ppc_inst x)
* Return the address of the next instruction, if the instruction @value was
* located at @location.
*/
-static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *value)
+static inline u32 *ppc_inst_next(u32 *location, u32 *value)
{
struct ppc_inst tmp;
tmp = ppc_inst_read(value);
- return location + ppc_inst_len(tmp);
+ return (void *)location + ppc_inst_len(tmp);
}
static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
@@ -178,6 +154,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
-int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src);
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src);
#endif /* _ASM_POWERPC_INST_H */
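
With ppc_inst_read() now taking a plain u32 pointer, a prefixed instruction is just a word whose primary opcode is OP_PREFIX followed by its suffix word. A user-space sketch of walking such a stream, assuming only what the helpers above encode (model_* names are made up and the example encodings are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MODEL_OP_PREFIX 1

struct model_inst {
	uint32_t val;
	uint32_t suffix;	/* only meaningful for prefixed instructions */
};

/* Mirror of ppc_inst_read(): opcode 1 starts an 8-byte prefixed instruction,
 * so the following word is its suffix. */
static struct model_inst model_inst_read(const uint32_t *p)
{
	struct model_inst x = { .val = p[0], .suffix = 0 };

	if ((p[0] >> 26) == MODEL_OP_PREFIX)
		x.suffix = p[1];
	return x;
}

static int model_inst_len(struct model_inst x)
{
	return ((x.val >> 26) == MODEL_OP_PREFIX) ? 8 : 4;
}

int main(void)
{
	/* A paddi-style prefix word plus its suffix, then an ordinary 4-byte
	 * instruction; the encodings are illustrative only. */
	uint32_t stream[] = { 0x06000000, 0x38630001, 0x60000000 };
	const uint32_t *p = stream;

	while (p < stream + sizeof(stream) / sizeof(stream[0])) {
		struct model_inst x = model_inst_read(p);

		printf("len %d at offset %ld\n", model_inst_len(x),
		       (long)((p - stream) * 4));
		p += model_inst_len(x) / 4;
	}
	return 0;
}
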
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 44cde2e129b8..d4bdf7d274ac 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -73,13 +73,47 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
+#ifdef CONFIG_PPC_BOOK3S_64
+extern char __end_soft_masked[];
+bool search_kernel_soft_mask_table(unsigned long addr);
+unsigned long search_kernel_restart_table(unsigned long addr);
+
+DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_PR)
+ return false;
+
+ if (regs->nip >= (unsigned long)__end_soft_masked)
+ return false;
+
+ return search_kernel_soft_mask_table(regs->nip);
+}
+
+static inline void srr_regs_clobbered(void)
+{
+ local_paca->srr_valid = 0;
+ local_paca->hsrr_valid = 0;
+}
+#else
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
/* Can avoid a test-and-clear because NMIs do not call this */
clear_thread_local_flags(_TLF_NAPPING);
- regs->nip = (unsigned long)power4_idle_nap_return;
+ regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
}
#endif
}
@@ -129,9 +163,18 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != INTERRUPT_PROGRAM)
+ if (TRAP(regs) != INTERRUPT_PROGRAM) {
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+ BUG_ON(is_implicit_soft_masked(regs));
+ }
+#ifdef CONFIG_PPC_BOOK3S
+ /* Move this under a debugging check */
+ if (arch_irq_disabled_regs(regs))
+ BUG_ON(search_kernel_restart_table(regs->nip));
+#endif
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif
booke_restore_dbcr0();
@@ -153,8 +196,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
*/
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
- if (user_mode(regs))
- kuep_unlock();
}
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
@@ -188,6 +229,7 @@ struct interrupt_nmi_state {
u8 irq_soft_mask;
u8 irq_happened;
u8 ftrace_enabled;
+ u64 softe;
#endif
};
@@ -213,6 +255,7 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
#ifdef CONFIG_PPC64
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;
+ state->softe = regs->softe;
/*
* Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
@@ -222,6 +265,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ if (is_implicit_soft_masked(regs)) {
+ // Adjust regs->softe to reflect the implicit soft-mask, so
+ // arch_irq_disabled_regs(regs) behaves as expected.
+ regs->softe = IRQS_ALL_DISABLED;
+ }
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
+
/* Don't do any per-CPU operations until interrupt state is fixed */
if (nmi_disables_ftrace(regs)) {
@@ -253,11 +304,20 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
*/
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
+ if (arch_irq_disabled_regs(regs)) {
+ unsigned long rst = search_kernel_restart_table(regs->nip);
+ if (rst)
+ regs_set_return_ip(regs, rst);
+ }
+#endif
+
if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);
/* Check we didn't change the pending interrupt mask. */
WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+ regs->softe = state->softe;
local_paca->irq_happened = state->irq_happened;
local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index b2bd58830430..4982f3711fc3 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -6,7 +6,6 @@
/*
*/
-#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
@@ -23,8 +22,8 @@ extern atomic_t ppc_n_lost_interrupts;
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
-/* Same thing, used by the generic IRQ code */
-#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
+/* Number of irqs reserved for a legacy isa controller */
+#define NR_IRQS_LEGACY 16
extern irq_hw_number_t virq_to_hw(unsigned int virq);
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 2d5c6bec2b4f..93ce3ec25387 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -50,7 +50,7 @@ l_yes:
1098: nop; \
.pushsection __jump_table, "aw"; \
.long 1098b - ., LABEL - .; \
- FTR_ENTRY_LONG KEY; \
+ FTR_ENTRY_LONG KEY - .; \
.popsection
#endif
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index ec96232529ac..1df763002726 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -5,14 +5,6 @@
#define KUAP_READ 1
#define KUAP_WRITE 2
#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
-/*
- * For prevent_user_access() only.
- * Use the current saved situation instead of the to/from/size params.
- * Used on book3s/32
- */
-#define KUAP_CURRENT_READ 4
-#define KUAP_CURRENT_WRITE 8
-#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup.h>
@@ -46,10 +38,7 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
-#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
-void kuep_lock(void);
-void kuep_unlock(void);
-#else
+#ifndef CONFIG_PPC_BOOK3S_32
static inline void kuep_lock(void) { }
static inline void kuep_unlock(void) { }
#endif
@@ -83,8 +72,7 @@ static inline unsigned long kuap_get_and_assert_locked(void)
#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
+static inline void prevent_user_access(unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -96,53 +84,53 @@ static __always_inline void setup_kup(void)
setup_kuap(disable_kuap);
}
-static inline void allow_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
barrier_nospec();
allow_user_access(NULL, from, size, KUAP_READ);
}
-static inline void allow_write_to_user(void __user *to, unsigned long size)
+static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
{
allow_user_access(to, NULL, size, KUAP_WRITE);
}
-static inline void allow_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
barrier_nospec();
allow_user_access(to, from, size, KUAP_READ_WRITE);
}
-static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
{
- prevent_user_access(NULL, from, size, KUAP_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_write_to_user(void __user *to, unsigned long size)
+static __always_inline void prevent_write_to_user(void __user *to, unsigned long size)
{
- prevent_user_access(to, NULL, size, KUAP_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
-static inline void prevent_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void prevent_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
- prevent_user_access(to, from, size, KUAP_READ_WRITE);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_access_user(void)
+static __always_inline void prevent_current_access_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_read_from_user(void)
+static __always_inline void prevent_current_read_from_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_current_write_to_user(void)
+static __always_inline void prevent_current_write_to_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
#endif /* !__ASSEMBLY__ */
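
Since the unlocked address now lives in current->thread.kuap, prevent_user_access() only needs the direction. A user-space sketch of the open / access / close pattern these helpers wrap, assuming nothing beyond what is shown above (the model_* names are invented and memcpy stands in for the real user copy):

#include <stdio.h>
#include <string.h>

#define MODEL_KUAP_READ  1
#define MODEL_KUAP_WRITE 2

static unsigned long model_thread_kuap = ~0UL;	/* stands in for KUAP_NONE */

static void model_allow_user_access(void *to, unsigned long dir)
{
	if (dir & MODEL_KUAP_WRITE)
		model_thread_kuap = (unsigned long)to;	/* remember the open window */
}

/* Only the direction is needed: the window address is already recorded. */
static void model_prevent_user_access(unsigned long dir)
{
	if (dir & MODEL_KUAP_WRITE)
		model_thread_kuap = ~0UL;
}

static void model_copy_to_user(void *to, const void *from, unsigned long n)
{
	model_allow_user_access(to, MODEL_KUAP_WRITE);
	memcpy(to, from, n);			/* the guarded user access */
	model_prevent_user_access(MODEL_KUAP_WRITE);
}

int main(void)
{
	char dst[8];

	model_copy_to_user(dst, "ok", 3);
	printf("%s, kuap=%#lx\n", dst, model_thread_kuap);
	return 0;
}
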
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index a3633560493b..fbbf3cec92e9 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -147,6 +147,7 @@
#define KVM_GUEST_MODE_SKIP 2
#define KVM_GUEST_MODE_GUEST_HV 3
#define KVM_GUEST_MODE_HOST_HV 4
+#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
#define KVM_INST_FETCH_FAILED -1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e6b53c6e21e3..caaa0f592d8e 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -307,6 +307,9 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 9bb9bb370b53..eaf3a562bf1e 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -153,10 +153,18 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
return radix;
}
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
+
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
/*
+ * Invalid HDSISR value which is used to indicate when HW has not set the reg.
+ * Used to work around an erratum.
+ */
+#define HDSISR_CANARY 0x7fff
+
+/*
* We use a lock bit in HPTE dword 0 to synchronize updates and
* accesses to each HPTE, and another bit to indicate non-present
* HPTEs.
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
index 2fca299f7e19..c63105d2c9e7 100644
--- a/arch/powerpc/include/asm/kvm_guest.h
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
return static_branch_unlikely(&kvm_guest);
}
-bool check_kvm_guest(void);
+int check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
-static inline bool check_kvm_guest(void) { return false; }
+static inline int check_kvm_guest(void) { return 0; }
#endif
#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1e83359f286b..9f52f282b1aa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -51,6 +51,7 @@
/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2)
#include <linux/mmu_notifier.h>
@@ -80,12 +81,13 @@ struct kvmppc_book3s_shadow_vcpu;
struct kvm_nested_guest;
struct kvm_vm_stat {
- ulong remote_tlb_flush;
- ulong num_2M_pages;
- ulong num_1G_pages;
+ struct kvm_vm_stat_generic generic;
+ u64 num_2M_pages;
+ u64 num_1G_pages;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 sum_exits;
u64 mmio_exits;
u64 signal_exits;
@@ -101,14 +103,8 @@ struct kvm_vcpu_stat {
u64 emulated_inst_exits;
u64 dec_exits;
u64 ext_intr_exits;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 halt_wait_ns;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
u64 halt_successful_wait;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 dbell_exits;
u64 gdbell_exits;
u64 ld;
@@ -297,7 +293,6 @@ struct kvm_arch {
u8 fwnmi_enabled;
u8 secure_guest;
u8 svm_enabled;
- bool threads_indep;
bool nested_enable;
bool dawr1_enabled;
pgd_t *pgtable;
@@ -683,7 +678,12 @@ struct kvm_vcpu_arch {
ulong fault_dar;
u32 fault_dsisr;
unsigned long intr_msr;
- ulong fault_gpa; /* guest real address of page fault (POWER9) */
+ /*
+ * POWER9 and later: fault_gpa contains the guest real address of page
+ * fault for a radix guest, or segment descriptor (equivalent to result
+ * from slbmfev of SLB entry that translated the EA) for hash guests.
+ */
+ ulong fault_gpa;
#endif
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 5bf8ae9bb2cc..2d88944f9f34 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -129,6 +129,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -606,6 +607,7 @@ extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
@@ -638,6 +640,8 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
+static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+ { return 0; }
#endif
#ifdef CONFIG_KVM_XIVE
@@ -655,8 +659,6 @@ extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
-extern void kvmppc_xive_init_module(void);
-extern void kvmppc_xive_exit_module(void);
extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
@@ -671,6 +673,8 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
@@ -680,8 +684,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_native_init_module(void);
-extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
@@ -695,8 +697,6 @@ static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
-static inline void kvmppc_xive_init_module(void) { }
-static inline void kvmppc_xive_exit_module(void) { }
static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
@@ -711,14 +711,14 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_native_init_module(void) { }
-static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val)
{ return 0; }
@@ -754,7 +754,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
unsigned int yield_count);
-long kvmppc_h_random(struct kvm_vcpu *vcpu);
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index ae25e6e72997..4fe018cc207b 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
struct pt_regs *regs = ftrace_get_regs(fregs);
- regs->nip = ip;
+ regs_set_return_ip(regs, ip);
}
#define klp_get_ftrace_location klp_get_ftrace_location
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 607168b1aef4..27016b98ecb2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -220,7 +220,7 @@ enum {
#elif defined(CONFIG_44x)
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
#endif
-#if defined(CONFIG_E200) || defined(CONFIG_E500)
+#ifdef CONFIG_E500
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
@@ -324,7 +324,6 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
}
#endif /* !CONFIG_DEBUG_VM */
-#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
@@ -334,17 +333,6 @@ static inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-#else
-static inline bool radix_enabled(void)
-{
- return false;
-}
-
-static inline bool early_radix_enabled(void)
-{
- return false;
-}
-#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
@@ -357,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
return false;
}
#endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+ return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
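The new strict_module_rwx_enabled() helper simply ties CONFIG_STRICT_MODULE_RWX to the existing strict-kernel-RWX switch. A minimal sketch of how a caller might gate page-permission changes on it; the set_memory_ro() call and the module_text/npages variables are illustrative only, not part of this patch:

    /* Sketch: only touch module page permissions when both options are active. */
    if (strict_module_rwx_enabled())
            set_memory_ro((unsigned long)module_text, npages);
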
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 4bc45d3ed8b0..9ba6b585337f 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -57,7 +57,6 @@ static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
-extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
@@ -122,12 +121,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
}
#endif
-#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
-#else
-static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
-#endif
-
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
@@ -222,6 +215,18 @@ static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+#else
+static inline void do_h_rpt_invalidate_prt(unsigned long pid,
+ unsigned long lpid,
+ unsigned long type,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end) { }
+#endif
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 6cda76b57c5d..4c6c6dbd182f 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -18,7 +18,7 @@
* flags field of the struct page
*/
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
/*
@@ -41,7 +41,7 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
#ifdef CONFIG_FA_DUMP
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
#endif
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index 39be9aea86db..64b6c608eca4 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -66,10 +66,9 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- size_t size = huge_page_size(hstate_vma(vma));
+ size_t size = 1UL << shift;
if (size == SZ_16K)
return __pte(pte_val(entry) & ~_PAGE_HUGE);
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 295ef5639609..882a0bc7887a 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -9,10 +9,22 @@
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
+
#include <asm/reg.h>
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
+ if (kuap_is_disabled())
+ return;
+
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
@@ -23,12 +35,20 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, regs->kuap);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
- unsigned long kuap = mfspr(SPRN_MD_AP);
+ unsigned long kuap;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ kuap = mfspr(SPRN_MD_AP);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
@@ -38,25 +58,35 @@ static inline unsigned long kuap_get_and_assert_locked(void)
static inline void kuap_assert_locked(void)
{
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && !kuap_is_disabled())
kuap_get_and_assert_locked();
}
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
static inline unsigned long prevent_user_access_return(void)
{
- unsigned long flags = mfspr(SPRN_MD_AP);
+ unsigned long flags;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
@@ -65,12 +95,18 @@ static inline unsigned long prevent_user_access_return(void)
static inline void restore_user_access(unsigned long flags)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, flags);
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
+ if (kuap_is_disabled())
+ return false;
+
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}
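All of the 8xx KUAP helpers above now bail out early when the new disable_kuap_key static key is set. A minimal sketch of the boot-time side of that pattern, assuming a setup function of this shape (the function name is illustrative; the real initialisation lives in the 8xx KUP setup code):

    DEFINE_STATIC_KEY_FALSE(disable_kuap_key);

    static void __init kuap_setup_sketch(bool disabled)
    {
            if (disabled) {
                    /* Flip the key once: every helper above becomes a no-op. */
                    static_branch_enable(&disable_kuap_key);
                    return;
            }
            /* Otherwise lock user access by default. */
            mtspr(SPRN_MD_AP, MD_APG_KUAP);
    }
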
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 2d92a39d8f2e..43ceca128531 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -113,6 +113,7 @@ typedef struct {
/* patch sites */
extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
+extern s32 patch__tlb_44x_kuep, patch__tlb_47x_kuep;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 6e4faa0a9b35..997cec973406 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -178,6 +178,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
+#include <linux/sizes.h>
void mmu_pin_tlb(unsigned long top, bool readonly);
@@ -225,6 +226,48 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
+ unsigned int max_page_shift, unsigned long size)
+{
+ if (end - addr < size)
+ return false;
+
+ if ((1UL << max_page_shift) < size)
+ return false;
+
+ if (!IS_ALIGNED(addr, size))
+ return false;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ return false;
+
+ return true;
+}
+
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
+ return SZ_512K;
+ if (PAGE_SIZE == SZ_16K)
+ return SZ_16K;
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
+ return SZ_16K;
+ return PAGE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ if (size >= SZ_512K)
+ return 19;
+ else if (size >= SZ_16K)
+ return 14;
+ else
+ return PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
/* patch sites */
extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
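The two arch_vmap hooks above let the generic huge-vmap code pick 512K or 16K mappings on the 8xx when alignment and range allow it. A minimal sketch with made-up inputs showing what the size helper returns:

    unsigned long addr = 0xc9000000UL;      /* 512K-aligned vmap address (example) */
    unsigned long end  = addr + SZ_1M;
    u64 pfn = 0x1000;                       /* phys 0x1000000, also 512K-aligned */
    unsigned long size;

    size = arch_vmap_pte_range_map_size(addr, end, pfn, 19);
    /* size == SZ_512K here; with a worse-aligned pfn or a shorter range it
     * would fall back to SZ_16K and finally PAGE_SIZE. */
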
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 96522f7f0618..f06ae00f2a65 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -54,7 +54,6 @@ extern int icache_44x_need_flush;
#define PGD_MASKED_BITS 0
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 57cd3892bfe0..53fbfdfac93d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -12,8 +12,6 @@
#include <asm/barrier.h>
#include <asm/asm-const.h>
-#define FIRST_USER_ADDRESS 0UL
-
/*
* Size of EA range mapped by our pagetables.
*/
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ec18ac818e3a..dc05a862e72a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -149,11 +149,9 @@ struct paca_struct {
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S
- mm_context_id_t mm_ctx_id;
#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long mm_ctx_slb_addr_limit;
#else
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
@@ -167,9 +165,16 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
+#ifdef CONFIG_PPC64
+ u64 exit_save_r1; /* Syscall/interrupt R1 save */
+#endif
#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 hsrr_valid; /* HSRRs set for HRFID */
+ u8 srr_valid; /* SRRs set for RFID */
+#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index 5d1726bb28e7..bcb7b5f917be 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
return be32_to_cpu(yield_count);
}
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it cannot acquire the lock, and the unlock slow path might prod if a waiter has
+ * yielded. So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
- plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+ plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
static inline void prod_cpu(int cpu)
{
- plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+ plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}
static inline void yield_to_any(void)
{
- plpar_hcall_norets(H_CONFER, -1, 0);
+ plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
static inline bool is_shared_processor(void)
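The confer/prod helpers above now go through the notrace hcall wrappers so tracing can never re-enter the lock code. A minimal sketch of how a paravirt spinlock slowpath typically uses them; yield_if_preempted() is a hypothetical wrapper, not an existing kernel function:

    static inline void yield_if_preempted(int owner_cpu)
    {
            u32 yield_count = yield_count_of(owner_cpu);

            /* An odd yield count means the owner vCPU is preempted right now,
             * so confer our cycles to it instead of spinning. */
            if (yield_count & 1)
                    yield_to_preempted(owner_cpu, yield_count);
    }
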
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 6dd78a2dc03a..3360cad78ace 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -70,9 +70,4 @@ extern struct kmem_cache *pgtable_cache[];
#include <asm/nohash/pgalloc.h>
#endif
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
- return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index c6a676714f04..d564d0ecd4cd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,6 +41,10 @@ struct mm_struct;
#ifndef __ASSEMBLY__
+#ifndef MAX_PTRS_PER_PGD
+#define MAX_PTRS_PER_PGD PTRS_PER_PGD
+#endif
+
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
@@ -72,6 +76,7 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+void poking_init(void);
extern unsigned long ioremap_bot;
@@ -152,6 +157,12 @@ static inline bool p4d_is_leaf(p4d_t p4d)
}
#endif
+#define pmd_pgtable pmd_pgtable
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index ece84a430701..83e0f701ebc6 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -28,7 +28,11 @@ static inline void set_cede_latency_hint(u8 latency_hint)
static inline long cede_processor(void)
{
- return plpar_hcall_norets(H_CEDE);
+ /*
+ * We cannot call tracepoints inside RCU idle regions which
+ * means we must not trace H_CEDE.
+ */
+ return plpar_hcall_norets_notrace(H_CEDE);
}
static inline long extended_cede_processor(unsigned long latency_hint)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ac41776661e9..bede76dd3db7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -76,6 +76,40 @@
#define __REGA0_R30 30
#define __REGA0_R31 31
+/* For use with PPC_RAW_() macros */
+#define _R0 0
+#define _R1 1
+#define _R2 2
+#define _R3 3
+#define _R4 4
+#define _R5 5
+#define _R6 6
+#define _R7 7
+#define _R8 8
+#define _R9 9
+#define _R10 10
+#define _R11 11
+#define _R12 12
+#define _R13 13
+#define _R14 14
+#define _R15 15
+#define _R16 16
+#define _R17 17
+#define _R18 18
+#define _R19 19
+#define _R20 20
+#define _R21 21
+#define _R22 22
+#define _R23 23
+#define _R24 24
+#define _R25 25
+#define _R26 26
+#define _R27 27
+#define _R28 28
+#define _R29 29
+#define _R30 30
+#define _R31 31
+
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
@@ -222,13 +256,11 @@
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
-#define PPC_INST_ISYNC 0x4c00012c
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MTMSRD 0x7c000164
-#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_RFEBB 0x4c000124
@@ -241,10 +273,10 @@
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
-#define PPC_INST_SC 0x44000002
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
+#define PPC_INST_SETB 0x7c000100
#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_TRECHKPT 0x7c0007dd
@@ -252,18 +284,9 @@
#define PPC_INST_TSR 0x7c0005dd
#define PPC_INST_LD 0xe8000000
#define PPC_INST_STD 0xf8000000
-#define PPC_INST_MFLR 0x7c0802a6
-#define PPC_INST_MTCTR 0x7c0903a6
-#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
-#define PPC_INST_BLR 0x4e800020
-#define PPC_INST_BCTR 0x4e800420
-#define PPC_INST_BCTRL 0x4e800421
#define PPC_INST_DIVD 0x7c0003d2
-#define PPC_INST_RLDICR 0x78000004
-#define PPC_INST_ORI 0x60000000
-#define PPC_INST_ORIS 0x64000000
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BL 0x48000001
#define PPC_INST_BRANCH_COND 0x40800000
@@ -323,6 +346,8 @@
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)
+#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
+#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -383,6 +408,10 @@
#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
+#define PPC_RAW_SC() (0x44000002)
+#define PPC_RAW_SYNC() (0x7c0004ac)
+#define PPC_RAW_ISYNC() (0x4c00012c)
+
/*
* Define what the VSX XX1 form instructions will look like, then add
* the 128 bit load store instructions based on that.
@@ -404,10 +433,10 @@
#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_RAW_PLXVP(xtp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i)))
-#define PPC_RAW_PSTXVP(xsp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i)))
+#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
#define PPC_RAW_NAP (0x4c000364)
#define PPC_RAW_SLEEP (0x4c0003a4)
#define PPC_RAW_WINKLE (0x4c0003e4)
@@ -445,16 +474,17 @@
#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
-#define PPC_RAW_NOP() (PPC_INST_NOP)
-#define PPC_RAW_BLR() (PPC_INST_BLR)
+#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
+#define PPC_RAW_BLR() (0x4e800020)
#define PPC_RAW_BLRL() (0x4e800021)
#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
-#define PPC_RAW_MFLR(t) (PPC_INST_MFLR | ___PPC_RT(t))
-#define PPC_RAW_BCTR() (PPC_INST_BCTR)
-#define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r))
-#define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
+#define PPC_RAW_BCTR() (0x4e800420)
+#define PPC_RAW_BCTRL() (0x4e800421)
+#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
+#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
-#define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
@@ -499,8 +529,8 @@
#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
-#define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
-#define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
@@ -519,7 +549,7 @@
(0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
-#define PPC_RAW_RLDICR(d, a, i, me) (PPC_INST_RLDICR | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
+#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
@@ -533,6 +563,8 @@
#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
+#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
+#define PPC_RAW_EIEIO() (0x7c0006ac)
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
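With the raw opcode constants folded into the PPC_RAW_*() macros and the new _Rn register aliases, call sites can build instruction words without the old PPC_INST_* defines. A minimal sketch (register choices and the SPR are only illustrative):

    u32 insn = PPC_RAW_ADDI(_R3, _R1, 16);          /* addi r3, r1, 16 */
    u32 nop  = PPC_RAW_NOP();                       /* now encoded as ori r0, r0, 0 */
    u32 mtsp = PPC_RAW_MTSPR(SPRN_SPRG0, _R4);      /* uses the new MTSPR macro */
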
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index d6739d700f0a..116c1519728a 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -762,6 +762,21 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.previous)
+
+#define RESTART_TABLE(_start, _end, _target) \
+ stringify_in_c(.section __restart_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.llong (_target);) \
+ stringify_in_c(.previous)
+
#ifdef CONFIG_PPC_FSL_BOOK3E
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index 84dd1addd434..c5d984700d24 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
- regs->msr |= MSR_SINGLESTEP;
+ regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* We turn off Critical Input Exception(CE) to ensure that the single
* step will be for the instruction we have the probe on; if we don't,
* it is possible we'd get the single step reported for CE.
*/
- regs->msr &= ~MSR_CE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_CE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7bf8a15af224..f348e564f7dd 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -276,7 +276,15 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .pgdir = swapper_pg_dir, \
+ .kuap = ~0UL, /* KUAP_NONE */ \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
+}
+#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
.ksp = INIT_SP, \
.pgdir = swapper_pg_dir, \
@@ -339,17 +347,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define spin_end() HMT_medium()
-#define spin_until_cond(cond) \
-do { \
- if (unlikely(!(cond))) { \
- spin_begin(); \
- do { \
- spin_cpu_relax(); \
- } while (!(cond)); \
- spin_end(); \
- } \
-} while (0)
-
#endif
/* Check that a certain kernel stack pointer is valid in task_struct p */
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index e646c7f218bc..8a0d8fb35328 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -71,6 +71,7 @@ struct ps3_dma_region_ops;
* @bus_addr: The 'translated' bus address of the region.
* @len: The length in bytes of the region.
* @offset: The offset from the start of memory of the region.
+ * @dma_mask: Device dma_mask.
* @ioid: The IOID of the device that owns this region
* @chunk_list: Opaque variable used by the ioc page manager.
* @region_ops: struct ps3_dma_region_ops - dma region operations
@@ -85,6 +86,7 @@ struct ps3_dma_region {
enum ps3_dma_region_type region_type;
unsigned long len;
unsigned long offset;
+ u64 dma_mask;
/* driver variables (set by ps3_dma_region_create) */
unsigned long bus_addr;
@@ -232,7 +234,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) || defined(CONFIG_PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 33fa5dd8ee6a..714a35f0d425 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
pgd_t *pgdir = init_mm.pgd;
return __find_linux_pte(pgdir, ea, NULL, hshift);
}
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+ pte_t *ptep;
+ phys_addr_t pa;
+ int hugepage_shift;
+
+ /*
+ * init_mm does not free page tables, and does not do THP. It may
+ * have huge pages from huge vmalloc / ioremap etc.
+ */
+ ptep = find_init_mm_pte(addr, &hugepage_shift);
+ if (WARN_ON(!ptep))
+ return 0;
+
+ pa = PFN_PHYS(pte_pfn(*ptep));
+
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+
+ pa |= addr & ((1ul << hugepage_shift) - 1);
+
+ return pa;
+}
+
/*
* This is what we should always use. Any other lockless page table lookup needs
* careful audit against THP split.
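ppc_find_vmap_phys() gives real-mode code a lock-free way to turn a vmalloc/ioremap address into a physical address (the EEH hunk below switches to it). A minimal usage sketch; the wrapper function and its fallback behaviour are illustrative only:

    static phys_addr_t token_to_phys_sketch(unsigned long token)
    {
            phys_addr_t pa = ppc_find_vmap_phys(token);

            /* 0 means no PTE was found; the helper already WARNed. */
            return pa ? pa : token;
    }
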
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9c9ab2746168..3e5d470a6155 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
+#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
@@ -47,11 +48,12 @@ struct pt_regs
unsigned long result;
};
};
-
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
union {
struct {
#ifdef CONFIG_PPC64
unsigned long ppr;
+ unsigned long exit_result;
#endif
union {
#ifdef CONFIG_PPC_KUAP
@@ -67,6 +69,7 @@ struct pt_regs
};
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
};
+#endif
};
#endif
@@ -121,54 +124,61 @@ struct pt_regs
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
+#include <asm/paca.h>
-static inline unsigned long instruction_pointer(struct pt_regs *regs)
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void set_return_regs_changed(void)
{
- return regs->nip;
+#ifdef CONFIG_PPC_BOOK3S_64
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
+#endif
}
-static inline void instruction_pointer_set(struct pt_regs *regs,
- unsigned long val)
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
- regs->nip = val;
+ regs->nip = ip;
+ set_return_regs_changed();
}
-static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
- return regs->gpr[1];
+ regs->msr = msr;
+ set_return_regs_changed();
}
-static inline unsigned long frame_pointer(struct pt_regs *regs)
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
{
- return 0;
+ regs_set_return_ip(regs, regs->nip + offset);
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return regs->nip;
+}
-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-static inline int is_syscall_success(struct pt_regs *regs)
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
{
- return !(regs->ccr & 0x10000000);
+ regs_set_return_ip(regs, val);
}
-static inline long regs_return_value(struct pt_regs *regs)
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
- if (is_syscall_success(regs))
- return regs->gpr[3];
- else
- return -regs->gpr[3];
+ return regs->gpr[1];
}
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+static inline unsigned long frame_pointer(struct pt_regs *regs)
{
- regs->gpr[3] = rc;
+ return 0;
}
#ifdef __powerpc64__
@@ -235,6 +245,31 @@ static __always_inline void set_trap_norestart(struct pt_regs *regs)
regs->trap |= 0x1;
}
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+ else
+ return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return regs->gpr[3];
+
+ if (is_syscall_success(regs))
+ return regs->gpr[3];
+ else
+ return -regs->gpr[3];
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->gpr[3] = rc;
+}
+
#define arch_has_single_step() (1)
#define arch_has_block_step() (true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
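The point of the new accessors is that any code which changes where an interrupt will return to must also invalidate the cached SRR/HSRR state in the paca, which set_return_regs_changed() does. A minimal sketch of a caller; the function name is illustrative:

    static void skip_trapping_insn(struct pt_regs *regs)
    {
            /* Never write regs->nip directly any more: the accessor also
             * clears srr_valid/hsrr_valid so the exit path reloads SRR0/1. */
            regs_add_return_ip(regs, 4);
    }
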
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 07318bc63e3d..b676c4fb90fd 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -37,7 +37,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
u32 val = 0;
- if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+ if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
return;
queued_spin_lock_slowpath(lock, val);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7c81d3e563b2..be85cf156a1f 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -393,6 +393,7 @@
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
+#define SPRN_TRIG2 0x372
#define SPRN_PMCR 0x374 /* Power Management Control Register */
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
@@ -1435,8 +1436,6 @@ static inline void mtsr(u32 val, u32 idx)
}
#endif
-#define proc_trap() asm volatile("trap")
-
extern unsigned long current_stack_frame(void);
register unsigned long current_stack_pointer asm("r1");
@@ -1447,16 +1446,6 @@ extern void scom970_write(unsigned int address, unsigned long value);
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
-
-static inline void update_power8_hid0(unsigned long hid0)
-{
- /*
- * The HID0 update on Power8 should at the very least be
- * preceded by a SYNC instruction followed by an ISYNC
- * instruction
- */
- asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
-}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index b774a4477d5f..792eefaf230b 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -92,6 +92,9 @@ static inline bool security_ftr_enabled(u64 feature)
// The L1-D cache should be flushed after user accesses from the kernel
#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
+// The STF flush should be executed on privilege state switch
+#define SEC_FTR_STF_BARRIER 0x0000000000010000ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
@@ -99,6 +102,7 @@ static inline bool security_ftr_enabled(u64 feature)
SEC_FTR_BNDS_CHK_SPEC_BAR | \
SEC_FTR_L1D_FLUSH_ENTRY | \
SEC_FTR_L1D_FLUSH_UACCESS | \
+ SEC_FTR_STF_BARRIER | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..b040094f7920
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO 0
+#define SET_MEMORY_RW 1
+#define SET_MEMORY_NX 2
+#define SET_MEMORY_X 3
+
+int change_memory_attr(unsigned long addr, int numpages, long action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
+
+#endif
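The new header is a thin wrapper around change_memory_attr(), taking a virtual address and a page count. A minimal usage sketch on a vmalloc'd page (the buffer and its use are illustrative):

    void *p = vmalloc(PAGE_SIZE);

    if (p) {
            set_memory_ro((unsigned long)p, 1);     /* write-protect one page */
            /* ... read-only phase ... */
            set_memory_rw((unsigned long)p, 1);     /* make it writable again */
            vfree(p);
    }
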
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e89bfebd4e00..6c1a7d217d1a 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -10,7 +10,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern bool init_mem_is_free;
-extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 972ed0df154d..1df867c2e054 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -13,12 +13,11 @@ struct pt_regs;
* we don't allow putting a breakpoint on an mtmsrd instruction.
* Similarly we don't allow breakpoints on rfid instructions.
* These macros tell us if an instruction is a mtmsrd or rfid.
- * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
- * and an mtmsrd (64-bit).
+ * Note that these return true for both mtmsr/rfi (32-bit)
+ * and mtmsrd/rfid (64-bit).
*/
#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
-#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
-#define IS_RFI(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
+#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index fd1b518eed17..ba0f88f3a30d 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- /*
- * If the system call failed,
- * regs->gpr[3] contains a positive ERRORCODE.
- */
- return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ if (trap_is_scv(regs)) {
+ unsigned long error = regs->gpr[3];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+ } else {
+ /*
+ * If the system call failed,
+ * regs->gpr[3] contains a positive ERRORCODE.
+ */
+ return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ }
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- /*
- * In the general case it's not obvious that we must deal with CCR
- * here, as the syscall exit path will also do that for us. However
- * there are some places, eg. the signal code, which check ccr to
- * decide if the value in r3 is actually an error.
- */
- if (error) {
- regs->ccr |= 0x10000000L;
- regs->gpr[3] = error;
+ if (trap_is_scv(regs)) {
+ regs->gpr[3] = (long) error ?: val;
} else {
- regs->ccr &= ~0x10000000L;
- regs->gpr[3] = val;
+ /*
+ * In the general case it's not obvious that we must deal with
+ * CCR here, as the syscall exit path will also do that for us.
+ * However there are some places, eg. the signal code, which
+ * check ccr to decide if the value in r3 is actually an error.
+ */
+ if (error) {
+ regs->ccr |= 0x10000000L;
+ regs->gpr[3] = error;
+ } else {
+ regs->ccr &= ~0x10000000L;
+ regs->gpr[3] = val;
+ }
}
}
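With scv, errors come back as negative values in r3 rather than a positive errno plus CR0.SO, which is why syscall_get_error() and syscall_set_return_value() now branch on trap_is_scv(). A minimal sketch of a tracer consuming the result through the common helpers (the pr_debug lines are illustrative):

    long err = syscall_get_error(task, regs);

    if (err)
            pr_debug("syscall failed: %ld\n", err);
    else
            pr_debug("syscall returned: %ld\n",
                     syscall_get_return_value(task, regs));
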
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 8dd3cdb25338..8c2c3dd4ddba 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -97,6 +97,18 @@ extern void div128_by_32(u64 dividend_high, u64 dividend_low,
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
+#ifdef CONFIG_PPC64
+static inline unsigned long test_irq_work_pending(void)
+{
+ unsigned long x;
+
+ asm volatile("lbz %0,%1(13)"
+ : "=r" (x)
+ : "i" (offsetof(struct paca_struct, irq_work_pending)));
+ return x;
+}
+#endif
+
DECLARE_PER_CPU(u64, decrementers_next_tb);
/* Convert timebase ticks to nanoseconds */
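The new inline reads the irq_work_pending byte straight off the paca through r13, avoiding a dependence on local_paca in the timer fast path. A sketch of the consumer side, roughly what the timer interrupt does with it:

    /* The asm above amounts to reading local_paca->irq_work_pending. */
    if (test_irq_work_pending())
            irq_work_run();
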
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index a09e4240c5b1..22c79ab40006 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -157,7 +157,7 @@ do { \
"2: lwz%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
EX_TABLE(2b, %l2) \
- : "=r" (x) \
+ : "=&r" (x) \
: "m" (*addr) \
: \
: label)
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
deleted file mode 100644
index ce69c5eff95e..000000000000
--- a/arch/powerpc/include/asm/unaligned.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_UNALIGNED_H
-#define _ASM_POWERPC_UNALIGNED_H
-
-#ifdef __KERNEL__
-
-/*
- * The PowerPC can do unaligned accesses itself based on its endian mode.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#ifdef __LITTLE_ENDIAN__
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-#else
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 5bf65f5d44a9..fe683371336f 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -24,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- struct ppc_inst insn;
- struct ppc_inst ixol;
+ u32 insn[2];
+ u32 ixol[2];
};
};
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index e33f80b0ea81..57573d9c1e09 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -5,8 +5,10 @@
#ifndef _ASM_POWERPC_VAS_H
#define _ASM_POWERPC_VAS_H
-
-struct vas_window;
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+#include <uapi/asm/vas-api.h>
/*
* Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
@@ -49,6 +51,64 @@ enum vas_cop_type {
};
/*
+ * User space VAS windows are opened by tasks and take references
+ * to pid and mm until windows are closed.
+ * Each window stores the owner's pid, tgid, and mm.
+ */
+struct vas_user_win_ref {
+ struct pid *pid; /* PID of owner */
+ struct pid *tgid; /* Thread group ID of owner */
+ struct mm_struct *mm; /* Linux process mm_struct */
+};
+
+/*
+ * Common VAS window struct on PowerNV and PowerVM
+ */
+struct vas_window {
+ u32 winid;
+ u32 wcreds_max; /* Window credits */
+ enum vas_cop_type cop;
+ struct vas_user_win_ref task_ref;
+ char *dbgname;
+ struct dentry *dbgdir;
+};
+
+/*
+ * User space window operations used for PowerNV and PowerVM
+ */
+struct vas_user_win_ops {
+ struct vas_window * (*open_win)(int vas_id, u64 flags,
+ enum vas_cop_type);
+ u64 (*paste_addr)(struct vas_window *);
+ int (*close_win)(struct vas_window *);
+};
+
+static inline void put_vas_user_win_ref(struct vas_user_win_ref *ref)
+{
+ /* Drop references to pid, tgid, and mm */
+ put_pid(ref->pid);
+ put_pid(ref->tgid);
+ if (ref->mm)
+ mmdrop(ref->mm);
+}
+
+static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
+{
+ mm_context_add_vas_window(ref->mm);
+ /*
+ * Even a process that has no foreign real address mapping can
+ * use an unpaired COPY instruction (to no real effect). Issue
+ * CP_ABORT to clear any pending COPY and prevent a covert
+ * channel.
+ *
+ * __switch_to() will issue CP_ABORT on future context switches
+ * if process / thread has any open VAS window (Use
+ * current->mm->context.vas_windows).
+ */
+ asm volatile(PPC_CP_ABORT);
+}
+
+/*
* Receive window attributes specified by the (in-kernel) owner of window.
*/
struct vas_rx_win_attr {
@@ -100,6 +160,7 @@ struct vas_tx_win_attr {
bool rx_win_ord_mode;
};
+#ifdef CONFIG_PPC_POWERNV
/*
* Helper to map a chip id to VAS id.
* For POWER9, this is a 1:1 mapping. In the future this maybe a 1:N
@@ -162,6 +223,43 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name);
+void vas_unregister_api_powernv(void);
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+
+/* VAS Capabilities */
+#define VAS_GZIP_QOS_FEAT 0x1
+#define VAS_GZIP_DEF_FEAT 0x2
+#define VAS_GZIP_QOS_FEAT_BIT PPC_BIT(VAS_GZIP_QOS_FEAT) /* Bit 1 */
+#define VAS_GZIP_DEF_FEAT_BIT PPC_BIT(VAS_GZIP_DEF_FEAT) /* Bit 2 */
+
+/* NX Capabilities */
+#define VAS_NX_GZIP_FEAT 0x1
+#define VAS_NX_GZIP_FEAT_BIT PPC_BIT(VAS_NX_GZIP_FEAT) /* Bit 1 */
+
+/*
+ * These structs are used to retrieve overall VAS capabilities that
+ * the hypervisor provides.
+ */
+struct hv_vas_all_caps {
+ __be64 descriptor;
+ __be64 feat_type;
+} __packed __aligned(0x1000);
+
+struct vas_all_caps {
+ u64 descriptor;
+ u64 feat_type;
+};
+
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result);
+int vas_register_api_pseries(struct module *mod,
+ enum vas_cop_type cop_type, const char *name);
+void vas_unregister_api_pseries(void);
+#endif
+
/*
* Register / unregister coprocessor type to VAS API which will be exported
* to user space. Applications can use this API to open / close window
@@ -171,7 +269,12 @@ int vas_paste_crb(struct vas_window *win, int offset, bool re);
* used for others in future.
*/
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- const char *name);
+ const char *name,
+ const struct vas_user_win_ops *vops);
void vas_unregister_coproc_api(void);
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref);
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref);
+void vas_dump_crb(struct coprocessor_request_block *crb);
#endif /* __ASM_POWERPC_VAS_H */
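The user-window structures above carry pid/tgid/mm references for as long as a window is open. A minimal sketch of that reference life cycle around the new helpers; the surrounding open/close logic is illustrative:

    struct vas_user_win_ref ref = { };
    int rc;

    rc = get_vas_user_win_ref(&ref);        /* grab pid, tgid and mm */
    if (rc)
            return rc;
    vas_user_win_add_mm_context(&ref);      /* count the window, issue CP_ABORT */

    /* ... window is in use ... */

    put_vas_user_win_ref(&ref);             /* drop the references on close */
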
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 8e903b3f9c24..d9cf192368ad 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -65,8 +65,12 @@ struct icp_ops {
extern const struct icp_ops *icp_ops;
+#ifdef CONFIG_PPC_ICS_NATIVE
/* Native ICS */
extern int ics_native_init(void);
+#else
+static inline int ics_native_init(void) { return -ENODEV; }
+#endif
/* RTAS ICS */
#ifdef CONFIG_PPC_ICS_RTAS
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
index 50ef95e2f5b1..82488b1e7276 100644
--- a/arch/powerpc/include/uapi/asm/papr_pdsm.h
+++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h
@@ -77,6 +77,9 @@
/* Indicate that the 'dimm_fuel_gauge' field is valid */
#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1
+/* Indicate that the 'dimm_dsc' field is valid */
+#define PDSM_DIMM_DSC_VALID 2
+
/*
* Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH
* Various flags indicate the health status of the dimm.
@@ -105,6 +108,9 @@ struct nd_papr_pdsm_health {
/* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */
__u16 dimm_fuel_gauge;
+
+ /* Extension flag PDSM_DIMM_DSC_VALID */
+ __u64 dimm_dsc;
};
__u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
};
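dimm_dsc is only meaningful when the corresponding validity bit is set, mirroring the existing fuel-gauge extension. A minimal consumer-side sketch, assuming the struct's extension_flags member carries the PDSM_DIMM_*_VALID bits:

    if (health.extension_flags & PDSM_DIMM_DSC_VALID)
            printf("dimm service clock: %llu\n",
                   (unsigned long long)health.dimm_dsc);
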
diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
index ebd4b2424785..7c81301ecdba 100644
--- a/arch/powerpc/include/uapi/asm/vas-api.h
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -13,11 +13,15 @@
#define VAS_MAGIC 'v'
#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
+/* Flags to VAS TX open window ioctl */
+/* To allocate a window with QoS credit, otherwise use default credit */
+#define VAS_TX_WIN_FLAG_QOS_CREDIT 0x0000000000000001
+
struct vas_tx_win_open_attr {
__u32 version;
__s16 vas_id; /* specific instance of vas or -1 for default */
__u16 reserved1;
- __u64 flags; /* Future use */
+ __u64 flags;
__u64 reserved2[6];
};
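The previously reserved flags word now accepts VAS_TX_WIN_FLAG_QOS_CREDIT. A minimal user-space sketch of opening a window with a QoS credit; the device path and the surrounding error handling are assumptions, not part of this patch:

    struct vas_tx_win_open_attr attr = {
            .version = 1,
            .vas_id  = -1,                          /* default VAS instance */
            .flags   = VAS_TX_WIN_FLAG_QOS_CREDIT,  /* ask for a QoS credit */
    };
    int fd = open("/dev/crypto/nx-gzip", O_RDWR);   /* assumed device node */

    if (fd >= 0 && ioctl(fd, VAS_TX_WIN_OPEN, &attr) == -1)
            perror("VAS_TX_WIN_OPEN");
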
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 28af4efb4587..a47eefa09bcb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,11 +86,7 @@ int main(void)
OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
- OFFSET(MMCONTEXTID, mm_struct, context.id);
-#ifdef CONFIG_PPC64
- DEFINE(SIGSEGV, SIGSEGV);
- DEFINE(NMI_MASK, NMI_MASK);
-#else
+#ifdef CONFIG_PPC32
#ifdef CONFIG_PPC_RTAS
OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif
@@ -119,7 +115,6 @@ int main(void)
#ifdef CONFIG_ALTIVEC
OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
- OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
OFFSET(THREAD_USED_VR, thread_struct, used_vr);
OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
@@ -150,22 +145,15 @@ int main(void)
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
OFFSET(THREAD_ACC, thread_struct, acc);
- OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
-#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
- OFFSET(KUAP, thread_struct, kuap);
-#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
@@ -185,19 +173,12 @@ int main(void)
sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
- OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
- OFFSET(TI_PREEMPT, thread_info, preempt_count);
#ifdef CONFIG_PPC64
OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
- OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
- OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
- OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
- OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
/* paca */
- DEFINE(PACA_SIZE, sizeof(struct paca_struct));
OFFSET(PACAPACAINDEX, paca_struct, paca_index);
OFFSET(PACAPROCSTART, paca_struct, cpu_start);
OFFSET(PACAKSAVE, paca_struct, kstack);
@@ -209,18 +190,13 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
+#ifdef CONFIG_PPC_BOOK3S_64
+ OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
+ OFFSET(PACASRR_VALID, paca_struct, srr_valid);
+#endif
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S
- OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
- OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
- OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
- DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
-#endif /* CONFIG_PPC_MM_SLICES */
-#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACAPGD, paca_struct, pgd);
@@ -241,21 +217,9 @@ int main(void)
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S_64
- OFFSET(PACASLBCACHE, paca_struct, slb_cache);
- OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
- OFFSET(PACASTABRR, paca_struct, stab_rr);
- OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
-#else
- OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
-#endif /* CONFIG_PPC_MM_SLICES */
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
-#ifdef CONFIG_PPC_PSERIES
- OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
-#endif
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
@@ -264,9 +228,7 @@ int main(void)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
- OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
- OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
#endif /* CONFIG_PPC_BOOK3S_64 */
OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
@@ -282,6 +244,9 @@ int main(void)
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
+#ifdef CONFIG_PPC64
+ OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
+#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
@@ -343,10 +308,6 @@ int main(void)
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
-#ifdef CONFIG_PPC_KUAP
- STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
-#endif
-
#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
@@ -368,10 +329,6 @@ int main(void)
#endif
#endif
-#ifndef CONFIG_PPC64
- OFFSET(MM_PGD, mm_struct, pgd);
-#endif /* ! CONFIG_PPC64 */
-
/* About the CPU features table */
OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
@@ -404,13 +361,6 @@ int main(void)
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
-#else
- DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
-#endif
- DEFINE(PTE_SIZE, sizeof(pte_t));
-
#ifdef CONFIG_KVM
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
@@ -482,11 +432,9 @@ int main(void)
OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
- OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_RADIX, kvm, arch.radix);
- OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
@@ -514,7 +462,6 @@ int main(void)
OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
- OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
@@ -525,7 +472,6 @@ int main(void)
OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
- OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
@@ -534,7 +480,6 @@ int main(void)
OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
- OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
@@ -646,10 +591,8 @@ int main(void)
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
- HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
- HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
@@ -757,7 +700,6 @@ int main(void)
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
- DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
#ifdef CONFIG_PPC_8xx
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 735e89337398..5693e1c67c2b 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -35,7 +35,7 @@ void __init reserve_kdump_trampoline(void)
static void __init create_trampoline(unsigned long addr)
{
- struct ppc_inst *p = (struct ppc_inst *)addr;
+ u32 *p = (u32 *)addr;
/* The maximum range of a single instruction branch, is the current
* instruction's address + (32 MB - 4) bytes. For the trampoline we
@@ -45,8 +45,8 @@ static void __init create_trampoline(unsigned long addr)
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
- patch_instruction(p, ppc_inst(PPC_INST_NOP));
- patch_branch((void *)p + 4, addr + PHYSICAL_START, 0);
+ patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
+ patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
void __init setup_kdump_trampoline(void)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index f24cd53ff26e..3bbdcc86d01b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
- pte_t *ptep;
- unsigned long pa;
- int hugepage_shift;
-
- /*
- * We won't find hugepages here(this is iomem). Hence we are not
- * worried about _PAGE_SPLITTING/collapse. Also we will not hit
- * page table free, because of init_mm.
- */
- ptep = find_init_mm_pte(token, &hugepage_shift);
- if (!ptep)
- return token;
-
- pa = pte_pfn(*ptep);
-
- /* On radix we can do hugepage mappings for io, so handle that */
- if (!hugepage_shift)
- hugepage_shift = PAGE_SHIFT;
-
- pa <<= PAGE_SHIFT;
- pa |= token & ((1ul << hugepage_shift) - 1);
- return pa;
+ return ppc_find_vmap_phys(token);
}
/*
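The removed body above open-coded a vmap/ioremap virtual-to-physical translation that the patch assumes is now provided by ppc_find_vmap_phys(). A minimal C sketch of that translation, reusing only the interfaces the deleted lines already called (find_init_mm_pte(), pte_pfn(), PAGE_SHIFT); the function name is illustrative and not part of this patch:

/* Sketch: translate a vmalloc/ioremap address to a physical address by
 * walking the init_mm page tables, as the deleted eeh_token_to_phys() did. */
static unsigned long vmap_to_phys_sketch(unsigned long token)
{
	int hugepage_shift;
	unsigned long pa;
	pte_t *ptep;

	ptep = find_init_mm_pte(token, &hugepage_shift);
	if (!ptep)
		return token;		/* not mapped: hand the token back */

	/* Radix can use hugepage mappings for IO; treat 0 as a normal page */
	if (!hugepage_shift)
		hugepage_shift = PAGE_SHIFT;

	pa = pte_pfn(*ptep) << PAGE_SHIFT;	/* physical base of the page */
	return pa | (token & ((1ul << hugepage_shift) - 1));	/* in-page offset */
}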
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 9160285cb2f4..0273a1349006 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -32,6 +32,7 @@
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
+#include <asm/interrupt.h>
#include "head_32.h"
@@ -74,6 +75,24 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
.globl transfer_to_syscall
transfer_to_syscall:
+ stw r11, GPR1(r1)
+ stw r11, 0(r1)
+ mflr r12
+ stw r12, _LINK(r1)
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#endif
+ lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ SAVE_GPR(2, r1)
+ addi r12,r12,STACK_FRAME_REGS_MARKER@l
+ stw r9,_MSR(r1)
+ li r2, INTERRUPT_SYSCALL
+ stw r12,8(r1)
+ stw r2,_TRAP(r1)
+ SAVE_GPR(0, r1)
+ SAVE_4GPRS(3, r1)
+ SAVE_2GPRS(7, r1)
+ addi r2,r10,-THREAD
SAVE_NVGPRS(r1)
/* Calling convention has r9 = orig r0, r10 = regs */
@@ -176,28 +195,6 @@ _GLOBAL(_switch)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
- mfmsr r11
- li r0,MSR_FP /* Disable floating-point */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_VEC@h /* Disable altivec */
- mfspr r12,SPRN_VRSAVE /* save vrsave register value */
- stw r12,THREAD+THREAD_VRSAVE(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_SPE@h /* Disable SPE */
- mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
- stw r12,THREAD+THREAD_SPEFSCR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
- and. r0,r0,r11 /* FP or altivec or SPE enabled? */
- beq+ 1f
- andc r11,r11,r0
- mtmsr r11
- isync
-1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
@@ -218,19 +215,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
mr r3,r2
addi r2,r4,-THREAD /* Update current */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_VRSAVE(r2)
- mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_SPEFSCR(r2)
- mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
-
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 03727308d8cc..15720f8661a1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,7 +32,6 @@
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
-#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
@@ -48,372 +47,7 @@
/*
* System calls.
*/
- .section ".toc","aw"
-SYS_CALL_TABLE:
- .tc sys_call_table[TC],sys_call_table
-
-#ifdef CONFIG_COMPAT
-COMPAT_SYS_CALL_TABLE:
- .tc compat_sys_call_table[TC],compat_sys_call_table
-#endif
-
-/* This value is used to mark exception frames on the stack. */
-exception_marker:
- .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
-
.section ".text"
- .align 7
-
-#ifdef CONFIG_PPC_BOOK3S
-.macro system_call_vectored name trapnr
- .globl system_call_vectored_\name
-system_call_vectored_\name:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne .Ltabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- SCV_INTERRUPT_TO_KERNEL
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
- ld r2,PACATOC(r13)
- mfcr r12
- li r11,0
- /* Can we avoid saving r3-r8 in common case? */
- std r3,GPR3(r1)
- std r4,GPR4(r1)
- std r5,GPR5(r1)
- std r6,GPR6(r1)
- std r7,GPR7(r1)
- std r8,GPR8(r1)
- /* Zero r9-r12, this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_LINK(r1)
- std r11,_CTR(r1)
-
- li r11,\trapnr
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- addi r10,r1,STACK_FRAME_OVERHEAD
- ld r11,exception_marker@toc(r2)
- std r11,-16(r10) /* "regshere" marker */
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- /*
- * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
- * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
- * and interrupts may be masked and pending already.
- * system_call_exception() will call trace_hardirqs_off() which means
- * interrupts could already have been blocked before trace_hardirqs_off,
- * but this is the best we can do.
- */
-
- /* Calling convention has r9 = orig r0, r10 = regs */
- mr r9,r0
- bl system_call_exception
-
-.Lsyscall_vectored_\name\()_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,1 /* scv */
- bl syscall_exit_prepare
-
- ld r2,_CCR(r1)
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- cmpdi r3,0
- bne .Lsyscall_vectored_\name\()_restore_regs
-
- /* rfscv returns with LR->NIA and CTR->MSR */
- mtlr r4
- mtctr r5
-
- /* Could zero these as per ABI, but we may consider a stricter ABI
- * which preserves these if libc implementations can benefit, so
- * restore them for now until further measurement is done. */
- ld r0,GPR0(r1)
- ld r4,GPR4(r1)
- ld r5,GPR5(r1)
- ld r6,GPR6(r1)
- ld r7,GPR7(r1)
- ld r8,GPR8(r1)
- /* Zero volatile regs that may contain sensitive kernel data */
- li r9,0
- li r10,0
- li r11,0
- li r12,0
- mtspr SPRN_XER,r0
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
- RFSCV_TO_USER
- b . /* prevent speculative execution */
-
-.Lsyscall_vectored_\name\()_restore_regs:
- li r3,0
- mtmsrd r3,1
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
-
- ld r3,_CTR(r1)
- ld r4,_LINK(r1)
- ld r5,_XER(r1)
-
- REST_NVGPRS(r1)
- ld r0,GPR0(r1)
- mtcr r2
- mtctr r3
- mtlr r4
- mtspr SPRN_XER,r5
- REST_10GPRS(2, r1)
- REST_2GPRS(12, r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
-.endm
-
-system_call_vectored common 0x3000
-/*
- * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
- * which is tested by system_call_exception when r0 is -1 (as set by vector
- * entry code).
- */
-system_call_vectored sigill 0x7ff0
-
-
-/*
- * Entered via kernel return set up by kernel/sstep.c, must match entry regs
- */
- .globl system_call_vectored_emulate
-system_call_vectored_emulate:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- b system_call_vectored_common
-#endif
-
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common_real
-system_call_common_real:
- ld r10,PACAKMSR(r13) /* get MSR value for kernel */
- mtmsrd r10
-
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common
-system_call_common:
-_ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne .Ltabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
-#ifdef CONFIG_PPC_FSL_BOOK3E
-START_BTB_FLUSH_SECTION
- BTB_FLUSH(r10)
-END_BTB_FLUSH_SECTION
-#endif
- ld r2,PACATOC(r13)
- mfcr r12
- li r11,0
- /* Can we avoid saving r3-r8 in common case? */
- std r3,GPR3(r1)
- std r4,GPR4(r1)
- std r5,GPR5(r1)
- std r6,GPR6(r1)
- std r7,GPR7(r1)
- std r8,GPR8(r1)
- /* Zero r9-r12, this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_CTR(r1)
- mflr r10
-
- /*
- * This clears CR0.SO (bit 28), which is the error indication on
- * return from this system call.
- */
- rldimi r12,r11,28,(63-28)
- li r11,0xc00
- std r10,_LINK(r1)
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- addi r10,r1,STACK_FRAME_OVERHEAD
- ld r11,exception_marker@toc(r2)
- std r11,-16(r10) /* "regshere" marker */
-
- /*
- * We always enter kernel from userspace with irq soft-mask enabled and
- * nothing pending. system_call_exception() will call
- * trace_hardirqs_off().
- */
- li r11,IRQS_ALL_DISABLED
- li r12,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQSOFTMASK(r13)
- stb r12,PACAIRQHAPPENED(r13)
-
- /* Calling convention has r9 = orig r0, r10 = regs */
- mr r9,r0
- bl system_call_exception
-
-.Lsyscall_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,0 /* !scv */
- bl syscall_exit_prepare
-
- ld r2,_CCR(r1)
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
- ld r6,_LINK(r1)
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
- mtlr r6
-
- cmpdi r3,0
- bne .Lsyscall_restore_regs
- /* Zero volatile regs that may contain sensitive kernel data */
- li r0,0
- li r4,0
- li r5,0
- li r6,0
- li r7,0
- li r8,0
- li r9,0
- li r10,0
- li r11,0
- li r12,0
- mtctr r0
- mtspr SPRN_XER,r0
-.Lsyscall_restore_regs_cont:
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-.Lsyscall_restore_regs:
- ld r3,_CTR(r1)
- ld r4,_XER(r1)
- REST_NVGPRS(r1)
- mtctr r3
- mtspr SPRN_XER,r4
- ld r0,GPR0(r1)
- REST_8GPRS(4, r1)
- ld r12,GPR12(r1)
- b .Lsyscall_restore_regs_cont
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-.Ltabort_syscall:
- /* Firstly we need to enable TM in the kernel */
- mfmsr r10
- li r9, 1
- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r10, 0
-
- /* tabort, this dooms the transaction, nothing else */
- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R9)
-
- /*
- * Return directly to userspace. We have corrupted user register state,
- * but userspace will never see that register state. Execution will
- * resume after the tbegin of the aborted transaction with the
- * checkpointed register state.
- */
- li r9, MSR_RI
- andc r10, r10, r9
- mtmsrd r10, 1
- mtspr SPRN_SRR0, r11
- mtspr SPRN_SRR1, r12
- RFI_TO_USER
- b . /* prevent speculative execution */
-#endif
-
-#ifdef CONFIG_PPC_BOOK3S
-_GLOBAL(ret_from_fork_scv)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_vectored_common_exit
-#endif
-
-_GLOBAL(ret_from_fork)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_exit
-
-_GLOBAL(ret_from_kernel_thread)
- bl schedule_tail
- REST_NVGPRS(r1)
- mtctr r14
- mr r3,r15
-#ifdef PPC64_ELF_ABI_v2
- mr r12,r14
-#endif
- bctrl
- li r3,0
- b .Lsyscall_exit
#ifdef CONFIG_PPC_BOOK3S_64
@@ -630,156 +264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr
- /*
- * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
- * touched, no exit work created, then this can be used.
- */
- .balign IFETCH_ALIGN_BYTES
- .globl fast_interrupt_return
-fast_interrupt_return:
-_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
- kuap_check_amr r3, r4
- ld r5,_MSR(r1)
- andi. r0,r5,MSR_PR
-#ifdef CONFIG_PPC_BOOK3S
- bne .Lfast_user_interrupt_return_amr
- kuap_kernel_restore r3, r4
- andi. r0,r5,MSR_RI
- li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
- bne+ .Lfast_kernel_interrupt_return
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b . /* should not get here */
-#else
- bne .Lfast_user_interrupt_return
- b .Lfast_kernel_interrupt_return
-#endif
-
- .balign IFETCH_ALIGN_BYTES
- .globl interrupt_return
-interrupt_return:
-_ASM_NOKPROBE_SYMBOL(interrupt_return)
- ld r4,_MSR(r1)
- andi. r0,r4,MSR_PR
- beq .Lkernel_interrupt_return
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_user_prepare
- cmpdi r3,0
- bne- .Lrestore_nvgprs
-
-#ifdef CONFIG_PPC_BOOK3S
-.Lfast_user_interrupt_return_amr:
- kuap_user_restore r3, r4
-#endif
-.Lfast_user_interrupt_return:
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
-BEGIN_FTR_SECTION
- ld r10,_PPR(r1)
- mtspr SPRN_PPR,r10
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r0,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- ld r3,_CCR(r1)
- ld r4,_LINK(r1)
- ld r5,_CTR(r1)
- ld r6,_XER(r1)
- li r0,0
-
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
- REST_GPR(13, r1)
-
- mtcr r3
- mtlr r4
- mtctr r5
- mtspr SPRN_XER,r6
-
- REST_4GPRS(2, r1)
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-.Lrestore_nvgprs:
- REST_NVGPRS(r1)
- b .Lfast_user_interrupt_return
-
- .balign IFETCH_ALIGN_BYTES
-.Lkernel_interrupt_return:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_kernel_prepare
-
-.Lfast_kernel_interrupt_return:
- cmpdi cr1,r3,0
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r0,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- ld r3,_LINK(r1)
- ld r4,_CTR(r1)
- ld r5,_XER(r1)
- ld r6,_CCR(r1)
- li r0,0
-
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
-
- mtlr r3
- mtctr r4
- mtspr SPRN_XER,r5
-
- /*
- * Leaving a stale exception_marker on the stack can confuse
- * the reliable stack unwinder later on. Clear it.
- */
- std r0,STACK_FRAME_OVERHEAD-16(r1)
-
- REST_4GPRS(2, r1)
-
- bne- cr1,1f /* emulate stack store */
- mtcr r6
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-1: /*
- * Emulate stack store with update. New r1 value was already calculated
- * and updated in our interrupt regs by emulate_loadstore, but we can't
- * store the previous value of r1 to the stack before re-loading our
- * registers from it, otherwise they could be clobbered. Use
- * PACA_EXGEN as temporary storage to hold the store data, as
- * interrupts are disabled here so it won't be clobbered.
- */
- mtcr r6
- std r9,PACA_EXGEN+0(r13)
- addi r9,r1,INT_FRAME_SIZE /* get original r1 */
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- std r9,0(r1) /* perform store component of stdu */
- ld r9,PACA_EXGEN+0(r13)
-
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
#ifdef CONFIG_PPC_RTAS
/*
* On CHRP, the Run-Time Abstraction Services (RTAS) have to be
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 2ed14d4a47f5..93b0f3ec8fb0 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -38,9 +38,9 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
for (i = 0; i < (len / 4); i++) {
struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
- patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst);
+ patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
- patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst);
+ patch_instruction(epapr_ev_idle_start + i, inst);
#endif
}
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 7c3654b0d0f4..1401787b0b93 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -26,6 +26,10 @@
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>
+/* 64e interrupt returns always use SRR registers */
+#define fast_interrupt_return fast_interrupt_return_srr
+#define interrupt_return interrupt_return_srr
+
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
* when taking special interrupts. For now we don't support that,
@@ -340,6 +344,12 @@ ret_from_mc_except:
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
bne masked_interrupt_book3e_##n
+/*
+ * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
+ * called, because that does SAVE_NVGPRS which must see the original register
+ * values; otherwise the scratch values might be restored when exiting the
+ * interrupt.
+ */
#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
std r15,PACA_EXGEN+EX_R15(r13)
@@ -535,6 +545,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x300)
b storage_fault_common
@@ -544,6 +558,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
li r15,0
mr r14,r10
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x400)
b storage_fault_common
@@ -557,6 +575,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x600)
b alignment_more /* no room, go out of line */
@@ -565,10 +587,10 @@ __end_interrupts:
NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
- EXCEPTION_COMMON(0x700)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXGEN+EX_R14(r13)
+ EXCEPTION_COMMON(0x700)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1)
b interrupt_return
@@ -725,11 +747,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_CRIT(0xd00)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXCRIT+EX_R14(r13)
ld r15,PACA_EXCRIT+EX_R15(r13)
+ EXCEPTION_COMMON_CRIT(0xd00)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
REST_NVGPRS(r1)
b interrupt_return
@@ -796,11 +818,11 @@ kernel_dbg_exc:
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_DBG(0xd08)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXDBG+EX_R14(r13)
ld r15,PACA_EXDBG+EX_R15(r13)
+ EXCEPTION_COMMON_DBG(0xd08)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
REST_NVGPRS(r1)
b interrupt_return
@@ -879,6 +901,34 @@ kernel_dbg_exc:
bl unknown_exception
b interrupt_return
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ ld r11,PACATOC(r13)
+ ld r14,__start___restart_table@got(r11)
+ ld r15,__stop___restart_table@got(r11)
+#else
+ LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
+#endif
+300:
+ cmpd r14,r15
+ beq 302f
+ ld r11,0(r14)
+ cmpld r10,r11
+ blt 301f
+ ld r11,8(r14)
+ cmpld r10,r11
+ bge 301f
+ ld r11,16(r14)
+ b 303f
+301:
+ addi r14,r14,24
+ b 300b
+302:
+ li r11,0
+303:
+.endm
+
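As a reading aid, the SEARCH_RESTART_TABLE macro above amounts to a linear scan of __restart_table. Judging from the 24-byte stride and the three loads, each entry appears to hold a start address, an end address and a fixup address; the C below is a sketch under that assumption, with illustrative names:

struct restart_entry {
	unsigned long start;	/* first address covered by this entry */
	unsigned long end;	/* one past the last covered address */
	unsigned long fixup;	/* address to restart execution from */
};

/* Equivalent of the 300:/301:/302:/303: loop: returns the fixup address for
 * nip, or 0 when no table entry covers it (r11 in the macro above). */
static unsigned long search_restart_table(const struct restart_entry *entry,
					   const struct restart_entry *stop,
					   unsigned long nip)
{
	for (; entry != stop; entry++) {
		if (nip >= entry->start && nip < entry->end)
			return entry->fixup;
	}
	return 0;
}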
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
* accordingly and if the interrupt is level sensitive, we hard disable
@@ -887,6 +937,9 @@ kernel_dbg_exc:
*/
.macro masked_interrupt_book3e paca_irq full_mask
+ std r14,PACA_EXGEN+EX_R14(r13)
+ std r15,PACA_EXGEN+EX_R15(r13)
+
lbz r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
@@ -896,15 +949,23 @@ kernel_dbg_exc:
stb r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
- rldicl r10,r11,48,1 /* clear MSR_EE */
- rotldi r11,r10,16
+ xori r11,r11,MSR_EE /* clear MSR_EE */
mtspr SPRN_SRR1,r11
.endif
+ mfspr r10,SPRN_SRR0
+ SEARCH_RESTART_TABLE
+ cmpdi r11,0
+ beq 1f
+ mtspr SPRN_SRR0,r11 /* return to restart address */
+1:
+
lwz r11,PACA_EXGEN+EX_CR(r13)
mtcr r11
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
mfspr r13,SPRN_SPRG_GEN_SCRATCH
rfi
b .
@@ -931,11 +992,7 @@ masked_interrupt_book3e_0x2c0:
* original values stashed away in the PACA
*/
storage_fault_common:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
bl do_page_fault
b interrupt_return
@@ -944,11 +1001,7 @@ storage_fault_common:
* continues here.
*/
alignment_more:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
bl alignment_exception
REST_NVGPRS(r1)
b interrupt_return
@@ -1272,7 +1325,12 @@ a2_tlbinit_code_start:
a2_tlbinit_after_linear_map:
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+ ld r5,PACATOC(r13)
+ ld r3,1f@got(r5)
+#else
LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
+#endif
mtctr r3
bctr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index fa8e52a0239e..4aec59a77d4c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -21,22 +21,6 @@
#include <asm/feature-fixups.h>
#include <asm/kup.h>
-/* PACA save area offsets (exgen, exmc, etc) */
-#define EX_R9 0
-#define EX_R10 8
-#define EX_R11 16
-#define EX_R12 24
-#define EX_R13 32
-#define EX_DAR 40
-#define EX_DSISR 48
-#define EX_CCR 52
-#define EX_CFAR 56
-#define EX_PPR 64
-#define EX_CTR 72
-.if EX_SIZE != 10
- .error "EX_SIZE is wrong"
-.endif
-
/*
* Following are fixed section helper macros.
*
@@ -133,7 +117,6 @@ name:
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
-#define IKVM_SKIP .L_IKVM_SKIP_\name\() /* Generate KVM skip handler */
#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */
#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name
#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */
@@ -190,9 +173,6 @@ do_define_int n
.ifndef IMASK
IMASK=0
.endif
- .ifndef IKVM_SKIP
- IKVM_SKIP=0
- .endif
.ifndef IKVM_REAL
IKVM_REAL=0
.endif
@@ -207,8 +187,6 @@ do_define_int n
.endif
.endm
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
* All interrupts which set HSRR registers, as well as SRESET and MCE and
* syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
@@ -238,88 +216,28 @@ do_define_int n
/*
* If an interrupt is taken while a guest is running, it is immediately routed
- * to KVM to handle. If both HV and PR KVM arepossible, KVM interrupts go first
- * to kvmppc_interrupt_hv, which handles the PR guest case.
+ * to KVM to handle.
*/
-#define kvmppc_interrupt kvmppc_interrupt_hv
-#else
-#define kvmppc_interrupt kvmppc_interrupt_pr
-#endif
-.macro KVMTEST name
+.macro KVMTEST name handler
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
lbz r10,HSTATE_IN_GUEST(r13)
cmpwi r10,0
- bne \name\()_kvm
-.endm
-
-.macro GEN_KVM name
- .balign IFETCH_ALIGN_BYTES
-\name\()_kvm:
-
- .if IKVM_SKIP
- cmpwi r10,KVM_GUEST_MODE_SKIP
- beq 89f
- .else
-BEGIN_FTR_SECTION
- ld r10,IAREA+EX_CFAR(r13)
- std r10,HSTATE_CFAR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
- .endif
-
- ld r10,IAREA+EX_CTR(r13)
- mtctr r10
-BEGIN_FTR_SECTION
- ld r10,IAREA+EX_PPR(r13)
- std r10,HSTATE_PPR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ld r11,IAREA+EX_R11(r13)
- ld r12,IAREA+EX_R12(r13)
- std r12,HSTATE_SCRATCH0(r13)
- sldi r12,r9,32
- ld r9,IAREA+EX_R9(r13)
- ld r10,IAREA+EX_R10(r13)
/* HSRR variants have the 0x2 bit added to their trap number */
.if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
- ori r12,r12,(IVEC + 0x2)
+ li r10,(IVEC + 0x2)
FTR_SECTION_ELSE
- ori r12,r12,(IVEC)
+ li r10,(IVEC)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
.elseif IHSRR
- ori r12,r12,(IVEC+ 0x2)
+ li r10,(IVEC + 0x2)
.else
- ori r12,r12,(IVEC)
+ li r10,(IVEC)
.endif
- b kvmppc_interrupt
-
- .if IKVM_SKIP
-89: mtocrf 0x80,r9
- ld r10,IAREA+EX_CTR(r13)
- mtctr r10
- ld r9,IAREA+EX_R9(r13)
- ld r10,IAREA+EX_R10(r13)
- ld r11,IAREA+EX_R11(r13)
- ld r12,IAREA+EX_R12(r13)
- .if IHSRR_IF_HVMODE
- BEGIN_FTR_SECTION
- b kvmppc_skip_Hinterrupt
- FTR_SECTION_ELSE
- b kvmppc_skip_interrupt
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif IHSRR
- b kvmppc_skip_Hinterrupt
- .else
- b kvmppc_skip_interrupt
- .endif
- .endif
-.endm
-
-#else
-.macro KVMTEST name
-.endm
-.macro GEN_KVM name
-.endm
+ bne \handler
#endif
+.endm
/*
* This is the BOOK3S interrupt entry code macro.
@@ -461,7 +379,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
.if IKVM_REAL
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
.endif
ld r10,PACAKMSR(r13) /* get MSR value for kernel */
@@ -484,7 +402,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
.if IKVM_VIRT
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
1:
.endif
.endif /* IVIRT */
@@ -498,7 +416,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
.if IKVM_REAL
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
.endif
.endm
@@ -510,18 +428,31 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
/* If coming from user, skip soft-mask tests. */
andi. r10,r12,MSR_PR
- bne 2f
-
- /* Kernel code running below __end_interrupts is implicitly
- * soft-masked */
- LOAD_HANDLER(r10, __end_interrupts)
+ bne 3f
+
+ /*
+ * Kernel code running below __end_soft_masked may be
+ * implicitly soft-masked if it is within the regions
+ * in the soft mask table.
+ */
+ LOAD_HANDLER(r10, __end_soft_masked)
cmpld r11,r10
+ bge+ 1f
+
+ /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
+ mtctr r12
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ SEARCH_SOFT_MASK_TABLE
+ cmpdi r12,0
+ mfctr r12 /* Restore r12 to SRR1 */
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ beq 1f /* Not in soft-mask table */
li r10,IMASK
- blt- 1f
+ b 2f /* In soft-mask table, always mask */
/* Test the soft mask state against our interrupt's bit */
- lbz r10,PACAIRQSOFTMASK(r13)
-1: andi. r10,r10,IMASK
+1: lbz r10,PACAIRQSOFTMASK(r13)
+2: andi. r10,r10,IMASK
/* Associate vector numbers with bits in paca->irq_happened */
.if IVEC == 0x500 || IVEC == 0xea0
li r10,PACA_IRQ_EE
@@ -552,7 +483,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
.if ISTACK
andi. r10,r12,MSR_PR /* See if coming from user */
-2: mr r10,r1 /* Save r1 */
+3: mr r10,r1 /* Save r1 */
subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */
beq- 100f
ld r1,PACAKSAVE(r13) /* kernel stack to use */
@@ -567,6 +498,20 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
+ /* Mark our [H]SRRs valid for return */
+ li r10,1
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ stb r10,PACAHSRR_VALID(r13)
+ FTR_SECTION_ELSE
+ stb r10,PACASRR_VALID(r13)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ stb r10,PACAHSRR_VALID(r13)
+ .else
+ stb r10,PACASRR_VALID(r13)
+ .endif
+
.if ISET_RI
li r10,MSR_RI
mtmsrd r10,1 /* Set MSR_RI */
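The PACASRR_VALID/PACAHSRR_VALID bytes stored here record that the [H]SRR registers still hold the NIP/MSR just saved to the stack frame. How the exit path uses that is not shown in this hunk; a plausible sketch, assuming a paca srr_valid byte behind the PACASRR_VALID offset and the usual mtspr/SPRN_SRR0/SPRN_SRR1 accessors:

/* Sketch only: rewrite SRR0/SRR1 before returning to the interrupted context
 * unless they are still marked as matching the saved registers. */
static void prepare_srr_for_return_sketch(struct pt_regs *regs)
{
	if (get_paca()->srr_valid)	/* assumed field behind PACASRR_VALID */
		return;			/* SRRs already hold regs->nip / regs->msr */
	mtspr(SPRN_SRR0, regs->nip);
	mtspr(SPRN_SRR1, regs->msr);
	get_paca()->srr_valid = 1;
}

Anything that changes the saved NIP/MSR (as fpu.S does later in this patch) or clobbers the SRRs must clear the byte again, which is what the stb of zero in EXCEPTION_RESTORE_REGS and in the masked-interrupt path does.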
@@ -659,6 +604,66 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
__GEN_COMMON_BODY \name
.endm
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___restart_table)
+ LOAD_REG_ADDR(r10, __stop___restart_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ ld r12,16(r9)
+ b 303f
+301:
+ addi r9,r9,24
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
+.macro SEARCH_SOFT_MASK_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___soft_mask_table)
+ LOAD_REG_ADDR(r10, __stop___soft_mask_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ li r12,1
+ b 303f
+301:
+ addi r9,r9,16
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
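SEARCH_SOFT_MASK_TABLE follows the same scan shape as the restart-table sketch shown earlier, but over 16-byte {start, end} pairs and with a boolean result in r12. A C sketch with illustrative names:

struct soft_mask_entry {
	unsigned long start;
	unsigned long end;	/* exclusive */
};

/* Returns 1 when nip falls inside a region that must be treated as
 * implicitly soft-masked, 0 otherwise. */
static int nip_is_soft_masked(const struct soft_mask_entry *entry,
			      const struct soft_mask_entry *stop,
			      unsigned long nip)
{
	for (; entry != stop; entry++) {
		if (nip >= entry->start && nip < entry->end)
			return 1;
	}
	return 0;
}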
/*
* Restore all registers including H/SRR0/1 saved in a stack frame of a
* standard exception.
@@ -666,10 +671,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.macro EXCEPTION_RESTORE_REGS hsrr=0
/* Move original SRR0 and SRR1 into the respective regs */
ld r9,_MSR(r1)
+ li r10,0
.if \hsrr
mtspr SPRN_HSRR1,r9
+ stb r10,PACAHSRR_VALID(r13)
.else
mtspr SPRN_SRR1,r9
+ stb r10,PACASRR_VALID(r13)
.endif
ld r9,_NIP(r1)
.if \hsrr
@@ -786,17 +794,17 @@ __start_interrupts:
* scv instructions enter the kernel without changing EE, RI, ME, or HV.
* In particular, this means we can take a maskable interrupt at any point
* in the scv handler, which is unlike any other interrupt. This is solved
- * by treating the instruction addresses below __end_interrupts as being
- * soft-masked.
+ * by treating the instruction addresses in the handler as being soft-masked,
+ * by adding a SOFT_MASK_TABLE entry for them.
*
* AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
* ensure scv is never executed with relocation off, which means AIL-0
* should never happen.
*
- * Before leaving the below __end_interrupts text, at least of the following
- * must be true:
+ * Before leaving the following inside-__end_soft_masked text, at least one of
+ * the following must be true:
* - MSR[PR]=1 (i.e., return to userspace)
- * - MSR_EE|MSR_RI is set (no reentrant exceptions)
+ * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
* - Standard kernel environment is set up (stack, paca, etc)
*
* Call convention:
@@ -804,6 +812,7 @@ __start_interrupts:
* syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
+1:
/* SCV 0 */
mr r9,r13
GET_PACA(r13)
@@ -833,8 +842,11 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
b system_call_vectored_sigill
#endif
.endr
+2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
+SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
__LOAD_HANDLER(r10, system_call_vectored_common)
@@ -1000,8 +1012,6 @@ EXC_COMMON_BEGIN(system_reset_common)
EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
- GEN_KVM system_reset
-
/**
* Interrupt 0x200 - Machine Check Interrupt (MCE).
@@ -1070,7 +1080,6 @@ INT_DEFINE_BEGIN(machine_check)
ISET_RI=0
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(machine_check)
@@ -1166,7 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/*
* Check if we are coming from guest. If yes, then run the normal
* exception handler which will take the
- * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
+ * machine_check_kvm->kvm_interrupt branch to deliver the MC event
* to guest.
*/
lbz r11,HSTATE_IN_GUEST(r13)
@@ -1234,9 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
- b interrupt_return
-
- GEN_KVM machine_check
+ b interrupt_return_srr
#ifdef CONFIG_PPC_P7_NAP
@@ -1342,7 +1349,6 @@ INT_DEFINE_BEGIN(data_access)
IVEC=0x300
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(data_access)
@@ -1363,7 +1369,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_srr
1: bl do_break
/*
@@ -1371,9 +1377,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* If so, we need to restore them with their updated values.
*/
REST_NVGPRS(r1)
- b interrupt_return
-
- GEN_KVM data_access
+ b interrupt_return_srr
/**
@@ -1396,7 +1400,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
INT_DEFINE_BEGIN(data_access_slb)
IVEC=0x380
IDAR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(data_access_slb)
@@ -1414,7 +1417,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1423,9 +1426,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
-
- GEN_KVM data_access_slb
+ b interrupt_return_srr
/**
@@ -1461,9 +1462,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
-
- GEN_KVM instruction_access
+ b interrupt_return_srr
/**
@@ -1498,7 +1497,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1507,9 +1506,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
-
- GEN_KVM instruction_access_slb
+ b interrupt_return_srr
/**
@@ -1553,9 +1550,11 @@ EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
-
- GEN_KVM hardware_interrupt
+ BEGIN_FTR_SECTION
+ b interrupt_return_hsrr
+ FTR_SECTION_ELSE
+ b interrupt_return_srr
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/**
@@ -1582,9 +1581,7 @@ EXC_COMMON_BEGIN(alignment_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl alignment_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM alignment
+ b interrupt_return_srr
/**
@@ -1691,9 +1688,7 @@ EXC_COMMON_BEGIN(program_check_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM program_check
+ b interrupt_return_srr
/*
@@ -1736,16 +1731,14 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl load_up_fpu
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
- GEN_KVM fp_unavailable
-
/**
* Interrupt 0x900 - Decrementer Interrupt.
@@ -1782,9 +1775,7 @@ EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
addi r3,r1,STACK_FRAME_OVERHEAD
bl timer_interrupt
- b interrupt_return
-
- GEN_KVM decrementer
+ b interrupt_return_srr
/**
@@ -1821,6 +1812,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
*
* Be careful to avoid touching the kernel stack.
*/
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
@@ -1831,8 +1824,6 @@ EXC_COMMON_BEGIN(hdecrementer_common)
ld r13,PACA_EXGEN+EX_R13(r13)
HRFI_TO_KERNEL
- GEN_KVM hdecrementer
-
/**
* Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
@@ -1870,9 +1861,7 @@ EXC_COMMON_BEGIN(doorbell_super_common)
#else
bl unknown_async_exception
#endif
- b interrupt_return
-
- GEN_KVM doorbell_super
+ b interrupt_return_srr
EXC_REAL_NONE(0xb00, 0x100)
@@ -1923,7 +1912,7 @@ INT_DEFINE_END(system_call)
GET_PACA(r13)
std r10,PACA_EXGEN+EX_R10(r13)
INTERRUPT_TO_KERNEL
- KVMTEST system_call /* uses r10, branch to system_call_kvm */
+ KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
mfctr r9
#else
mr r9,r13
@@ -1949,8 +1938,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mtctr r10
bctr
.else
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
#ifdef CONFIG_RELOCATABLE
__LOAD_HANDLER(r10, system_call_common)
mtctr r10
@@ -1979,14 +1966,16 @@ EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
EXC_VIRT_END(system_call, 0x4c00, 0x100)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-TRAMP_REAL_BEGIN(system_call_kvm)
- /*
- * This is a hcall, so register convention is as above, with these
- * differences:
- * r13 = PACA
- * ctr = orig r13
- * orig r10 saved in PACA
- */
+TRAMP_REAL_BEGIN(kvm_hcall)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfcr r9
+ mfctr r10
+ std r10,PACA_EXGEN+EX_R13(r13)
+ li r10,0
+ std r10,PACA_EXGEN+EX_CFAR(r13)
+ std r10,PACA_EXGEN+EX_CTR(r13)
/*
* Save the PPR (on systems that support it) before changing to
* HMT_MEDIUM. That allows the KVM code to save that value into the
@@ -1994,31 +1983,24 @@ TRAMP_REAL_BEGIN(system_call_kvm)
*/
BEGIN_FTR_SECTION
mfspr r10,SPRN_PPR
- std r10,HSTATE_PPR(r13)
+ std r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
HMT_MEDIUM
- mfctr r10
- SET_SCRATCH0(r10)
- mfcr r10
- std r12,HSTATE_SCRATCH0(r13)
- sldi r12,r10,32
- ori r12,r12,0xc00
+
#ifdef CONFIG_RELOCATABLE
/*
- * Requires __LOAD_FAR_HANDLER beause kvmppc_interrupt lives
+ * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
* outside the head section.
*/
- __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
+ __LOAD_FAR_HANDLER(r10, kvmppc_hcall)
mtctr r10
- ld r10,PACA_EXGEN+EX_R10(r13)
bctr
#else
- ld r10,PACA_EXGEN+EX_R10(r13)
- b kvmppc_interrupt
+ b kvmppc_hcall
#endif
#endif
-
/**
* Interrupt 0xd00 - Trace Interrupt.
* This is a synchronous interrupt in response to instruction step or
@@ -2041,9 +2023,7 @@ EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
addi r3,r1,STACK_FRAME_OVERHEAD
bl single_step_exception
- b interrupt_return
-
- GEN_KVM single_step
+ b interrupt_return_srr
/**
@@ -2063,7 +2043,6 @@ INT_DEFINE_BEGIN(h_data_storage)
IHSRR=1
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)
@@ -2082,9 +2061,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
- b interrupt_return
-
- GEN_KVM h_data_storage
+ b interrupt_return_hsrr
/**
@@ -2109,9 +2086,7 @@ EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
-
- GEN_KVM h_instr_storage
+ b interrupt_return_hsrr
/**
@@ -2135,9 +2110,7 @@ EXC_COMMON_BEGIN(emulation_assist_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl emulation_assist_interrupt
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM emulation_assist
+ b interrupt_return_hsrr
/**
@@ -2210,15 +2183,11 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
EXCEPTION_RESTORE_REGS hsrr=1
GEN_INT_ENTRY hmi_exception, virt=0
- GEN_KVM hmi_exception_early
-
EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
- b interrupt_return
-
- GEN_KVM hmi_exception
+ b interrupt_return_hsrr
/**
@@ -2248,9 +2217,7 @@ EXC_COMMON_BEGIN(h_doorbell_common)
#else
bl unknown_async_exception
#endif
- b interrupt_return
-
- GEN_KVM h_doorbell
+ b interrupt_return_hsrr
/**
@@ -2276,9 +2243,7 @@ EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
-
- GEN_KVM h_virt_irq
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xec0, 0x20)
@@ -2321,9 +2286,7 @@ EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
- b interrupt_return
-
- GEN_KVM performance_monitor
+ b interrupt_return_srr
/**
@@ -2360,21 +2323,19 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl load_up_altivec
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b interrupt_return
-
- GEN_KVM altivec_unavailable
+ b interrupt_return_srr
/**
@@ -2415,16 +2376,14 @@ BEGIN_FTR_SECTION
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
- b interrupt_return
-
- GEN_KVM vsx_unavailable
+ b interrupt_return_srr
/**
@@ -2452,9 +2411,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM facility_unavailable
+ b interrupt_return_srr
/**
@@ -2482,9 +2439,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
- b interrupt_return
-
- GEN_KVM h_facility_unavailable
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xfa0, 0x20)
@@ -2513,9 +2468,7 @@ EXC_COMMON_BEGIN(cbe_system_error_common)
GEN_COMMON cbe_system_error
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_system_error_exception
- b interrupt_return
-
- GEN_KVM cbe_system_error
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
@@ -2546,9 +2499,7 @@ EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
addi r3,r1,STACK_FRAME_OVERHEAD
bl instruction_breakpoint_exception
- b interrupt_return
-
- GEN_KVM instruction_breakpoint
+ b interrupt_return_srr
EXC_REAL_NONE(0x1400, 0x100)
@@ -2656,6 +2607,8 @@ BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -2668,9 +2621,7 @@ EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
-
- GEN_KVM denorm_exception
+ b interrupt_return_hsrr
#ifdef CONFIG_CBE_RAS
@@ -2687,9 +2638,7 @@ EXC_COMMON_BEGIN(cbe_maintenance_common)
GEN_COMMON cbe_maintenance
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_maintenance_exception
- b interrupt_return
-
- GEN_KVM cbe_maintenance
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
@@ -2719,9 +2668,7 @@ EXC_COMMON_BEGIN(altivec_assist_common)
#else
bl unknown_exception
#endif
- b interrupt_return
-
- GEN_KVM altivec_assist
+ b interrupt_return_srr
#ifdef CONFIG_CBE_RAS
@@ -2738,9 +2685,7 @@ EXC_COMMON_BEGIN(cbe_thermal_common)
GEN_COMMON cbe_thermal
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_thermal_exception
- b interrupt_return
-
- GEN_KVM cbe_thermal
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
@@ -2765,7 +2710,6 @@ INT_DEFINE_END(soft_nmi)
* and run it entirely with interrupts hard disabled.
*/
EXC_COMMON_BEGIN(soft_nmi_common)
- mfspr r11,SPRN_SRR0
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
@@ -2779,6 +2723,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
mtmsrd r9,1
kuap_kernel_restore r9, r10
+
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
@@ -2800,19 +2745,24 @@ masked_Hinterrupt:
.else
masked_interrupt:
.endif
- lbz r11,PACAIRQHAPPENED(r13)
- or r11,r11,r10
- stb r11,PACAIRQHAPPENED(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ lbz r9,PACAIRQHAPPENED(r13)
+ or r9,r9,r10
+ stb r9,PACAIRQHAPPENED(r13)
+
+ .if ! \hsrr
cmpwi r10,PACA_IRQ_DEC
bne 1f
- lis r10,0x7fff
- ori r10,r10,0xffff
- mtspr SPRN_DEC,r10
+ LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
+ mtspr SPRN_DEC,r9
#ifdef CONFIG_PPC_WATCHDOG
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
b soft_nmi_common
#else
b 2f
#endif
+ .endif
+
1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
beq 2f
xori r12,r12,MSR_EE /* clear MSR_EE */
@@ -2821,11 +2771,29 @@ masked_interrupt:
.else
mtspr SPRN_SRR1,r12
.endif
- ori r11,r11,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQHAPPENED(r13)
+ ori r9,r9,PACA_IRQ_HARD_DIS
+ stb r9,PACAIRQHAPPENED(r13)
2: /* done */
- ld r10,PACA_EXGEN+EX_CTR(r13)
- mtctr r10
+ li r9,0
+ .if \hsrr
+ stb r9,PACAHSRR_VALID(r13)
+ .else
+ stb r9,PACASRR_VALID(r13)
+ .endif
+
+ SEARCH_RESTART_TABLE
+ cmpdi r12,0
+ beq 3f
+ .if \hsrr
+ mtspr SPRN_HSRR0,r12
+ .else
+ mtspr SPRN_SRR0,r12
+ .endif
+3:
+
+ ld r9,PACA_EXGEN+EX_CTR(r13)
+ mtctr r9
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
mtcrf 0x80,r9
std r1,PACAR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
@@ -2994,6 +2962,15 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
USE_TEXT_SECTION()
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+kvm_interrupt:
+ /*
+ * The conditional branch in KVMTEST can't reach all the way, so
+ * make a stub here.
+ */
+ b kvmppc_interrupt
+#endif
+
_GLOBAL(do_uaccess_flush)
UACCESS_FLUSH_FIXUP_SECTION
nop
@@ -3009,32 +2986,6 @@ EXPORT_SYMBOL(do_uaccess_flush)
MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-kvmppc_skip_interrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_SRR0
- addi r13, r13, 4
- mtspr SPRN_SRR0, r13
- GET_SCRATCH0(r13)
- RFI_TO_KERNEL
- b .
-
-kvmppc_skip_Hinterrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_HSRR0
- addi r13, r13, 4
- mtspr SPRN_HSRR0, r13
- GET_SCRATCH0(r13)
- HRFI_TO_KERNEL
- b .
-#endif
-
/*
* Relocation-on interrupts: A subset of the interrupts can be delivered
* with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
@@ -3053,7 +3004,7 @@ kvmppc_skip_Hinterrupt:
USE_FIXED_SECTION(virt_trampolines)
/*
- * All code below __end_interrupts is treated as soft-masked. If
+ * All code below __end_soft_masked is treated as soft-masked. If
* any code runs here with MSR[EE]=1, it must then cope with pending
* soft interrupt being raised (i.e., by ensuring it is replayed).
*
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index c9e2819b095a..c7022c41cc31 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -23,18 +23,20 @@ EXPORT_SYMBOL_GPL(powerpc_firmware_features);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
DEFINE_STATIC_KEY_FALSE(kvm_guest);
-bool check_kvm_guest(void)
+int __init check_kvm_guest(void)
{
struct device_node *hyper_node;
hyper_node = of_find_node_by_path("/hypervisor");
if (!hyper_node)
- return false;
+ return 0;
if (!of_device_is_compatible(hyper_node, "linux,kvm"))
- return false;
+ return 0;
static_branch_enable(&kvm_guest);
- return true;
+
+ return 0;
}
+core_initcall(check_kvm_guest); // before kvm_guest_init()
#endif
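Because check_kvm_guest() now runs as a core_initcall and only reports success, its callers are expected to rely on the kvm_guest static key instead of a return value. A minimal consumer-side sketch (the function name is illustrative; DECLARE_STATIC_KEY_FALSE and static_branch_unlikely are the generic jump-label helpers):

#include <linux/jump_label.h>
#include <linux/printk.h>

DECLARE_STATIC_KEY_FALSE(kvm_guest);

static void report_hypervisor(void)
{
	/* kvm_guest is flipped on at core_initcall time when the device tree
	 * has a /hypervisor node compatible with "linux,kvm" */
	if (static_branch_unlikely(&kvm_guest))
		pr_info("running as a KVM guest\n");
	else
		pr_info("bare metal or non-KVM hypervisor\n");
}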
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 2c57ece6671c..6010adcee16e 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -103,6 +103,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
ori r12,r12,MSR_FP
or r12,r12,r4
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
#endif
li r4,1
stb r4,THREAD_LOAD_FP(r5)
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index a8221ddcbd66..6b1ec9e3541b 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -142,42 +142,23 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
.macro SYSCALL_ENTRY trapno
mfspr r9, SPRN_SRR1
- mfspr r10, SPRN_SRR0
+ mfspr r12, SPRN_SRR0
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */
- lis r12, 1f@h
- ori r12, r12, 1f@l
+ lis r10, 1f@h
+ ori r10, r10, 1f@l
mtspr SPRN_SRR1, r11
- mtspr SPRN_SRR0, r12
- mfspr r12,SPRN_SPRG_THREAD
+ mtspr SPRN_SRR0, r10
+ mfspr r10,SPRN_SPRG_THREAD
mr r11, r1
- lwz r1,TASK_STACK-THREAD(r12)
- tovirt(r12, r12)
+ lwz r1,TASK_STACK-THREAD(r10)
+ tovirt(r10, r10)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
rfi
1:
- stw r11,GPR1(r1)
- stw r11,0(r1)
- mr r11, r1
- stw r10,_NIP(r11)
- mflr r10
- stw r10, _LINK(r11)
- mfcr r10
- rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
- stw r10,_CCR(r11) /* save registers */
-#ifdef CONFIG_40x
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
-#endif
- lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
- addi r10,r10,STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno
- stw r10,8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
- addi r2,r12,-THREAD
+ stw r12,_NIP(r1)
+ mfcr r12
+ rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
+ stw r12,_CCR(r1)
b transfer_to_syscall /* jump to handler */
.endm
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index e1360b88b6cb..7d72ee5ab387 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -701,39 +701,3 @@ _GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR0_RST_SYSTEM@h
mtspr SPRN_DBCR0,r13
-
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
- sync
- mtspr SPRN_PID,r3
- isync /* Need an isync to flush shadow */
- /* TLBs after changing PID */
- blr
-
-/* We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 5c106ac36626..ddc978a2d381 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -532,6 +532,10 @@ finish_tlb_load_44x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+#ifdef CONFIG_PPC_KUEP
+0: rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
+ patch_site 0b, patch__tlb_44x_kuep
+#endif
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
@@ -743,6 +747,10 @@ finish_tlb_load_47x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+#ifdef CONFIG_PPC_KUEP
+0: rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
+ patch_site 0b, patch__tlb_47x_kuep
+#endif
1: tlbwe r11,r13,2
/* Done...restore registers and get out of here.
@@ -780,20 +788,6 @@ _GLOBAL(__fixup_440A_mcheck)
sync
blr
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
/*
* Init CPU state. This is called at boot time or for secondary CPUs
* to setup initial TLB entries, setup IVORs, etc...
@@ -1239,34 +1233,8 @@ head_start_common:
isync
blr
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align PAGE_SHIFT
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
-/*
- * To support >32-bit physical addresses, we use an 8KB pgdir.
- */
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
-
#ifdef CONFIG_SMP
+ .data
.align 12
temp_boot_stack:
.space 1024
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ece7f97bafff..79930b0bc781 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -194,8 +194,9 @@ CLOSE_FIXED_SECTION(first_256B)
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
+/* This value is used to mark exception frames on the stack. */
exception_marker:
- .tc ID_72656773_68657265[TC],0x7265677368657265
+ .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
.previous
/*
@@ -211,6 +212,8 @@ OPEN_TEXT_SECTION(0x100)
USE_TEXT_SECTION()
+#include "interrupt_64.S"
+
#ifdef CONFIG_PPC_BOOK3E
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
@@ -997,23 +1000,3 @@ start_here_common:
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
.previous
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the bss, which is page-aligned.
- */
- .section ".bss"
-/*
- * pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
- * We will need to find a better way to fix this
- */
- .align 16
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 7d445e4342c0..9bdb95f5694f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -786,28 +786,3 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
- .data
- .globl sdata
-sdata:
- .globl empty_zero_page
- .align PAGE_SHIFT
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE table pointers, usually the kernel and current user
- * pointer to their respective root page table (pgdir).
- */
- .globl abatron_pteptrs
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 065178f19a3d..764edd860ed4 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -518,8 +518,6 @@ BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mtspr SPRN_RPA,r1
- mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
- mtcrf 0x80,r2
BEGIN_MMU_FTR_SECTION
li r0,1
mfspr r1,SPRN_SPRG_603_LRU
@@ -531,9 +529,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
tlbld r3
rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
DataAddressInvalid:
mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -607,9 +611,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
tlbld r3
rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception unknown_exception
@@ -756,9 +766,6 @@ PerformanceMonitor:
* the kernel image to physical address PHYSICAL_START.
*/
relocate_kernel:
- addis r9,r26,klimit@ha /* fetch klimit */
- lwz r25,klimit@l(r9)
- addis r25,r25,-KERNELBASE@h
lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */
@@ -766,7 +773,8 @@ relocate_kernel:
addi r0,r3,4f@l /* jump to the address of 4f */
mtctr r0 /* in copy and do the rest. */
bctr /* jump to the copy */
-4: mr r5,r25
+4: lis r5,_end-KERNELBASE@h
+ ori r5,r5,_end-KERNELBASE@l
bl copy_and_flush /* copy the rest */
b turn_on_mmu
@@ -924,12 +932,6 @@ _GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
mtctr r0 /* for context 0 */
li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
-#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
-#endif
li r4, 0
3: mtsrin r3, r4
addi r3, r3, 0x111 /* increment VSID */
@@ -1024,58 +1026,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
rfi
/*
- * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
- *
- * Set up the segment registers for a new context.
- */
-_ENTRY(switch_mmu_context)
- lwz r3,MMCONTEXTID(r4)
- cmpwi cr0,r3,0
- blt- 4f
- mulli r3,r3,897 /* multiply context by skew factor */
- rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
-#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
-#endif
- li r0,NUM_USER_SEGMENTS
- mtctr r0
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- lwz r4, MM_PGD(r4)
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
-BEGIN_MMU_FTR_SECTION
-#ifndef CONFIG_BDI_SWITCH
- lwz r4, MM_PGD(r4)
-#endif
- tophys(r4, r4)
- rlwinm r4, r4, 4, 0xffff01ff
- mtspr SPRN_SDR1, r4
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
- li r4,0
- isync
-3:
- mtsrin r3,r4
- addi r3,r3,0x111 /* next VSID */
- rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
- addis r4,r4,0x1000 /* address of next segment */
- bdnz 3b
- sync
- isync
- blr
-4: trap
- EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
- blr
-EXPORT_SYMBOL(switch_mmu_context)
-
-/*
* An undocumented "feature" of 604e requires that the v bit
* be cleared before changing BAT values.
*
@@ -1256,61 +1206,4 @@ setup_usbgecko_bat:
blr
#endif
-#ifdef CONFIG_8260
-/* Jump into the system reset for the rom.
- * We first disable the MMU, and then jump to the ROM reset address.
- *
- * r3 is the board info structure, r4 is the location for starting.
- * I use this for building a small kernel that can load other kernels,
- * rather than trying to write or rely on a rom monitor that can tftp load.
- */
- .globl m8260_gorom
-m8260_gorom:
- mfmsr r0
- rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- sync
- mtmsr r0
- sync
- mfspr r11, SPRN_HID0
- lis r10, 0
- ori r10,r10,HID0_ICE|HID0_DCE
- andc r11, r11, r10
- mtspr SPRN_HID0, r11
- isync
- li r5, MSR_ME|MSR_RI
- lis r6,2f@h
- addis r6,r6,-KERNELBASE@h
- ori r6,r6,2f@l
- mtspr SPRN_SRR0,r6
- mtspr SPRN_SRR1,r5
- isync
- sync
- rfi
-2:
- mtlr r4
- blr
-#endif
-
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
.data
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index f82470091697..87b806e8eded 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -128,37 +128,20 @@ BEGIN_FTR_SECTION
mr r12, r13
lwz r13, THREAD_NORMSAVE(2)(r10)
FTR_SECTION_ELSE
-#endif
mfcr r12
-#ifdef CONFIG_KVM_BOOKE_HV
ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
+#else
+ mfcr r12
#endif
mfspr r9, SPRN_SRR1
BOOKE_CLEAR_BTB(r11)
- lwz r11, TASK_STACK - THREAD(r10)
+ mr r11, r1
+ lwz r1, TASK_STACK - THREAD(r10)
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
- ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
- stw r12, _CCR(r11) /* save various registers */
- mflr r12
- stw r12,_LINK(r11)
+ ALLOC_STACK_FRAME(r1, THREAD_SIZE - INT_FRAME_SIZE)
+ stw r12, _CCR(r1)
mfspr r12,SPRN_SRR0
- stw r1, GPR1(r11)
- stw r1, 0(r11)
- mr r1, r11
- stw r12,_NIP(r11)
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
- lis r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
- addi r12, r12, STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno
- stw r12, 8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
-
- addi r2,r10,-THREAD
+ stw r12,_NIP(r1)
b transfer_to_syscall /* jump to handler */
.endm
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index a1a5c3f10dc4..0f9642f36b49 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -985,20 +985,6 @@ _GLOBAL(abort)
mtspr SPRN_DBCR0,r13
isync
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
@@ -1226,26 +1212,3 @@ _GLOBAL(restore_to_as0)
*/
3: mr r3,r5
bl _start
-
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
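
Reviewer note: the head_44x, head_64, head_8xx, head_book3s_32 and head_fsl_booke hunks above all drop the asm-level page-aligned data (sdata, empty_zero_page, swapper_pg_dir, abatron_pteptrs). The series is expected to provide C definitions for the surviving symbols instead; a minimal sketch is below. The hosting file, array bounds and section attributes are assumptions for illustration only - the symbol names are the only part taken from the diff.

/*
 * Hedged sketch: C-side replacements for the asm-defined page-aligned
 * objects removed above. Exact placement and attributes are assumptions.
 */
pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __page_aligned_bss;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
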
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 8fc7a14e4d71..21a638aff72f 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -486,7 +486,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
return;
reset:
- regs->msr &= ~MSR_SE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_SE);
for (i = 0; i < nr_wp_slots(); i++) {
info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
__set_breakpoint(i, info);
@@ -537,7 +537,7 @@ static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
current->thread.last_hit_ubp[i] = bp[i];
info[i] = NULL;
}
- regs->msr |= MSR_SE;
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return false;
}
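
Reviewer note: both hw_breakpoint.c hunks stop writing regs->msr directly and go through regs_set_return_msr(). The accessor itself is added elsewhere in this series (asm/ptrace.h); a sketch of its expected shape is below - the body is an assumption, only the srr_valid/hsrr_valid fields it touches appear later in this diff (see check_return_regs_valid in interrupt.c).

/* Approximate shape of the accessor relied on above: besides storing the
 * MSR, it invalidates the cached SRR values so the exit path reloads them. */
static __always_inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
	regs->msr = msr;
#ifdef CONFIG_PPC_BOOK3S_64
	local_paca->hsrr_valid = 0;	/* force SRR1 to be rewritten on exit */
	local_paca->srr_valid = 0;
#endif
}
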
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index e4559f8914eb..21bbd615ca41 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -3,6 +3,7 @@
#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
+#include <linux/sched/debug.h> /* for show_regs */
#include <asm/asm-prototypes.h>
#include <asm/kup.h>
@@ -26,6 +27,53 @@ unsigned long global_dbcr0[NR_CPUS];
typedef long (*syscall_fn)(long, long, long, long, long, long);
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+static inline bool exit_must_hard_disable(void)
+{
+ return static_branch_unlikely(&interrupt_exit_not_reentrant);
+}
+#else
+static inline bool exit_must_hard_disable(void)
+{
+ return true;
+}
+#endif
+
+/*
+ * local irqs must be disabled. Returns false if the caller must re-enable
+ * them, check for new work, and try again.
+ *
+ * This should be called with local irqs disabled, but if they were previously
+ * enabled when the interrupt handler returns (indicating a process-context /
+ * synchronous interrupt) then irqs_enabled should be true.
+ *
+ * If restartable is true then EE/RI can be left on because interrupts are handled
+ * with a restart sequence.
+ */
+static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
+{
+ /* This must be done with RI=1 because tracing may touch vmaps */
+ trace_hardirqs_on();
+
+ if (exit_must_hard_disable() || !restartable)
+ __hard_EE_RI_disable();
+
+#ifdef CONFIG_PPC64
+ /* This pattern matches prep_irq_for_idle */
+ if (unlikely(lazy_irq_pending_nocheck())) {
+ if (exit_must_hard_disable() || !restartable) {
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ __hard_RI_enable();
+ }
+ trace_hardirqs_off();
+
+ return false;
+ }
+#endif
+ return true;
+}
+
/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
long r6, long r7, long r8,
@@ -34,9 +82,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
syscall_fn f;
kuep_lock();
-#ifdef CONFIG_PPC32
- kuap_save_and_lock(regs);
-#endif
regs->orig_gpr3 = r3;
@@ -147,71 +192,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
return f(r3, r4, r5, r6, r7, r8);
}
-/*
- * local irqs must be disabled. Returns false if the caller must re-enable
- * them, check for new work, and try again.
- *
- * This should be called with local irqs disabled, but if they were previously
- * enabled when the interrupt handler returns (indicating a process-context /
- * synchronous interrupt) then irqs_enabled should be true.
- */
-static notrace __always_inline bool __prep_irq_for_enabled_exit(bool clear_ri)
-{
- /* This must be done with RI=1 because tracing may touch vmaps */
- trace_hardirqs_on();
-
- /* This pattern matches prep_irq_for_idle */
- if (clear_ri)
- __hard_EE_RI_disable();
- else
- __hard_irq_disable();
-#ifdef CONFIG_PPC64
- if (unlikely(lazy_irq_pending_nocheck())) {
- /* Took an interrupt, may have more exit work to do. */
- if (clear_ri)
- __hard_RI_enable();
- trace_hardirqs_off();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- return false;
- }
- local_paca->irq_happened = 0;
- irq_soft_mask_set(IRQS_ENABLED);
-#endif
- return true;
-}
-
-static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri, bool irqs_enabled)
-{
- if (__prep_irq_for_enabled_exit(clear_ri))
- return true;
-
- /*
- * Must replay pending soft-masked interrupts now. Don't just
- * local_irq_enabe(); local_irq_disable(); because if we are
- * returning from an asynchronous interrupt here, another one
- * might hit after irqs are enabled, and it would exit via this
- * same path allowing another to fire, and so on unbounded.
- *
- * If interrupts were enabled when this interrupt exited,
- * indicating a process context (synchronous) interrupt,
- * local_irq_enable/disable can be used, which will enable
- * interrupts rather than keeping them masked (unclear how
- * much benefit this is over just replaying for all cases,
- * because we immediately disable again, so all we're really
- * doing is allowing hard interrupts to execute directly for
- * a very small time, rather than being masked and replayed).
- */
- if (irqs_enabled) {
- local_irq_enable();
- local_irq_disable();
- } else {
- replay_soft_interrupts();
- }
-
- return false;
-}
-
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -234,57 +214,92 @@ static notrace void booke_load_dbcr0(void)
#endif
}
-/*
- * This should be called after a syscall returns, with r3 the return value
- * from the syscall. If this function returns non-zero, the system call
- * exit assembly should additionally load all GPR registers and CTR and XER
- * from the interrupt frame.
- *
- * The function graph tracer can not trace the return side of this function,
- * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
- */
-notrace unsigned long syscall_exit_prepare(unsigned long r3,
- struct pt_regs *regs,
- long scv)
+static void check_return_regs_valid(struct pt_regs *regs)
{
- unsigned long ti_flags;
- unsigned long ret = 0;
- bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long trap, srr0, srr1;
+ static bool warned;
+ u8 *validp;
+ char *h;
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ if (trap_is_scv(regs))
+ return;
- kuap_assert_locked();
+ trap = regs->trap;
+ // EE in HV mode sets HSRRs like 0xea0
+ if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
+ trap = 0xea0;
+
+ switch (trap) {
+ case 0x980:
+ case INTERRUPT_H_DATA_STORAGE:
+ case 0xe20:
+ case 0xe40:
+ case INTERRUPT_HMI:
+ case 0xe80:
+ case 0xea0:
+ case INTERRUPT_H_FAC_UNAVAIL:
+ case 0x1200:
+ case 0x1500:
+ case 0x1600:
+ case 0x1800:
+ validp = &local_paca->hsrr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_HSRR0);
+ srr1 = mfspr(SPRN_HSRR1);
+ h = "H";
+
+ break;
+ default:
+ validp = &local_paca->srr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_SRR0);
+ srr1 = mfspr(SPRN_SRR1);
+ h = "";
+ break;
+ }
- regs->result = r3;
+ if (srr0 == regs->nip && srr1 == regs->msr)
+ return;
- /* Check whether the syscall is issued inside a restartable sequence */
- rseq_syscall(regs);
+ /*
+ * A NMI / soft-NMI interrupt may have come in after we found
+ * srr_valid and before the SRRs are loaded. The interrupt then
+ * comes in and clobbers SRRs and clears srr_valid. Then we load
+ * the SRRs here and test them above and find they don't match.
+ *
+ * Test validity again after that, to catch such false positives.
+ *
+ * This test in general will have some window for false negatives
+ * and may not catch and fix all such cases if an NMI comes in
+ * later and clobbers SRRs without clearing srr_valid, but hopefully
+ * such things will get caught most of the time, statistically
+ * enough to be able to get a warning out.
+ */
+ barrier();
- ti_flags = current_thread_info()->flags;
+ if (!*validp)
+ return;
- if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
- if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
- r3 = -r3;
- regs->ccr |= 0x10000000; /* Set SO bit in CR */
- }
+ if (!warned) {
+ warned = true;
+ printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
+ printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
+ show_regs(regs);
}
- if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
- if (ti_flags & _TIF_RESTOREALL)
- ret = _TIF_RESTOREALL;
- else
- regs->gpr[3] = r3;
- clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
- } else {
- regs->gpr[3] = r3;
- }
-
- if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
- do_syscall_trace_leave(regs);
- ret |= _TIF_RESTOREALL;
- }
+ *validp = 0; /* fixup */
+#endif
+}
- local_irq_disable();
+static notrace unsigned long
+interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
+{
+ unsigned long ti_flags;
again:
ti_flags = READ_ONCE(current_thread_info()->flags);
@@ -306,7 +321,7 @@ again:
ti_flags = READ_ONCE(current_thread_info()->flags);
}
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely((ti_flags & _TIF_RESTORE_TM))) {
restore_tm_state(regs);
@@ -330,10 +345,10 @@ again:
}
}
- user_enter_irqoff();
+ check_return_regs_valid(regs);
- /* scv need not set RI=0 because SRRs are not used */
- if (unlikely(!__prep_irq_for_enabled_exit(is_not_scv))) {
+ user_enter_irqoff();
+ if (!prep_irq_for_enabled_exit(true)) {
user_exit_irqoff();
local_irq_enable();
local_irq_disable();
@@ -355,89 +370,131 @@ again:
return ret;
}
-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
+/*
+ * This should be called after a syscall returns, with r3 the return value
+ * from the syscall. If this function returns non-zero, the system call
+ * exit assembly should additionally load all GPR registers and CTR and XER
+ * from the interrupt frame.
+ *
+ * The function graph tracer can not trace the return side of this function,
+ * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
+ */
+notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ struct pt_regs *regs,
+ long scv)
{
unsigned long ti_flags;
- unsigned long flags;
unsigned long ret = 0;
+ bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
- if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
- BUG_ON(!(regs->msr & MSR_RI));
- BUG_ON(!(regs->msr & MSR_PR));
- BUG_ON(arch_irq_disabled_regs(regs));
CT_WARN_ON(ct_state() == CONTEXT_USER);
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * AMR can only have been unlocked if we interrupted the kernel.
- */
kuap_assert_locked();
- local_irq_save(flags);
-
-again:
- ti_flags = READ_ONCE(current_thread_info()->flags);
- while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
- local_irq_enable(); /* returning to user: may enable */
- if (ti_flags & _TIF_NEED_RESCHED) {
- schedule();
- } else {
- if (ti_flags & _TIF_SIGPENDING)
- ret |= _TIF_RESTOREALL;
- do_notify_resume(regs, ti_flags);
- }
- local_irq_disable();
- ti_flags = READ_ONCE(current_thread_info()->flags);
- }
+ regs->result = r3;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
- if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- unlikely((ti_flags & _TIF_RESTORE_TM))) {
- restore_tm_state(regs);
- } else {
- unsigned long mathflags = MSR_FP;
+ /* Check whether the syscall is issued inside a restartable sequence */
+ rseq_syscall(regs);
- if (cpu_has_feature(CPU_FTR_VSX))
- mathflags |= MSR_VEC | MSR_VSX;
- else if (cpu_has_feature(CPU_FTR_ALTIVEC))
- mathflags |= MSR_VEC;
+ ti_flags = current_thread_info()->flags;
- /* See above restore_math comment */
- if ((regs->msr & mathflags) != mathflags)
- restore_math(regs);
+ if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
+ if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
+ r3 = -r3;
+ regs->ccr |= 0x10000000; /* Set SO bit in CR */
}
}
- user_enter_irqoff();
+ if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
+ if (ti_flags & _TIF_RESTOREALL)
+ ret = _TIF_RESTOREALL;
+ else
+ regs->gpr[3] = r3;
+ clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
+ } else {
+ regs->gpr[3] = r3;
+ }
- if (unlikely(!__prep_irq_for_enabled_exit(true))) {
- user_exit_irqoff();
- local_irq_enable();
- local_irq_disable();
- goto again;
+ if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+ do_syscall_trace_leave(regs);
+ ret |= _TIF_RESTOREALL;
}
- booke_load_dbcr0();
+ local_irq_disable();
+ ret = interrupt_exit_user_prepare_main(ret, regs);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- local_paca->tm_scratch = regs->msr;
+#ifdef CONFIG_PPC64
+ regs->exit_result = ret;
#endif
- account_cpu_user_exit();
+ return ret;
+}
- /* Restore user access locks last */
- kuap_user_restore(regs);
+#ifdef CONFIG_PPC64
+notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
+{
+ /*
+ * This is called when detecting a soft-pending interrupt as well as
+ * an alternate-return interrupt. So we can't just have the alternate
+ * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
+ * the soft-pending case were to fix things up as well). RI might be
+ * disabled, in which case it gets re-enabled by __hard_irq_disable().
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);
+
+ return regs->exit_result;
+}
+#endif
+
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
+{
+ unsigned long ret;
+
+ if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
+ BUG_ON(!(regs->msr & MSR_RI));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(arch_irq_disabled_regs(regs));
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * AMR can only have been unlocked if we interrupted the kernel.
+ */
+ kuap_assert_locked();
+
+ local_irq_disable();
+
+ ret = interrupt_exit_user_prepare_main(0, regs);
+
+#ifdef CONFIG_PPC64
+ regs->exit_result = ret;
+#endif
return ret;
}
void preempt_schedule_irq(void);
-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
unsigned long flags;
unsigned long ret = 0;
unsigned long kuap;
+ bool stack_store = current_thread_info()->flags &
+ _TIF_EMULATE_STACK_STORE;
if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
unlikely(!(regs->msr & MSR_RI)))
@@ -452,11 +509,6 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
kuap = kuap_get_and_assert_locked();
- if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
- clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
- ret = 1;
- }
-
local_irq_save(flags);
if (!arch_irq_disabled_regs(regs)) {
@@ -471,17 +523,58 @@ again:
}
}
- if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags))))
+ check_return_regs_valid(regs);
+
+ /*
+ * Stack store exit can't be restarted because the interrupt
+ * stack frame might have been clobbered.
+ */
+ if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
+ /*
+ * Replay pending soft-masked interrupts now. Don't
+	 * just local_irq_enable(); local_irq_disable(); because
+ * if we are returning from an asynchronous interrupt
+ * here, another one might hit after irqs are enabled,
+ * and it would exit via this same path allowing
+ * another to fire, and so on unbounded.
+ */
+ hard_irq_disable();
+ replay_soft_interrupts();
+ /* Took an interrupt, may have more exit work to do. */
goto again;
- } else {
- /* Returning to a kernel context with local irqs disabled. */
- __hard_EE_RI_disable();
+ }
#ifdef CONFIG_PPC64
+ /*
+ * An interrupt may clear MSR[EE] and set this concurrently,
+ * but it will be marked pending and the exit will be retried.
+ * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
+ * clear, until interrupt_exit_kernel_restart() calls
+ * hard_irq_disable(), which will set HARD_DIS again.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
+ } else {
+ check_return_regs_valid(regs);
+
+ if (unlikely(stack_store))
+ __hard_EE_RI_disable();
+ /*
+ * Returning to a kernel context with local irqs disabled.
+ * Here, if EE was enabled in the interrupted context, enable
+ * it on return as well. A problem exists here where a soft
+ * masked interrupt may have cleared MSR[EE] and set HARD_DIS
+ * here, and it will still exist on return to the caller. This
+ * will be resolved by the masked interrupt firing again.
+ */
if (regs->msr & MSR_EE)
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-#endif
+#endif /* CONFIG_PPC64 */
}
+ if (unlikely(stack_store)) {
+ clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
+ ret = 1;
+ }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
local_paca->tm_scratch = regs->msr;
@@ -496,3 +589,46 @@ again:
return ret;
}
+
+#ifdef CONFIG_PPC64
+notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result |= interrupt_exit_user_prepare(regs);
+
+ return regs->exit_result;
+}
+
+/*
+ * No real need to return a value here because the stack store case does not
+ * get restarted.
+ */
+notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ if (regs->softe == IRQS_ENABLED)
+ trace_hardirqs_off();
+
+ BUG_ON(user_mode(regs));
+
+ return interrupt_exit_kernel_prepare(regs);
+}
+#endif
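
Reviewer note: interrupt_exit_not_reentrant is a static key, so exit_must_hard_disable() compiles down to a patched branch rather than a load-and-test on every interrupt exit. The site that enables the key is not part of this hunk; flipping it would look like the sketch below, where the condition name is purely hypothetical.

	/* Illustrative only: enabling site is elsewhere in the series. */
	if (!can_use_restartable_exit())	/* hypothetical condition */
		static_branch_enable(&interrupt_exit_not_reentrant);
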
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
new file mode 100644
index 000000000000..4063e8a3f704
--- /dev/null
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -0,0 +1,770 @@
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
+#include <asm/feature-fixups.h>
+#include <asm/head-64.h>
+#include <asm/hw_irq.h>
+#include <asm/kup.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include <asm/tm.h>
+
+ .section ".toc","aw"
+SYS_CALL_TABLE:
+ .tc sys_call_table[TC],sys_call_table
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYS_CALL_TABLE:
+ .tc compat_sys_call_table[TC],compat_sys_call_table
+#endif
+ .previous
+
+ .align 7
+
+.macro DEBUG_SRR_VALID srr
+#ifdef CONFIG_PPC_RFI_SRR_DEBUG
+ .ifc \srr,srr
+ mfspr r11,SPRN_SRR0
+ ld r12,_NIP(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_SRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .else
+ mfspr r11,SPRN_HSRR0
+ ld r12,_NIP(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_HSRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .endif
+#endif
+.endm
+
+#ifdef CONFIG_PPC_BOOK3S
+.macro system_call_vectored name trapnr
+ .globl system_call_vectored_\name
+system_call_vectored_\name:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ bne tabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ SCV_INTERRUPT_TO_KERNEL
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+ ld r2,PACATOC(r13)
+ mfcr r12
+ li r11,0
+ /* Can we avoid saving r3-r8 in common case? */
+ std r3,GPR3(r1)
+ std r4,GPR4(r1)
+ std r5,GPR5(r1)
+ std r6,GPR6(r1)
+ std r7,GPR7(r1)
+ std r8,GPR8(r1)
+ /* Zero r9-r12, this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_LINK(r1)
+ std r11,_CTR(r1)
+
+ li r11,\trapnr
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ addi r10,r1,STACK_FRAME_OVERHEAD
+ ld r11,exception_marker@toc(r2)
+ std r11,-16(r10) /* "regshere" marker */
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
+ * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
+ * and interrupts may be masked and pending already.
+ * system_call_exception() will call trace_hardirqs_off() which means
+ * interrupts could already have been blocked before trace_hardirqs_off,
+ * but this is the best we can do.
+ */
+
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ mr r9,r0
+ bl system_call_exception
+
+.Lsyscall_vectored_\name\()_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,1 /* scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Lsyscall_vectored_\name\()_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_vectored_\name\()_restart
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ cmpdi r3,0
+ bne .Lsyscall_vectored_\name\()_restore_regs
+
+ /* rfscv returns with LR->NIA and CTR->MSR */
+ mtlr r4
+ mtctr r5
+
+ /* Could zero these as per ABI, but we may consider a stricter ABI
+ * which preserves these if libc implementations can benefit, so
+ * restore them for now until further measurement is done. */
+ ld r0,GPR0(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+ /* Zero volatile regs that may contain sensitive kernel data */
+ li r9,0
+ li r10,0
+ li r11,0
+ li r12,0
+ mtspr SPRN_XER,r0
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r13,GPR13(r1)
+ ld r1,GPR1(r1)
+ RFSCV_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_vectored_\name\()_restore_regs:
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+
+ ld r3,_CTR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_XER(r1)
+
+ REST_NVGPRS(r1)
+ ld r0,GPR0(r1)
+ mtcr r2
+ mtctr r3
+ mtlr r4
+ mtspr SPRN_XER,r5
+ REST_10GPRS(2, r1)
+ REST_2GPRS(12, r1)
+ ld r1,GPR1(r1)
+ RFI_TO_USER
+.Lsyscall_vectored_\name\()_rst_end:
+
+syscall_vectored_\name\()_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_vectored_\name\()_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
+
+.endm
+
+system_call_vectored common 0x3000
+
+/*
+ * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
+ * which is tested by system_call_exception when r0 is -1 (as set by vector
+ * entry code).
+ */
+system_call_vectored sigill 0x7ff0
+
+
+/*
+ * Entered via kernel return set up by kernel/sstep.c, must match entry regs
+ */
+ .globl system_call_vectored_emulate
+system_call_vectored_emulate:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+ b system_call_vectored_common
+#endif /* CONFIG_PPC_BOOK3S */
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common_real
+system_call_common_real:
+_ASM_NOKPROBE_SYMBOL(system_call_common_real)
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ mtmsrd r10
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common
+system_call_common:
+_ASM_NOKPROBE_SYMBOL(system_call_common)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ bne tabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
+ ld r2,PACATOC(r13)
+ mfcr r12
+ li r11,0
+ /* Can we avoid saving r3-r8 in common case? */
+ std r3,GPR3(r1)
+ std r4,GPR4(r1)
+ std r5,GPR5(r1)
+ std r6,GPR6(r1)
+ std r7,GPR7(r1)
+ std r8,GPR8(r1)
+ /* Zero r9-r12, this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_CTR(r1)
+ mflr r10
+
+ /*
+ * This clears CR0.SO (bit 28), which is the error indication on
+ * return from this system call.
+ */
+ rldimi r12,r11,28,(63-28)
+ li r11,0xc00
+ std r10,_LINK(r1)
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ addi r10,r1,STACK_FRAME_OVERHEAD
+ ld r11,exception_marker@toc(r2)
+ std r11,-16(r10) /* "regshere" marker */
+
+#ifdef CONFIG_PPC_BOOK3S
+ li r11,1
+ stb r11,PACASRR_VALID(r13)
+#endif
+
+ /*
+ * We always enter kernel from userspace with irq soft-mask enabled and
+ * nothing pending. system_call_exception() will call
+ * trace_hardirqs_off().
+ */
+ li r11,IRQS_ALL_DISABLED
+ li r12,-1 /* Set MSR_EE and MSR_RI */
+ stb r11,PACAIRQSOFTMASK(r13)
+ mtmsrd r12,1
+
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ mr r9,r0
+ bl system_call_exception
+
+.Lsyscall_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,0 /* !scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Lsyscall_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r6,_LINK(r1)
+ mtlr r6
+
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r4,PACASRR_VALID(r13)
+ cmpdi r4,0
+ bne 1f
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+1:
+ DEBUG_SRR_VALID srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ cmpdi r3,0
+ bne .Lsyscall_restore_regs
+ /* Zero volatile regs that may contain sensitive kernel data */
+ li r0,0
+ li r4,0
+ li r5,0
+ li r6,0
+ li r7,0
+ li r8,0
+ li r9,0
+ li r10,0
+ li r11,0
+ li r12,0
+ mtctr r0
+ mtspr SPRN_XER,r0
+.Lsyscall_restore_regs_cont:
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r13,GPR13(r1)
+ ld r1,GPR1(r1)
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_restore_regs:
+ ld r3,_CTR(r1)
+ ld r4,_XER(r1)
+ REST_NVGPRS(r1)
+ mtctr r3
+ mtspr SPRN_XER,r4
+ ld r0,GPR0(r1)
+ REST_8GPRS(4, r1)
+ ld r12,GPR12(r1)
+ b .Lsyscall_restore_regs_cont
+.Lsyscall_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+syscall_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
+#endif
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tabort_syscall:
+_ASM_NOKPROBE_SYMBOL(tabort_syscall)
+ /* Firstly we need to enable TM in the kernel */
+ mfmsr r10
+ li r9, 1
+ rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r10, 0
+
+ /* tabort, this dooms the transaction, nothing else */
+ li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+ TABORT(R9)
+
+ /*
+ * Return directly to userspace. We have corrupted user register state,
+ * but userspace will never see that register state. Execution will
+ * resume after the tbegin of the aborted transaction with the
+ * checkpointed register state.
+ */
+ li r9, MSR_RI
+ andc r10, r10, r9
+ mtmsrd r10, 1
+ mtspr SPRN_SRR0, r11
+ mtspr SPRN_SRR1, r12
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+#endif
+
+ /*
+ * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
+ * touched, no exit work created, then this can be used.
+ */
+ .balign IFETCH_ALIGN_BYTES
+ .globl fast_interrupt_return_srr
+fast_interrupt_return_srr:
+_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
+ kuap_check_amr r3, r4
+ ld r5,_MSR(r1)
+ andi. r0,r5,MSR_PR
+#ifdef CONFIG_PPC_BOOK3S
+ beq 1f
+ kuap_user_restore r3, r4
+ b .Lfast_user_interrupt_return_srr
+1: kuap_kernel_restore r3, r4
+ andi. r0,r5,MSR_RI
+ li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
+ bne+ .Lfast_kernel_interrupt_return_srr
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unrecoverable_exception
+ b . /* should not get here */
+#else
+ bne .Lfast_user_interrupt_return_srr
+ b .Lfast_kernel_interrupt_return_srr
+#endif
+
+.macro interrupt_return_macro srr
+ .balign IFETCH_ALIGN_BYTES
+ .globl interrupt_return_\srr
+interrupt_return_\srr\():
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
+ ld r4,_MSR(r1)
+ andi. r0,r4,MSR_PR
+ beq interrupt_return_\srr\()_kernel
+interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_user_prepare
+ cmpdi r3,0
+ bne- .Lrestore_nvgprs_\srr
+.Lrestore_nvgprs_\srr\()_cont:
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Linterrupt_return_\srr\()_user_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_user_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+.Lfast_user_interrupt_return_\srr\():
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ lbz r4,PACAIRQSOFTMASK(r13)
+ tdnei r4,IRQS_ENABLED
+#endif
+
+BEGIN_FTR_SECTION
+ ld r10,_PPR(r1)
+ mtspr SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ li r0,0
+
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+ REST_GPR(13, r1)
+
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtspr SPRN_XER,r6
+
+ REST_4GPRS(2, r1)
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_USER
+ .else
+ HRFI_TO_USER
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_user_rst_end:
+
+.Lrestore_nvgprs_\srr\():
+ REST_NVGPRS(r1)
+ b .Lrestore_nvgprs_\srr\()_cont
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_user_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_user_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_user_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
+#endif
+
+ .balign IFETCH_ALIGN_BYTES
+interrupt_return_\srr\()_kernel:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_kernel_prepare
+
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Linterrupt_return_\srr\()_kernel_rst_start:
+ ld r11,SOFTE(r1)
+ cmpwi r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bne 1f
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_kernel_restart
+#endif
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+1:
+
+.Lfast_kernel_interrupt_return_\srr\():
+ cmpdi cr1,r3,0
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_LINK(r1)
+ ld r4,_CTR(r1)
+ ld r5,_XER(r1)
+ ld r6,_CCR(r1)
+ li r0,0
+
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+
+ mtlr r3
+ mtctr r4
+ mtspr SPRN_XER,r5
+
+ /*
+ * Leaving a stale exception_marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
+ */
+ std r0,STACK_FRAME_OVERHEAD-16(r1)
+
+ REST_4GPRS(2, r1)
+
+ bne- cr1,1f /* emulate stack store */
+ mtcr r6
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+
+1: /*
+ * Emulate stack store with update. New r1 value was already calculated
+ * and updated in our interrupt regs by emulate_loadstore, but we can't
+ * store the previous value of r1 to the stack before re-loading our
+ * registers from it, otherwise they could be clobbered. Use
+ * PACA_EXGEN as temporary storage to hold the store data, as
+ * interrupts are disabled here so it won't be clobbered.
+ */
+ mtcr r6
+ std r9,PACA_EXGEN+0(r13)
+ addi r9,r1,INT_FRAME_SIZE /* get original r1 */
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ std r9,0(r1) /* perform store component of stdu */
+ ld r9,PACA_EXGEN+0(r13)
+
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_kernel_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_kernel_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_kernel_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_kernel_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
+#endif
+
+.endm
+
+interrupt_return_macro srr
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_macro hsrr
+
+ .globl __end_soft_masked
+__end_soft_masked:
+DEFINE_FIXED_SYMBOL(__end_soft_masked)
+#endif /* CONFIG_PPC_BOOK3S */
+
+#ifdef CONFIG_PPC_BOOK3S
+_GLOBAL(ret_from_fork_scv)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_vectored_common_exit
+#endif
+
+_GLOBAL(ret_from_fork)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_exit
+
+_GLOBAL(ret_from_kernel_thread)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ mtctr r14
+ mr r3,r15
+#ifdef PPC64_ELF_ABI_v2
+ mr r12,r14
+#endif
+ bctrl
+ li r3,0
+ b .Lsyscall_exit
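
Reviewer note: the .L*_rst_start/.L*_rst_end labels and the SOFT_MASK_TABLE/RESTART_TABLE entries above implement the restartable exit sequence: if an interrupt lands while NIP is inside a marked region, the interrupt return path redirects execution to the region's restart label instead of resuming mid-sequence. A rough C sketch of the lookup that consumes such a table is below; the entry layout and function name are illustrative, not the series' actual helpers.

/* Illustrative only: assumed layout of one restart-table entry and the
 * lookup an interrupt handler would perform against it. */
struct restart_entry {
	unsigned long start;	/* first address of the restartable region */
	unsigned long end;	/* one past the last address of the region */
	unsigned long fixup;	/* address to resume at instead */
};

static unsigned long search_restart_table(const struct restart_entry *tbl,
					  unsigned long n, unsigned long nip)
{
	unsigned long i;

	for (i = 0; i < n; i++)
		if (nip >= tbl[i].start && nip < tbl[i].end)
			return tbl[i].fixup;
	return 0;	/* not in a restartable region: resume normally */
}
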
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 51bbaae94ccc..c877f074d174 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
#ifdef CONFIG_PPC_INDIRECT_MMIO
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
- unsigned hugepage_shift;
struct iowa_bus *bus;
int token;
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
bus = &iowa_busses[token - 1];
else {
unsigned long vaddr, paddr;
- pte_t *ptep;
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
return NULL;
- /*
- * We won't find huge pages here (iomem). Also can't hit
- * a page table free due to init_mm
- */
- ptep = find_init_mm_pte(vaddr, &hugepage_shift);
- if (ptep == NULL)
- paddr = 0;
- else {
- WARN_ON(hugepage_shift);
- paddr = pte_pfn(*ptep) << PAGE_SHIFT;
- }
+
+ paddr = ppc_find_vmap_phys(vaddr);
+
bus = iowa_pci_find(vaddr, paddr);
if (bus == NULL)
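
Reviewer note: iowa_mem_find_bus() now delegates the address translation to ppc_find_vmap_phys(), which is introduced elsewhere in the series. The body below is a hedged reconstruction inferred from the open-coded lookup removed above, and may differ in detail from the real helper.

/* Hedged reconstruction of ppc_find_vmap_phys(), for reference only. */
unsigned long ppc_find_vmap_phys(unsigned long addr)
{
	pte_t *ptep;
	unsigned long pa = 0;
	unsigned int hugepage_shift;

	/*
	 * init_mm page tables are never freed and ioremap space is not
	 * mapped with huge pages, so a plain lookup is safe here.
	 */
	ptep = find_init_mm_pte(addr, &hugepage_shift);
	if (ptep) {
		WARN_ON(hugepage_shift);
		pa = pte_pfn(*ptep) << PAGE_SHIFT;
	}
	return pa;
}
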
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 57d6b85e9b96..2af89a5e379f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
unsigned int order;
unsigned int nio_pages, io_order;
struct page *page;
- size_t size_io = size;
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
- nio_pages = size_io >> tbl->it_page_shift;
- io_order = get_iommu_order(size_io, tbl);
+ nio_pages = size >> tbl->it_page_shift;
+ io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
if (tbl) {
- size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
- unsigned int nio_pages = size_io >> tbl->it_page_shift;
+ unsigned int nio_pages;
+ size = PAGE_ALIGN(size);
+ nio_pages = size >> tbl->it_page_shift;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
free_pages((unsigned long)vaddr, get_order(size));
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 72cb45393ef2..91e63eac4e8f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -121,6 +121,7 @@ void replay_soft_interrupts(void)
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
+ regs.msr |= MSR_EE;
again:
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
@@ -217,6 +218,100 @@ static inline void replay_soft_interrupts_irqrestore(void)
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif
+#ifdef CONFIG_CC_HAS_ASM_GOTO
+notrace void arch_local_irq_restore(unsigned long mask)
+{
+ unsigned char irq_happened;
+
+ /* Write the new soft-enabled value if it is a disable */
+ if (mask) {
+ irq_soft_mask_set(mask);
+ return;
+ }
+
+ /*
+ * After the stb, interrupts are unmasked and there are no interrupts
+ * pending replay. The restart sequence makes this atomic with
+ * respect to soft-masked interrupts. If this was just a simple code
+ * sequence, a soft-masked interrupt could become pending right after
+ * the comparison and before the stb.
+ *
+ * This allows interrupts to be unmasked without hard disabling, and
+ * also without new hard interrupts coming in ahead of pending ones.
+ */
+ asm_volatile_goto(
+"1: \n"
+" lbz 9,%0(13) \n"
+" cmpwi 9,0 \n"
+" bne %l[happened] \n"
+" stb 9,%1(13) \n"
+"2: \n"
+ RESTART_TABLE(1b, 2b, 1b)
+ : : "i" (offsetof(struct paca_struct, irq_happened)),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "cr0", "r9"
+ : happened);
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
+ return;
+
+happened:
+ irq_happened = get_irq_happened();
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!irq_happened);
+
+ if (irq_happened == PACA_IRQ_HARD_DIS) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ irq_soft_mask_set(IRQS_ENABLED);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ return;
+ }
+
+ /* Have interrupts to replay, need to hard disable first */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (!(mfmsr() & MSR_EE)) {
+ /*
+ * An interrupt could have come in and cleared
+ * MSR[EE] and set IRQ_HARD_DIS, so check
+ * IRQ_HARD_DIS again and warn if it is still
+ * clear.
+ */
+ irq_happened = get_irq_happened();
+ WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
+ }
+ }
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+ }
+
+ /*
+ * Disable preempt here, so that the below preempt_enable will
+ * perform resched if required (a replayed interrupt may set
+ * need_resched).
+ */
+ preempt_disable();
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
+ replay_soft_interrupts_irqrestore();
+ local_paca->irq_happened = 0;
+
+ trace_hardirqs_on();
+ irq_soft_mask_set(IRQS_ENABLED);
+ __hard_irq_enable();
+ preempt_enable();
+}
+#else
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
@@ -288,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
__hard_irq_enable();
preempt_enable();
}
+#endif
EXPORT_SYMBOL(arch_local_irq_restore);
/*
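
Reviewer note: the new fast path in arch_local_irq_restore() depends on the compiler's asm goto support, hence the CONFIG_CC_HAS_ASM_GOTO guard and the retained fallback. With asm goto the inline asm can branch straight to a C label, so the no-pending-interrupt case is just a load, compare and store, with the slow path kept entirely out of line. A stripped-down illustration of the construct is below; it is not kernel code, and the flag and register choice are arbitrary.

/* Minimal asm goto illustration: returns true if *flag was zero (fast path),
 * otherwise branches to the C label and returns false. Clobbers r9/cr0. */
static inline bool flag_was_clear(unsigned char *flag)
{
	asm goto("lbz	9,0(%0)\n\t"
		 "cmpwi	9,0\n\t"
		 "bne	%l[was_set]"
		 : /* no outputs permitted with asm goto */
		 : "r" (flag)
		 : "cr0", "r9"
		 : was_set);
	return true;
was_set:
	return false;
}
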
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index ce87dc5ea23c..5277cf582c16 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,10 +11,10 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- struct ppc_inst *addr = (struct ppc_inst *)jump_entry_code(entry);
+ u32 *addr = (u32 *)jump_entry_code(entry);
if (type == JUMP_LABEL_JMP)
patch_branch(addr, jump_entry_target(entry), 0);
else
- patch_instruction(addr, ppc_inst(PPC_INST_NOP));
+ patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7dd2ad3603ad..bdee7262c080 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -147,7 +147,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 0;
if (*(u32 *)regs->nip == BREAK_INSTR)
- regs->nip += BREAK_INSTR_SIZE;
+ regs_add_return_ip(regs, BREAK_INSTR_SIZE);
return 1;
}
@@ -372,7 +372,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
- regs->nip = pc;
+ regs_set_return_ip(regs, pc);
}
/*
@@ -394,7 +394,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
case 'c':
/* handle the optional parameter */
if (kgdb_hex2long(&ptr, &addr))
- linux_regs->nip = addr;
+ regs_set_return_ip(linux_regs, addr);
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
@@ -402,9 +402,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DBCR0,
mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
- linux_regs->msr |= MSR_DE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
#else
- linux_regs->msr |= MSR_SE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
#endif
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
@@ -417,11 +417,10 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
+ u32 instr, *addr = (u32 *)bpt->bpt_addr;
int err;
- unsigned int instr;
- struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
- err = get_kernel_nofault(instr, (unsigned *) addr);
+ err = get_kernel_nofault(instr, addr);
if (err)
return err;
@@ -429,7 +428,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return -EFAULT;
- *(unsigned int *)bpt->saved_instr = instr;
+ *(u32 *)bpt->saved_instr = instr;
return 0;
}
@@ -438,7 +437,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr = *(unsigned int *)bpt->saved_instr;
- struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
+ u32 *addr = (u32 *)bpt->bpt_addr;
err = patch_instruction(addr, ppc_inst(instr));
if (err)
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 660138f6c4b2..7154d58338cc 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -39,7 +39,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* On powerpc, NIP is *before* this instruction for the
* pre handler
*/
- regs->nip -= MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -48,7 +48,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* Emulate singlestep (and also recover regs->nip)
* as if there is a nop
*/
- regs->nip += MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 01ab2163659e..cbc28d1a2e1b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -19,11 +19,13 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
+#include <linux/moduleloader.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
+#include <asm/set_memory.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -103,28 +105,42 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
return addr;
}
+void *alloc_insn_page(void)
+{
+ void *page;
+
+ page = module_alloc(PAGE_SIZE);
+ if (!page)
+ return NULL;
+
+ if (strict_module_rwx_enabled()) {
+ set_memory_ro((unsigned long)page, 1);
+ set_memory_x((unsigned long)page, 1);
+ }
+ return page;
+}
+
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
struct kprobe *prev;
- struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
- struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
+ struct ppc_inst insn = ppc_inst_read(p->addr);
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
- } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
- printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
+ printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n");
ret = -EINVAL;
- } else if (ppc_inst_prefixed(prefix)) {
+ } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+ ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
preempt_disable();
prev = get_kprobe(p->addr - 1);
preempt_enable_no_resched();
- if (prev &&
- ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
+ if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
@@ -138,7 +154,7 @@ int arch_prepare_kprobe(struct kprobe *p)
}
if (!ret) {
- patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
+ patch_instruction(p->ainsn.insn, insn);
p->opcode = ppc_inst_val(insn);
}
@@ -149,13 +165,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
@@ -178,7 +194,7 @@ static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs
* variant as values in regs could play a part in
* if the trap is taken or not
*/
- regs->nip = (unsigned long)p->ainsn.insn;
+ regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -228,7 +244,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
- struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
+ struct ppc_inst insn = ppc_inst_read(p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
@@ -319,8 +335,9 @@ int kprobe_handler(struct pt_regs *regs)
kprobe_opcode_t insn = *p->ainsn.insn;
if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
/* Turn off 'trace' bits */
- regs->msr &= ~MSR_SINGLESTEP;
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
goto no_kprobe;
}
@@ -415,7 +432,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* we end up emulating it in kprobe_handler(), which increments the nip
* again.
*/
- regs->nip = orig_ret_address - 4;
+ regs_set_return_ip(regs, orig_ret_address - 4);
regs->link = orig_ret_address;
return 0;
@@ -439,7 +456,7 @@ int kprobe_post_handler(struct pt_regs *regs)
if (!cur || user_mode(regs))
return 0;
- len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
+ len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
/* make sure we got here for instruction we have a kprobe on */
if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
return 0;
@@ -450,8 +467,8 @@ int kprobe_post_handler(struct pt_regs *regs)
}
/* Adjust nip to after the single-stepped instruction */
- regs->nip = (unsigned long)cur->addr + len;
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr + len);
+ regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
/*Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -490,9 +507,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* and allow the page fault handler to continue as a
* normal page fault.
*/
- regs->nip = (unsigned long)cur->addr;
- regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr);
+ /* Turn off 'trace' bits */
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
@@ -502,28 +521,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->nip)) != NULL) {
- regs->nip = extable_fixup(entry);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
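The kprobes conversions above stop poking regs->nip and regs->msr directly and go through regs_set_return_ip()/regs_set_return_msr()/regs_add_return_ip(), so the interrupt-return path knows the saved SRR0/SRR1 are stale and must be reloaded (the same invalidation srr_regs_clobbered() performs for RTAS later in this diff). A minimal self-contained sketch of the idea; the struct, flag and helper bodies here are illustrative, not the kernel's exact definitions:

#include <stdbool.h>

struct regs_sketch {
	unsigned long nip;	/* return address loaded into SRR0 on exit */
	unsigned long msr;	/* return MSR loaded into SRR1 on exit */
};

static bool srr_valid;		/* stand-in for a per-CPU "SRRs already match regs" flag */

static inline void regs_set_return_ip(struct regs_sketch *regs, unsigned long ip)
{
	regs->nip = ip;
	srr_valid = false;	/* exit path must mtspr SRR0/SRR1 again */
}

static inline void regs_add_return_ip(struct regs_sketch *regs, long offset)
{
	regs_set_return_ip(regs, regs->nip + offset);
}

static inline void regs_set_return_msr(struct regs_sketch *regs, unsigned long msr)
{
	regs->msr = msr;
	srr_valid = false;
}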
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 8b2c1a8553a0..cfc03e016ff2 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -356,13 +356,16 @@ static void __init setup_legacy_serial_console(int console)
static int __init ioremap_legacy_serial_console(void)
{
- struct legacy_serial_info *info = &legacy_serial_infos[legacy_serial_console];
- struct plat_serial8250_port *port = &legacy_serial_ports[legacy_serial_console];
+ struct plat_serial8250_port *port;
+ struct legacy_serial_info *info;
void __iomem *vaddr;
if (legacy_serial_console < 0)
return 0;
+ info = &legacy_serial_infos[legacy_serial_console];
+ port = &legacy_serial_ports[legacy_serial_console];
+
if (!info->early_addr)
return 0;
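The legacy_serial fix above defers taking the address of legacy_serial_infos[legacy_serial_console] until after the index has been checked: forming &array[-1] when the console index is negative is already out-of-bounds pointer arithmetic, even if it is never dereferenced. A small standalone sketch of the same pattern, with made-up names:

struct info { unsigned long addr; };

static struct info infos[4];
static int console_idx = -1;	/* may legitimately be negative: "no console" */

static struct info *lookup_console(void)
{
	struct info *info;

	if (console_idx < 0)		/* validate the index first ... */
		return NULL;

	info = &infos[console_idx];	/* ... only then form the pointer */
	return info;
}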
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 9a3c2a84a2ac..47a683cd00d2 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -18,6 +18,7 @@
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
@@ -273,7 +274,7 @@ void mce_common_process_ue(struct pt_regs *regs,
entry = search_kernel_exception_table(regs->nip);
if (entry) {
mce_err->ignore_event = true;
- regs->nip = extable_fixup(entry);
+ regs_set_return_ip(regs, extable_fixup(entry));
}
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 667104d4c455..c2f55fe7092d 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -463,7 +463,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
pfn = addr_to_pfn(regs, regs->nip);
if (pfn != ULONG_MAX) {
instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
- instr = ppc_inst_read((struct ppc_inst *)instr_addr);
+ instr = ppc_inst_read((u32 *)instr_addr);
if (!analyse_instr(&op, &tmp, instr)) {
pfn = addr_to_pfn(regs, op.ea);
*addr = op.ea;
@@ -481,12 +481,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
return -1;
}
-static int mce_handle_ierror(struct pt_regs *regs,
+static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
const struct mce_ierror_table table[],
struct mce_error_info *mce_err, uint64_t *addr,
uint64_t *phys_addr)
{
- uint64_t srr1 = regs->msr;
int handled = 0;
int i;
@@ -695,19 +694,19 @@ static long mce_handle_ue_error(struct pt_regs *regs,
}
static long mce_handle_error(struct pt_regs *regs,
+ unsigned long srr1,
const struct mce_derror_table dtable[],
const struct mce_ierror_table itable[])
{
struct mce_error_info mce_err = { 0 };
uint64_t addr, phys_addr = ULONG_MAX;
- uint64_t srr1 = regs->msr;
long handled;
if (SRR1_MC_LOADSTORE(srr1))
handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
&phys_addr);
else
- handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
+ handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
&phys_addr);
if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
@@ -723,16 +722,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
/* P7 DD1 leaves top bits of DSISR undefined */
regs->dsisr &= 0x0000ffff;
- return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p7_derror_table, mce_p7_ierror_table);
}
long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
- return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p8_derror_table, mce_p8_ierror_table);
}
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
+ unsigned long srr1 = regs->msr;
+
/*
* On POWER9 DD2.1 and below, it's possible to get a machine check
* caused by a paste instruction where only DSISR bit 25 is set. This
@@ -746,10 +749,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
return 1;
- return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
+ /*
+ * Async machine check due to bad real address from store or foreign
+ * link time out comes with the load/store bit (PPC bit 42) set in
+ * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
+ * directed to the ierror table so it will find the cause (which
+ * describes it correctly as a store error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ ((srr1 & 0x081c0000) == 0x08140000 ||
+ (srr1 & 0x081c0000) == 0x08180000)) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p9_derror_table, mce_p9_ierror_table);
}
long __machine_check_early_realmode_p10(struct pt_regs *regs)
{
- return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table);
+ unsigned long srr1 = regs->msr;
+
+ /*
+ * Async machine check due to bad real address from store comes with
+ * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
+ * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
+ * so it will find the cause (which describes it correctly as a store
+ * error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ (srr1 & 0x081c0000) == 0x08140000) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p10_derror_table, mce_p10_ierror_table);
}
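The SRR1 masking above uses PPC_BIT(), which converts the ISA's MSB-0 bit numbering into a mask for a 64-bit register, so clearing "bit 42" clears 1UL << 21. A standalone restatement (the kernel defines PPC_BIT() the same way, via a PPC_BITLSHIFT helper) with a quick self-check:

#include <assert.h>

#define PPC_BIT(bit)	(1UL << (63 - (bit)))	/* bit 0 is the most significant bit */

int main(void)
{
	assert(PPC_BIT(0)  == 0x8000000000000000UL);
	assert(PPC_BIT(42) == 0x0000000000200000UL);	/* the SRR1 load/store bit cleared above */
	assert(PPC_BIT(63) == 0x0000000000000001UL);
	return 0;
}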
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 6a076bef2932..39ab15419592 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -388,9 +388,3 @@ _GLOBAL(start_secondary_resume)
bl start_secondary
b .
#endif /* CONFIG_SMP */
-
-/*
- * This routine is just here to keep GCC happy - sigh...
- */
-_GLOBAL(__main)
- blr
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 3f35c8d20be7..ed04a3ba66fe 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -92,12 +92,14 @@ int module_finalize(const Elf_Ehdr *hdr,
static __always_inline void *
__module_alloc(unsigned long size, unsigned long start, unsigned long end)
{
+ pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+
/*
* Don't do huge page allocations for modules yet until more testing
* is done. STRICT_MODULE_RWX may require extra work to support this
* too.
*/
- return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC,
+ return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
NUMA_NO_NODE, __builtin_return_address(0));
}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index c27b8687b82a..f417afc08d33 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -145,10 +145,9 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
- if (entry->jump[0] != (PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val)))
+ if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
return 0;
- if (entry->jump[1] != (PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) |
- PPC_LO(val)))
+ if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
return 0;
return 1;
}
@@ -175,16 +174,10 @@ static uint32_t do_plt_call(void *location,
entry++;
}
- /*
- * lis r12, sym@ha
- * addi r12, r12, sym@l
- * mtctr r12
- * bctr
- */
- entry->jump[0] = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
- entry->jump[1] = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);
- entry->jump[2] = PPC_INST_MTCTR | __PPC_RS(R12);
- entry->jump[3] = PPC_INST_BCTR;
+ entry->jump[0] = PPC_RAW_LIS(_R12, PPC_HA(val));
+ entry->jump[1] = PPC_RAW_ADDI(_R12, _R12, PPC_LO(val));
+ entry->jump[2] = PPC_RAW_MTCTR(_R12);
+ entry->jump[3] = PPC_RAW_BCTR();
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
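The PLT entries above build a 32-bit address with an lis/addi pair, so the high half has to be the "high-adjusted" value: addi sign-extends its 16-bit immediate, and PPC_HA() pre-compensates by adding 0x8000 before taking the high half. A standalone sketch using the usual definitions of these helpers, with a worked value:

#include <assert.h>
#include <stdint.h>

#define PPC_LO(v)	((v) & 0xffff)
#define PPC_HI(v)	(((v) >> 16) & 0xffff)
#define PPC_HA(v)	PPC_HI((v) + 0x8000)

int main(void)
{
	uint32_t val = 0x12348765;

	/* lis r12, val@ha:  r12 = 0x1235 << 16 = 0x12350000 */
	int64_t r12 = (int64_t)PPC_HA(val) << 16;

	/* addi r12, r12, val@l: the immediate 0x8765 sign-extends to -0x789b */
	r12 += (int16_t)PPC_LO(val);

	assert((uint32_t)r12 == val);	/* 0x12350000 - 0x789b == 0x12348765 */
	return 0;
}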
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ae2b188365b1..6baa676e7cb6 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -122,27 +122,19 @@ struct ppc64_stub_entry
* the stub, but it's significantly shorter to put these values at the
* end of the stub code, and patch the stub address (32-bits relative
* to the TOC ptr, r2) into the stub.
- *
- * addis r11,r2, <high>
- * addi r11,r11, <low>
- * std r2,R2_STACK_OFFSET(r1)
- * ld r12,32(r11)
- * ld r2,40(r11)
- * mtctr r12
- * bctr
*/
static u32 ppc64_stub_insns[] = {
- PPC_INST_ADDIS | __PPC_RT(R11) | __PPC_RA(R2),
- PPC_INST_ADDI | __PPC_RT(R11) | __PPC_RA(R11),
+ PPC_RAW_ADDIS(_R11, _R2, 0),
+ PPC_RAW_ADDI(_R11, _R11, 0),
/* Save current r2 value in magic place on the stack. */
- PPC_INST_STD | __PPC_RS(R2) | __PPC_RA(R1) | R2_STACK_OFFSET,
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R11) | 32,
+ PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
+ PPC_RAW_LD(_R12, _R11, 32),
#ifdef PPC64_ELF_ABI_v1
/* Set up new r2 from function descriptor */
- PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R11) | 40,
+ PPC_RAW_LD(_R2, _R11, 40),
#endif
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
};
/* Count how many different 24-bit relocations (different symbol,
@@ -336,21 +328,12 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
#ifdef CONFIG_MPROFILE_KERNEL
-#define PACATOC offsetof(struct paca_struct, kernel_toc)
-
-/*
- * ld r12,PACATOC(r13)
- * addis r12,r12,<high>
- * addi r12,r12,<low>
- * mtctr r12
- * bctr
- */
static u32 stub_insns[] = {
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC,
- PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
+ PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
+ PPC_RAW_ADDIS(_R12, _R12, 0),
+ PPC_RAW_ADDI(_R12, _R12, 0),
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
};
/*
@@ -507,7 +490,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
return 1;
- if (*instruction != PPC_INST_NOP) {
+ if (*instruction != PPC_RAW_NOP()) {
pr_err("%s: Expected nop after call, got %08x at %pS\n",
me->name, *instruction, instruction);
return 0;
@@ -696,21 +679,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
* ld r2, ...(r12)
* add r2, r2, r12
*/
- if ((((uint32_t *)location)[0] & ~0xfffc) !=
- (PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R12)))
+ if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0))
break;
- if (((uint32_t *)location)[1] !=
- (PPC_INST_ADD | __PPC_RT(R2) | __PPC_RA(R2) | __PPC_RB(R12)))
+ if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12))
break;
/*
* If found, replace it with:
* addis r2, r12, (.TOC.-func)@ha
* addi r2, r2, (.TOC.-func)@l
*/
- ((uint32_t *)location)[0] = PPC_INST_ADDIS | __PPC_RT(R2) |
- __PPC_RA(R12) | PPC_HA(value);
- ((uint32_t *)location)[1] = PPC_INST_ADDI | __PPC_RT(R2) |
- __PPC_RA(R2) | PPC_LO(value);
+ ((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value));
+ ((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value));
break;
case R_PPC64_REL16_HA:
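restore_r2() above now compares against PPC_RAW_NOP() rather than the old PPC_INST_NOP constant; on powerpc the canonical nop is "ori r0,r0,0", i.e. the 32-bit word 0x60000000. A trivial standalone version of that check, with a local constant standing in for the macro:

#include <stdbool.h>
#include <stdint.h>

#define NOP_INSN	0x60000000u	/* ori r0,r0,0 */

static bool insn_is_nop(uint32_t insn)
{
	return insn == NOP_INSN;
}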
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index cdf87086fa33..c79899abcec8 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -18,24 +18,16 @@
#include <asm/ppc-opcode.h>
#include <asm/inst.h>
-#define TMPL_CALL_HDLR_IDX \
- (optprobe_template_call_handler - optprobe_template_entry)
-#define TMPL_EMULATE_IDX \
- (optprobe_template_call_emulate - optprobe_template_entry)
-#define TMPL_RET_IDX \
- (optprobe_template_ret - optprobe_template_entry)
-#define TMPL_OP_IDX \
- (optprobe_template_op_address - optprobe_template_entry)
-#define TMPL_INSN_IDX \
- (optprobe_template_insn - optprobe_template_entry)
-#define TMPL_END_IDX \
- (optprobe_template_end - optprobe_template_entry)
-
-DEFINE_INSN_CACHE_OPS(ppc_optinsn);
+#define TMPL_CALL_HDLR_IDX (optprobe_template_call_handler - optprobe_template_entry)
+#define TMPL_EMULATE_IDX (optprobe_template_call_emulate - optprobe_template_entry)
+#define TMPL_RET_IDX (optprobe_template_ret - optprobe_template_entry)
+#define TMPL_OP_IDX (optprobe_template_op_address - optprobe_template_entry)
+#define TMPL_INSN_IDX (optprobe_template_insn - optprobe_template_entry)
+#define TMPL_END_IDX (optprobe_template_end - optprobe_template_entry)
static bool insn_page_in_use;
-static void *__ppc_alloc_insn_page(void)
+void *alloc_optinsn_page(void)
{
if (insn_page_in_use)
return NULL;
@@ -43,20 +35,11 @@ static void *__ppc_alloc_insn_page(void)
return &optinsn_slot;
}
-static void __ppc_free_insn_page(void *page __maybe_unused)
+void free_optinsn_page(void *page)
{
insn_page_in_use = false;
}
-struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
- .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
- /* insn_size initialized later */
- .alloc = __ppc_alloc_insn_page,
- .free = __ppc_free_insn_page,
- .nr_garbage = 0,
-};
-
/*
* Check if we can optimize this probe. Returns NIP post-emulation if this can
* be optimized and 0 otherwise.
@@ -66,6 +49,7 @@ static unsigned long can_optimize(struct kprobe *p)
struct pt_regs regs;
struct instruction_op op;
unsigned long nip = 0;
+ unsigned long addr = (unsigned long)p->addr;
/*
* kprobe placed for kretprobe during boot time
@@ -73,7 +57,7 @@ static unsigned long can_optimize(struct kprobe *p)
* So further checks can be skipped.
*/
if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
- return (unsigned long)p->addr + sizeof(kprobe_opcode_t);
+ return addr + sizeof(kprobe_opcode_t);
/*
* We only support optimizing kernel addresses, but not
@@ -81,11 +65,11 @@ static unsigned long can_optimize(struct kprobe *p)
*
* FIXME: Optimize kprobes placed in module addresses.
*/
- if (!is_kernel_addr((unsigned long)p->addr))
+ if (!is_kernel_addr(addr))
return 0;
memset(&regs, 0, sizeof(struct pt_regs));
- regs.nip = (unsigned long)p->addr;
+ regs.nip = addr;
regs.trap = 0x0;
regs.msr = MSR_KERNEL;
@@ -100,9 +84,8 @@ static unsigned long can_optimize(struct kprobe *p)
* Ensure that the instruction is not a conditional branch,
* and that can be emulated.
*/
- if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) &&
- analyse_instr(&op, &regs,
- ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) {
+ if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
+ analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
emulate_update_regs(&regs, &op);
nip = regs.nip;
}
@@ -123,7 +106,7 @@ static void optimized_callback(struct optimized_kprobe *op,
kprobes_inc_nmissed_count(&op->kp);
} else {
__this_cpu_write(current_kprobe, &op->kp);
- regs->nip = (unsigned long)op->kp.addr;
+ regs_set_return_ip(regs, (unsigned long)op->kp.addr);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
@@ -136,19 +119,15 @@ NOKPROBE_SYMBOL(optimized_callback);
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
if (op->optinsn.insn) {
- free_ppc_optinsn_slot(op->optinsn.insn, 1);
+ free_optinsn_slot(op->optinsn.insn, 1);
op->optinsn.insn = NULL;
}
}
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_RAW_LIS(reg, IMM_H(val))));
- addr++;
-
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_RAW_ORI(reg, reg, IMM_L(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
/*
@@ -157,34 +136,11 @@ static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *
*/
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
- /* lis reg,(op)@highest */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) |
- ((val >> 48) & 0xffff)));
- addr++;
-
- /* ori reg,reg,(op)@higher */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
- ___PPC_RS(reg) | ((val >> 32) & 0xffff)));
- addr++;
-
- /* rldicr reg,reg,32,31 */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) |
- ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31)));
- addr++;
-
- /* oris reg,reg,(op)@h */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) |
- ___PPC_RS(reg) | ((val >> 16) & 0xffff)));
- addr++;
-
- /* ori reg,reg,(op)@l */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
- ___PPC_RS(reg) | (val & 0xffff)));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
@@ -198,19 +154,18 @@ static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *ad
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
struct ppc_inst branch_op_callback, branch_emulate_step, temp;
- kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
+ unsigned long op_callback_addr, emulate_step_addr;
+ kprobe_opcode_t *buff;
long b_offset;
unsigned long nip, size;
int rc, i;
- kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-
nip = can_optimize(p);
if (!nip)
return -EILSEQ;
/* Allocate instruction slot for detour buffer */
- buff = get_ppc_optinsn_slot();
+ buff = get_optinsn_slot();
if (!buff)
return -ENOMEM;
@@ -228,8 +183,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
goto error;
/* Check if the return address is also within 32MB range */
- b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
- (unsigned long)nip;
+ b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
if (!is_offset_in_branch_range(b_offset))
goto error;
@@ -238,8 +192,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
for (i = 0; i < size; i++) {
- rc = patch_instruction((struct ppc_inst *)(buff + i),
- ppc_inst(*(optprobe_template_entry + i)));
+ rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
if (rc < 0)
goto error;
}
@@ -253,51 +206,48 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
/*
* 2. branch to optimized_callback() and emulate_step()
*/
- op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
- emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
+ op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
+ emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
if (!op_callback_addr || !emulate_step_addr) {
WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
goto error;
}
- rc = create_branch(&branch_op_callback,
- (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
- (unsigned long)op_callback_addr,
- BRANCH_SET_LINK);
+ rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
+ op_callback_addr, BRANCH_SET_LINK);
- rc |= create_branch(&branch_emulate_step,
- (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
- (unsigned long)emulate_step_addr,
- BRANCH_SET_LINK);
+ rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
+ emulate_step_addr, BRANCH_SET_LINK);
if (rc)
goto error;
- patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
- branch_op_callback);
- patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
- branch_emulate_step);
+ patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
+ patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
/*
* 3. load instruction to be emulated into relevant register, and
*/
- temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
- patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ temp = ppc_inst_read(p->ainsn.insn);
+ patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
+ } else {
+ patch_imm_load_insns((unsigned long)p->ainsn.insn, 4, buff + TMPL_INSN_IDX);
+ }
/*
* 4. branch back from trampoline
*/
- patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);
+ patch_branch(buff + TMPL_RET_IDX, nip, 0);
- flush_icache_range((unsigned long)buff,
- (unsigned long)(&buff[TMPL_END_IDX]));
+ flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));
op->optinsn.insn = buff;
return 0;
error:
- free_ppc_optinsn_slot(buff, 0);
+ free_optinsn_slot(buff, 0);
return -ERANGE;
}
@@ -328,12 +278,9 @@ void arch_optimize_kprobes(struct list_head *oplist)
* Backup instructions which will be replaced
* by jump address
*/
- memcpy(op->optinsn.copied_insn, op->kp.addr,
- RELATIVEJUMP_SIZE);
- create_branch(&instr,
- (struct ppc_inst *)op->kp.addr,
- (unsigned long)op->optinsn.insn, 0);
- patch_instruction((struct ppc_inst *)op->kp.addr, instr);
+ memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
+ create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
+ patch_instruction(op->kp.addr, instr);
list_del_init(&op->list);
}
}
@@ -343,8 +290,7 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
arch_arm_kprobe(&op->kp);
}
-void arch_unoptimize_kprobes(struct list_head *oplist,
- struct list_head *done_list)
+void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
@@ -355,8 +301,7 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
}
}
-int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
{
return ((unsigned long)op->kp.addr <= addr &&
(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
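patch_imm64_load_insns() above now emits the standard five-instruction sequence for materializing a 64-bit constant (lis/ori/sldi/oris/ori). A self-contained sketch of the arithmetic those instructions perform at run time; HIGHEST/HIGHER/HI/LO here are local stand-ins for the PPC_HIGHEST()/PPC_HIGHER()/PPC_HI()/PPC_LO() helpers used in the hunk:

#include <assert.h>
#include <stdint.h>

#define HIGHEST(v)	(((v) >> 48) & 0xffff)
#define HIGHER(v)	(((v) >> 32) & 0xffff)
#define HI(v)		(((v) >> 16) & 0xffff)
#define LO(v)		((v) & 0xffff)

static uint64_t build_imm64(uint64_t val)
{
	uint64_t r;

	r = HIGHEST(val) << 16;	/* lis  reg, val@highest (hardware sign-extends, but ...) */
	r |= HIGHER(val);	/* ori  reg, reg, val@higher */
	r <<= 32;		/* sldi reg, reg, 32 (... this clears the upper half anyway) */
	r |= HI(val) << 16;	/* oris reg, reg, val@h */
	r |= LO(val);		/* ori  reg, reg, val@l */
	return r;
}

int main(void)
{
	assert(build_imm64(0x123456789abcdef0ull) == 0x123456789abcdef0ull);
	return 0;
}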
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 7f5aae3c387d..9bd30cac852b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -346,10 +346,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
#ifdef CONFIG_PPC_BOOK3S
mm_context_t *context = &mm->context;
- get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
- get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 89e34aa273e2..185beb290580 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,7 +96,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
if (tsk == current && tsk->thread.regs &&
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
!test_thread_flag(TIF_RESTORE_TM)) {
- tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
+ regs_set_return_msr(&tsk->thread.ckpt_regs,
+ tsk->thread.regs->msr);
set_thread_flag(TIF_RESTORE_TM);
}
}
@@ -161,7 +162,7 @@ static void __giveup_fpu(struct task_struct *tsk)
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_fpu(struct task_struct *tsk)
@@ -244,7 +245,7 @@ static void __giveup_altivec(struct task_struct *tsk)
msr &= ~MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_altivec(struct task_struct *tsk)
@@ -559,7 +560,7 @@ void notrace restore_math(struct pt_regs *regs)
msr_check_and_clear(new_msr);
- regs->msr |= new_msr | fpexc_mode;
+ regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
}
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -1114,7 +1115,7 @@ void restore_tm_state(struct pt_regs *regs)
#endif
restore_math(regs);
- regs->msr |= msr_diff;
+ regs_set_return_msr(regs, regs->msr | msr_diff);
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -1129,6 +1130,10 @@ static inline void save_sprs(struct thread_struct *t)
if (cpu_has_feature(CPU_FTR_ALTIVEC))
t->vrsave = mfspr(SPRN_VRSAVE);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE))
+ t->spefscr = mfspr(SPRN_SPEFSCR);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR))
t->dscr = mfspr(SPRN_DSCR);
@@ -1159,6 +1164,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
old_thread->vrsave != new_thread->vrsave)
mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE) &&
+ old_thread->spefscr != new_thread->spefscr)
+ mtspr(SPRN_SPEFSCR, new_thread->spefscr);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
@@ -1213,6 +1223,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
__flush_tlb_pending(batch);
batch->active = 0;
}
+
+ /*
+ * On POWER9 the copy-paste buffer can only paste into
+ * foreign real addresses, so unprivileged processes can not
+ * see the data or use it in any way unless they have
+ * foreign real mappings. If the new process has the foreign
+ * real address mappings, we must issue a cp_abort to clear
+ * any state and prevent snooping, corruption or a covert
+ * channel. ISA v3.1 supports paste into local memory.
+ */
+ if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
+ atomic_read(&new->mm->context.vas_windows)))
+ asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1248,43 +1271,48 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
/*
- * Call restore_sprs() before calling _switch(). If we move it after
- * _switch() then we miss out on calling it for new tasks. The reason
- * for this is we manually create a stack frame for new tasks that
- * directly returns through ret_from_fork() or
+ * Call restore_sprs() and set_return_regs_changed() before calling
+ * _switch(). If we move it after _switch() then we miss out on calling
+ * it for new tasks. The reason for this is we manually create a stack
+ * frame for new tasks that directly returns through ret_from_fork() or
* ret_from_kernel_thread(). See copy_thread() for details.
*/
restore_sprs(old_thread, new_thread);
+ set_return_regs_changed(); /* _switch changes stack (and regs) */
+
#ifdef CONFIG_PPC32
kuap_assert_locked();
#endif
last = _switch(old_thread, new_thread);
+ /*
+ * Nothing after _switch will be run for newly created tasks,
+ * because they switch directly to ret_from_fork/ret_from_kernel_thread
+ * etc. Code added here should have a comment explaining why that is
+ * okay.
+ */
+
#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * This applies to a process that was context switched while inside
+ * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
+ * deactivated above, before _switch(). This will never be the case
+ * for new tasks.
+ */
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
- if (current->thread.regs) {
+ /*
+ * Math facilities are masked out of the child MSR in copy_thread.
+ * A new task does not need to restore_math because it will
+ * demand fault them.
+ */
+ if (current->thread.regs)
restore_math(current->thread.regs);
-
- /*
- * On POWER9 the copy-paste buffer can only paste into
- * foreign real addresses, so unprivileged processes can not
- * see the data or use it in any way unless they have
- * foreign real mappings. If the new process has the foreign
- * real address mappings, we must issue a cp_abort to clear
- * any state and prevent snooping, corruption or a covert
- * channel. ISA v3.1 supports paste into local memory.
- */
- if (current->mm &&
- (cpu_has_feature(CPU_FTR_ARCH_31) ||
- atomic_read(&current->mm->context.vas_windows)))
- asm volatile(PPC_CP_ABORT);
- }
#endif /* CONFIG_PPC_BOOK3S_64 */
return last;
@@ -1736,6 +1764,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_ALTIVEC
p->thread.vr_save_area = NULL;
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ p->thread.kuap = KUAP_NONE;
+#endif
setup_ksp_vsid(p, sp);
@@ -1838,13 +1869,14 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
}
regs->gpr[2] = toc;
}
- regs->nip = entry;
- regs->msr = MSR_USER64;
+ regs_set_return_ip(regs, entry);
+ regs_set_return_msr(regs, MSR_USER64);
} else {
- regs->nip = start;
regs->gpr[2] = 0;
- regs->msr = MSR_USER32;
+ regs_set_return_ip(regs, start);
+ regs_set_return_msr(regs, MSR_USER32);
}
+
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
@@ -1875,7 +1907,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfiar = 0;
current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
}
EXPORT_SYMBOL(start_thread);
@@ -1923,9 +1954,10 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
- if (regs != NULL && (regs->msr & MSR_FP) != 0)
- regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
- | tsk->thread.fpexc_mode;
+ if (regs != NULL && (regs->msr & MSR_FP) != 0) {
+ regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
+ | tsk->thread.fpexc_mode);
+ }
return 0;
}
@@ -1971,9 +2003,9 @@ int set_endian(struct task_struct *tsk, unsigned int val)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
- regs->msr &= ~MSR_LE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_LE);
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
- regs->msr |= MSR_LE;
+ regs_set_return_msr(regs, regs->msr | MSR_LE);
else
return -EINVAL;
@@ -2084,7 +2116,7 @@ static unsigned long __get_wchan(struct task_struct *p)
unsigned long ip, sp;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
sp = p->thread.ksp;
@@ -2094,7 +2126,7 @@ static unsigned long __get_wchan(struct task_struct *p)
do {
sp = *(unsigned long *)sp;
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
- p->state == TASK_RUNNING)
+ task_is_running(p))
return 0;
if (count > 0) {
ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
@@ -2121,8 +2153,9 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack,
- const char *loglvl)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+ unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
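The SPE hunks in process.c above follow the existing save_sprs()/restore_sprs() pattern: snapshot SPEFSCR into the outgoing thread at switch-out, and rewrite it at switch-in only when the incoming thread's value differs, so the (relatively slow) mtspr is skipped in the common case. A standalone sketch of that pattern; read_spefscr()/write_spefscr() stand in for mfspr/mtspr on SPRN_SPEFSCR:

struct thread_sketch {
	unsigned long spefscr;
};

extern unsigned long read_spefscr(void);
extern void write_spefscr(unsigned long val);

static void save_sprs_sketch(struct thread_sketch *t)
{
	t->spefscr = read_spefscr();		/* capture the outgoing task's value */
}

static void restore_sprs_sketch(struct thread_sketch *old, struct thread_sketch *new)
{
	if (old->spefscr != new->spefscr)	/* only touch the SPR when it changed */
		write_spefscr(new->spefscr);
}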
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fbe9deebc8e1..f620e04dc9bf 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -758,7 +758,7 @@ void __init early_init_devtree(void *params)
first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
- memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+ memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
/* If relocatable, reserve first 32k for interrupt vectors etc. */
if (PHYSICAL_START > MEMORY_START)
memblock_reserve(MEMORY_START, 0x8000);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 41ed7e33d897..a5bf355ce1d6 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -27,10 +27,12 @@
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>
+#include <linux/printk.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
+#include <asm/interrupt.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -242,13 +244,31 @@ static int __init prom_strcmp(const char *cs, const char *ct)
return 0;
}
-static char __init *prom_strcpy(char *dest, const char *src)
+static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
{
- char *tmp = dest;
+ ssize_t rc;
+ size_t i;
- while ((*dest++ = *src++) != '\0')
- /* nothing */;
- return tmp;
+ if (n == 0 || n > INT_MAX)
+ return -E2BIG;
+
+ // Copy up to n bytes
+ for (i = 0; i < n && src[i] != '\0'; i++)
+ dest[i] = src[i];
+
+ rc = i;
+
+ // If we copied all n then we have run out of space for the nul
+ if (rc == n) {
+ // Rewind by one character to ensure nul termination
+ i--;
+ rc = -E2BIG;
+ }
+
+ for (; i < n; i++)
+ dest[i] = '\0';
+
+ return rc;
}
static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
@@ -701,13 +721,12 @@ static int __init prom_setprop(phandle node, const char *nodename,
}
/* We can't use the standard versions because of relocation headaches. */
-#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
- || ('a' <= (c) && (c) <= 'f') \
- || ('A' <= (c) && (c) <= 'F'))
+#define prom_isxdigit(c) \
+ (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
-#define isdigit(c) ('0' <= (c) && (c) <= '9')
-#define islower(c) ('a' <= (c) && (c) <= 'z')
-#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
+#define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
+#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
+#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
static unsigned long prom_strtoul(const char *cp, const char **endp)
{
@@ -716,14 +735,14 @@ static unsigned long prom_strtoul(const char *cp, const char **endp)
if (*cp == '0') {
base = 8;
cp++;
- if (toupper(*cp) == 'X') {
+ if (prom_toupper(*cp) == 'X') {
cp++;
base = 16;
}
}
- while (isxdigit(*cp) &&
- (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
+ while (prom_isxdigit(*cp) &&
+ (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
result = result * base + value;
cp++;
}
@@ -927,6 +946,10 @@ struct option_vector6 {
u8 os_name;
} __packed;
+struct option_vector7 {
+ u8 os_id[256];
+} __packed;
+
struct ibm_arch_vec {
struct { u32 mask, val; } pvrs[14];
@@ -949,6 +972,9 @@ struct ibm_arch_vec {
u8 vec6_len;
struct option_vector6 vec6;
+
+ u8 vec7_len;
+ struct option_vector7 vec7;
} __packed;
static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
@@ -1095,6 +1121,9 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.secondary_pteg = 0,
.os_name = OV6_LINUX,
},
+
+ /* option vector 7: OS Identification */
+ .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
};
static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
@@ -1323,6 +1352,8 @@ static void __init prom_check_platform_support(void)
memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
sizeof(ibm_architecture_vec));
+ prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
+
if (prop_len > 1) {
int i;
u8 vec[8];
@@ -1762,6 +1793,8 @@ static int prom_rtas_hcall(uint64_t args)
asm volatile("sc 1\n" : "=r" (arg1) :
"r" (arg1),
"r" (arg2) :);
+ srr_regs_clobbered();
+
return arg1;
}
@@ -2702,7 +2735,7 @@ static void __init flatten_device_tree(void)
/* Add "phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
- prom_strcpy(namep, "phandle");
+ prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
/* Build string array */
@@ -3210,54 +3243,6 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
#endif /* CONFIG_BLK_DEV_INITRD */
}
-#ifdef CONFIG_PPC64
-#ifdef CONFIG_RELOCATABLE
-static void reloc_toc(void)
-{
-}
-
-static void unreloc_toc(void)
-{
-}
-#else
-static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
-{
- unsigned long i;
- unsigned long *toc_entry;
-
- /* Get the start of the TOC by using r2 directly. */
- asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
-
- for (i = 0; i < nr_entries; i++) {
- *toc_entry = *toc_entry + offset;
- toc_entry++;
- }
-}
-
-static void reloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- __reloc_toc(offset, nr_entries);
-
- mb();
-}
-
-static void unreloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- mb();
-
- __reloc_toc(-offset, nr_entries);
-}
-#endif
-#endif
-
#ifdef CONFIG_PPC_SVM
/*
* Perform the Enter Secure Mode ultracall.
@@ -3291,14 +3276,12 @@ static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
* relocated it so the check will fail. Restore the original image by
* relocating it back to the kernel virtual base address.
*/
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(KERNELBASE);
+ relocate(KERNELBASE);
ret = enter_secure_mode(kbase, fdt);
/* Relocate the kernel again. */
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(kbase);
+ relocate(kbase);
if (ret != U_SUCCESS) {
prom_printf("Returned %d from switching to secure mode.\n", ret);
@@ -3326,8 +3309,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
#ifdef CONFIG_PPC32
unsigned long offset = reloc_offset();
reloc_got2(offset);
-#else
- reloc_toc();
#endif
/*
@@ -3504,8 +3485,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
#ifdef CONFIG_PPC32
reloc_got2(-offset);
-#else
- unreloc_toc();
#endif
/* Move to secure memory if we're supposed to be secure guests. */
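prom_strscpy_pad(), added above, copies at most n bytes, NUL-pads the remainder of the destination, and returns the copied length or -E2BIG on truncation. A couple of illustrative calls against that definition (resulting buffer contents shown in the comments):

char buf[8];
ssize_t rc;

rc = prom_strscpy_pad(buf, "phandle", sizeof(buf));
/* rc == 7, buf = "phandle\0" */

rc = prom_strscpy_pad(buf, "linux,phandle", sizeof(buf));
/* rc == -E2BIG, buf = "linux,p\0" (truncated, but still NUL terminated) */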
diff --git a/arch/powerpc/kernel/ptrace/ptrace-adv.c b/arch/powerpc/kernel/ptrace/ptrace-adv.c
index 3990c01ef8cf..399f5d94a3df 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-adv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-adv.c
@@ -12,7 +12,7 @@ void user_enable_single_step(struct task_struct *task)
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_BT;
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -24,7 +24,7 @@ void user_enable_block_step(struct task_struct *task)
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_IC;
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -50,7 +50,7 @@ void user_disable_single_step(struct task_struct *task)
* All debug events were off.....
*/
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
}
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
@@ -82,6 +82,7 @@ int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
+ struct pt_regs *regs = task->thread.regs;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &task->thread;
@@ -112,7 +113,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
task->thread.debug.dbcr1)) {
- task->thread.regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
return 0;
@@ -132,7 +133,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
- task->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
return 0;
}
@@ -220,7 +221,7 @@ static long set_instruction_bp(struct task_struct *child,
}
out:
child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot;
}
@@ -336,7 +337,7 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
return -ENOSPC;
}
child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot + 4;
}
@@ -430,7 +431,7 @@ static int set_dac_range(struct task_struct *child,
child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return 5;
}
@@ -485,7 +486,8 @@ long ppc_del_hwdebug(struct task_struct *child, long data)
if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
child->thread.debug.dbcr1)) {
child->thread.debug.dbcr0 &= ~DBCR0_IDM;
- child->thread.regs->msr &= ~MSR_DE;
+ regs_set_return_msr(child->thread.regs,
+ child->thread.regs->msr & ~MSR_DE);
}
}
return rc;
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
index aa36fcad36cd..a5dd7d2e2c9e 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
@@ -11,10 +11,8 @@ void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- if (regs != NULL) {
- regs->msr &= ~MSR_BE;
- regs->msr |= MSR_SE;
- }
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_BE) | MSR_SE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -22,10 +20,8 @@ void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- if (regs != NULL) {
- regs->msr &= ~MSR_SE;
- regs->msr |= MSR_BE;
- }
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_SE) | MSR_BE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -34,7 +30,7 @@ void user_disable_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
- regs->msr &= ~(MSR_SE | MSR_BE);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_SE | MSR_BE));
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index 773bcc4ca843..b8be1d6668b5 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -113,8 +113,9 @@ static unsigned long get_user_msr(struct task_struct *task)
static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
{
- task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
- task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
+ unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
+ (msr & MSR_DEBUGCHANGE);
+ regs_set_return_msr(task->thread.regs, newmsr);
return 0;
}
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index a28239b8b0c0..33c07c8af6c8 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -12,7 +12,7 @@
#define MAX_RTC_WAIT 5000 /* 5 sec */
-#define RTAS_CLOCK_BUSY (-2)
+
time64_t __init rtas_get_boot_time(void)
{
int ret[8];
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6bada744402b..99f2cce635fb 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -25,6 +25,7 @@
#include <linux/reboot.h>
#include <linux/syscalls.h>
+#include <asm/interrupt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
@@ -46,6 +47,13 @@
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
+static inline void do_enter_rtas(unsigned long args)
+{
+ enter_rtas(args);
+
+ srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
+}
+
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
@@ -384,7 +392,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
save_args = rtas.args;
rtas.args = err_args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
err_args = rtas.args;
rtas.args = save_args;
@@ -430,7 +438,7 @@ va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
for (i = 0; i < nret; ++i)
args->rets[i] = 0;
- enter_rtas(__pa(args));
+ do_enter_rtas(__pa(args));
}
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
@@ -1138,7 +1146,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
flags = lock_rtas();
rtas.args = args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
args = rtas.args;
/* A -1 return code indicates that the last command couldn't
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 0fdfcdd9d880..cc51fa52e783 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -300,9 +300,7 @@ static void stf_barrier_enable(bool enable)
void setup_stf_barrier(void)
{
enum stf_barrier_type type;
- bool enable, hv;
-
- hv = cpu_has_feature(CPU_FTR_HVMODE);
+ bool enable;
/* Default to fallback in case fw-features are not available */
if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -315,8 +313,7 @@ void setup_stf_barrier(void)
type = STF_BARRIER_NONE;
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
+ security_ftr_enabled(SEC_FTR_STF_BARRIER);
if (type == STF_BARRIER_FALLBACK) {
pr_info("stf-barrier: fallback barrier available\n");
@@ -432,26 +429,29 @@ device_initcall(stf_barrier_debugfs_init);
static void update_branch_cache_flush(void)
{
- u32 *site;
+ u32 *site, __maybe_unused *site2;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
site = &patch__call_kvm_flush_link_stack;
+ site2 = &patch__call_kvm_flush_link_stack_p9;
// This controls the branch from guest_exit_cont to kvm_flush_link_stack
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+ patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
} else {
// Could use HW flush, but that could also flush count cache
patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+ patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
}
#endif
// Patch out the bcctr first, then nop the rest
site = &patch__call_flush_branch_caches3;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches2;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches1;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
// This controls the branch from _switch to flush_branch_caches
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
@@ -474,12 +474,12 @@ static void update_branch_cache_flush(void)
// If we just need to flush the link stack, early return
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(&patch__flush_link_stack_return,
- ppc_inst(PPC_INST_BLR));
+ ppc_inst(PPC_RAW_BLR()));
// If we have flush instruction, early return
} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
patch_instruction_site(&patch__flush_count_cache_return,
- ppc_inst(PPC_INST_BLR));
+ ppc_inst(PPC_RAW_BLR()));
}
}
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 74a98fff2c2f..26328fd2990c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -9,6 +9,7 @@
#undef DEBUG
#include <linux/export.h>
+#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -91,8 +92,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid);
int dcache_bsize;
int icache_bsize;
-unsigned long klimit = (unsigned long) _end;
-
/*
* This still seems to be needed... -- paulus
*/
@@ -930,7 +929,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
+ init_mm.brk = (unsigned long)_end;
mm_iommu_init(&init_mm);
irqstack_early_init();
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index d7c1f92152af..7ec5c47fce0e 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(DMA_MODE_WRITE);
*/
notrace void __init machine_init(u64 dt_ptr)
{
- struct ppc_inst *addr = (struct ppc_inst *)patch_site_addr(&patch__memset_nocache);
+ u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache);
struct ppc_inst insn;
/* Configure static keys first, now that we're relocated. */
@@ -85,7 +85,7 @@ notrace void __init machine_init(u64 dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_RAW_NOP()));
create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index b779d25761cf..1ff258f6c76c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
#include <linux/pgtable.h>
#include <asm/debugfs.h>
+#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -369,11 +370,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
- early_ioremap_setup();
-
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ early_ioremap_setup();
+
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance
@@ -788,7 +789,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
size_t align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = early_cpu_to_node(cpu);
void *ptr;
@@ -939,16 +940,20 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
* disable it by default. Book3S has a soft-nmi hardlockup detector based
* on the decrementer interrupt, so it does not suffer from this problem.
*
- * It is likely to get false positives in VM guests, so disable it there
- * by default too.
+ * It is likely to get false positives in KVM guests, so disable it there
+ * by default too. PowerVM will not stop or arbitrarily oversubscribe
+ * CPUs, but give a minimum regular allotment even with SPLPAR, so enable
+ * the detector for non-KVM guests, assume PowerVM.
*/
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
hardlockup_detector_disable();
#else
- if (firmware_has_feature(FW_FEATURE_LPAR))
- hardlockup_detector_disable();
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (is_kvm_guest())
+ hardlockup_detector_disable();
+ }
#endif
return 0;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 9ded046edb0e..e600764a926c 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -214,7 +214,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
regs->gpr[0] = __NR_restart_syscall;
else
regs->gpr[3] = regs->orig_gpr3;
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
regs->result = 0;
} else {
if (trap_is_scv(regs)) {
@@ -322,16 +322,16 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
* For signals taken in non-TM or suspended mode, we use the
* normal/non-checkpointed stack pointer.
*/
-
- unsigned long ret = tsk->thread.regs->gpr[1];
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned long ret = regs->gpr[1];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BUG_ON(tsk != current);
- if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+ if (MSR_TM_ACTIVE(regs->msr)) {
preempt_disable();
tm_reclaim_current(TM_CAUSE_SIGNAL);
- if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
ret = tsk->thread.ckpt_regs.gpr[1];
/*
@@ -341,7 +341,7 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
* (tm_recheckpoint_new_task() would recheckpoint). Besides, we
* enter the signal handler in non-transactional state.
*/
- tsk->thread.regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
preempt_enable();
}
#endif
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index f4aafa337c2e..1f07317964e4 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -166,9 +166,9 @@ copy_ckfpr_from_user(struct task_struct *task, void __user *from)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
-#define unsafe_copy_fpr_to_user(to, task, label) do { } while (0)
+#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label;} while (0)
-#define unsafe_copy_fpr_from_user(task, from, label) do { } while (0)
+#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label;} while (0)
static inline unsigned long
copy_fpr_to_user(void __user *to, struct task_struct *task)
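
The new stub form keeps the error label referenced even when the real copy is compiled out, so a caller whose only use of the label is through this macro does not trigger an unused-label warning, while the if (0) guarantees the goto is never taken. A minimal user-space illustration of the same shape (all names invented for the example):

#include <stdio.h>

/* Stub that still "uses" the label, mirroring the kernel macro's shape. */
#define stub_copy(dst, src, label) do { if (0) goto label; } while (0)

static int demo(void)
{
	int x = 42;

	stub_copy(&x, &x, failed);	/* compiles cleanly, never jumps */
	return 0;

failed:					/* label stays referenced */
	return -1;
}

int main(void)
{
	printf("%d\n", demo());
	return 0;
}
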
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 8f05ed0da292..0608581967f0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -354,14 +354,8 @@ static void prepare_save_tm_user_regs(void)
{
WARN_ON(tm_suspend_disabled);
-#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
-#endif
-#ifdef CONFIG_SPE
- if (current->thread.used_spe)
- flush_spe_to_thread(current);
-#endif
}
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
@@ -379,7 +373,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
*/
unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
-#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
@@ -412,7 +405,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else
unsafe_put_user(current->thread.ckvrsave,
(u32 __user *)&tm_frame->mc_vregs[32], failed);
-#endif /* CONFIG_ALTIVEC */
unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
if (msr & MSR_FP)
@@ -420,7 +412,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else
unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
-#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
@@ -436,23 +427,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
msr |= MSR_VSX;
}
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in __unsafe_save_user_regs().
- */
- if (current->thread.used_spe) {
- unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
- ELF_NEVRREG * sizeof(u32), failed);
- /* set MSR_SPE in the saved MSR value to indicate that
- * frame->mc_vregs contains valid data */
- msr |= MSR_SPE;
- }
-
- /* We always copy to/from spefscr */
- unsafe_put_user(current->thread.spefscr,
- (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
-#endif /* CONFIG_SPE */
unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
@@ -505,14 +479,14 @@ static long restore_user_regs(struct pt_regs *regs,
/* if doing signal return, restore the previous little-endian mode */
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
#ifdef CONFIG_ALTIVEC
/*
* Force the process to reload the altivec registers from
* current->thread when it next does altivec instructions
*/
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
@@ -534,7 +508,7 @@ static long restore_user_regs(struct pt_regs *regs,
* Force the process to reload the VSX registers from
* current->thread when it next does VSX instruction.
*/
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
@@ -550,12 +524,12 @@ static long restore_user_regs(struct pt_regs *regs,
* force the process to reload the FP registers from
* current->thread when it next does FP instructions
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
#ifdef CONFIG_SPE
/* force the process to reload the spe registers from
current->thread when it next does spe instructions */
- regs->msr &= ~MSR_SPE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
if (msr & MSR_SPE) {
/* restore spe registers from the stack */
unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
@@ -587,9 +561,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *tm_sr)
{
unsigned long msr, msr_hi;
-#ifdef CONFIG_VSX
int i;
-#endif
if (tm_suspend_disabled)
return 1;
@@ -608,10 +580,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
/* Restore the previous little-endian mode */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
-#ifdef CONFIG_ALTIVEC
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
@@ -629,14 +600,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
(u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
-#endif /* CONFIG_ALTIVEC */
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
-#ifdef CONFIG_VSX
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
@@ -649,24 +618,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
-#endif /* CONFIG_VSX */
-
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in restore_user_regs().
- */
- regs->msr &= ~MSR_SPE;
- if (msr & MSR_SPE) {
- unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32), failed);
- current->thread.used_spe = true;
- } else if (current->thread.used_spe)
- memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
-
- /* Always get SPEFSCR back */
- unsafe_get_user(current->thread.spefscr,
- (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
-#endif /* CONFIG_SPE */
user_read_access_end();
@@ -675,7 +626,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_restore_general_regs(regs, tm_sr, failed);
-#ifdef CONFIG_ALTIVEC
/* restore altivec registers from the stack */
if (msr & MSR_VEC)
unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
@@ -684,11 +634,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
/* Always get VRSAVE back */
unsafe_get_user(current->thread.vrsave,
(u32 __user *)&tm_sr->mc_vregs[32], failed);
-#endif /* CONFIG_ALTIVEC */
unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
-#ifdef CONFIG_VSX
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
@@ -697,7 +645,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
current->thread.used_vsr = true;
}
-#endif /* CONFIG_VSX */
/* Get the top half of the MSR from the user context */
unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
@@ -725,7 +672,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
*
* Pull in the MSR TM bits from the user context
*/
- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
/* Now, recheckpoint. This loads up all of the checkpointed (older)
* registers, including FP and V[S]Rs. After recheckpointing, the
* transactional versions should be loaded.
@@ -740,14 +687,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&current->thread.fp_state);
- regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
load_vr_state(&current->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
-#endif
preempt_enable();
@@ -828,10 +773,8 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
} else {
tramp = (unsigned long)mctx->mc_pad;
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
- failed);
- unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
@@ -858,10 +801,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
regs->gpr[4] = (unsigned long)&frame->info;
regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long)frame;
- regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
return 0;
failed:
@@ -926,9 +869,8 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
} else {
tramp = (unsigned long)mctx->mc_pad;
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
- unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
user_access_end();
@@ -947,10 +889,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->gpr[4] = (unsigned long) sc;
- regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
return 0;
failed:
@@ -1200,7 +1142,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
* set, and recheckpoint was not called. This avoid
* hitting a TM Bad thing at RFID
*/
- regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
}
/* Fall through, for non-TM restore */
#endif
@@ -1289,7 +1231,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
affect the contents of these registers. After this point,
failure is a problem, anyway, and it's very unlikely unless
the user is really doing something wrong. */
- regs->msr = new_msr;
+ regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
current->thread.debug.dbcr0 = new_dbcr0;
#endif
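
The trampoline written into mc_pad is still the classic two-word sequence "li r0,<sigreturn nr>; sc"; the change only swaps open-coded PPC_INST_* arithmetic for the PPC_RAW_*() encoding macros. As a rough cross-check of what those words encode (per the PowerPC ISA; the helpers below are illustrative, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

/* li rD,SI is addi rD,0,SI: primary opcode 14 (0x38000000). */
static uint32_t enc_li(unsigned int rd, uint16_t si)
{
	return 0x38000000u | (rd << 21) | si;
}

/* sc: primary opcode 17 with bit 1 set. */
static uint32_t enc_sc(void)
{
	return 0x44000002u;
}

int main(void)
{
	/* e.g. __NR_sigreturn is 119 and __NR_rt_sigreturn is 172 on powerpc */
	printf("li r0,119 = 0x%08x\n", enc_li(0, 119));	/* 0x38000077 */
	printf("sc        = 0x%08x\n", enc_sc());
	return 0;
}
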
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index dca66481d0c2..1831bba0582e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -354,7 +354,7 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
/* get MSR separately, transfer the LE bit if doing signal return */
unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
@@ -376,7 +376,7 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
unsafe_get_user(v_regs, &sc->v_regs, efault_out);
@@ -468,7 +468,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
return -EINVAL;
/* pull in MSR LE from user context */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
@@ -495,7 +495,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
@@ -565,7 +565,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
preempt_disable();
/* pull in MSR TS bits from user context */
- regs->msr |= msr & MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
/*
* Ensure that TM is enabled in regs->msr before we leave the signal
@@ -583,7 +583,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* to be de-scheduled with MSR[TS] set but without calling
* tm_recheckpoint(). This can cause a bug.
*/
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&tsk->thread);
@@ -591,11 +591,11 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&tsk->thread.fp_state);
- regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
}
if (msr & MSR_VEC) {
load_vr_state(&tsk->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
preempt_enable();
@@ -618,15 +618,12 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
int i;
long err = 0;
- /* bctrl # call the handler */
- err |= __put_user(PPC_INST_BCTRL, &tramp[0]);
- /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */
- err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) |
- (__SIGNAL_FRAMESIZE & 0xffff), &tramp[1]);
- /* li r0, __NR_[rt_]sigreturn| */
- err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[2]);
- /* sc */
- err |= __put_user(PPC_INST_SC, &tramp[3]);
+ /* Call the handler and pop the dummy stackframe */
+ err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
+ err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);
+
+ err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
+ err |= __put_user(PPC_RAW_SC(), &tramp[3]);
/* Minimal traceback info */
for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++)
@@ -720,6 +717,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
/* This returns like rt_sigreturn */
set_thread_flag(TIF_RESTOREALL);
+
return 0;
efault_out:
@@ -786,7 +784,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
* the MSR[TS] that came from user context later, at
* restore_tm_sigcontexts.
*/
- regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
goto badframe;
@@ -818,7 +816,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
* MSR[TS] set, but without CPU in the proper state,
* causing a TM bad thing.
*/
- current->thread.regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr & ~MSR_TS_MASK);
if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
goto badframe;
@@ -832,6 +831,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto badframe;
set_thread_flag(TIF_RESTOREALL);
+
return 0;
badframe_block:
@@ -902,24 +902,23 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
user_write_access_end();
+ /* Save the siginfo outside of the unsafe block. */
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ goto badframe;
+
/* Make sure signal handler doesn't get spurious FP exceptions */
tsk->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */
if (tsk->mm->context.vdso) {
- regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
+ regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
goto badframe;
- regs->nip = (unsigned long) &frame->tramp[0];
+ regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
}
-
- /* Save the siginfo outside of the unsafe block. */
- if (copy_siginfo_to_user(&frame->info, &ksig->info))
- goto badframe;
-
/* Allocate a dummy caller frame for the signal handler. */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
@@ -942,14 +941,13 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
}
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->result = 0;
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
- err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
+ regs->gpr[4] = (unsigned long)&frame->info;
+ regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long) frame;
} else {
regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 2e05c783440a..447b78a87c8f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -619,6 +619,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
/*
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -634,6 +636,15 @@ void smp_send_stop(void)
static void stop_this_cpu(void *dummy)
{
hard_irq_disable();
+
+ /*
+ * Offlining CPUs in stop_this_cpu can result in scheduler warnings
+ * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
+ * to know other CPUs are offline before it breaks locks to flush
+ * printk buffers, in case we panic()ed while holding the lock.
+ */
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -1047,7 +1058,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
/*
* numa_node_id() works after this.
*/
@@ -1541,13 +1552,16 @@ void start_secondary(void *unused)
{
unsigned int cpu = raw_smp_processor_id();
+ /* PPC64 calls setup_kup() in early_setup_secondary() */
+ if (IS_ENABLED(CONFIG_PPC32))
+ setup_kup();
+
mmgrab(&init_mm);
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
rcu_cpu_starting(cpu);
- preempt_disable();
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 1deb1bf331dd..2b0d04a1b7d2 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -23,8 +23,8 @@
#include <asm/paca.h>
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
- struct task_struct *task, struct pt_regs *regs)
+void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
unsigned long sp;
@@ -61,8 +61,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
-int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
- void *cookie, struct task_struct *task)
+int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
{
unsigned long sp;
unsigned long newsp;
@@ -172,17 +172,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
static void raise_backtrace_ipi(cpumask_t *mask)
{
+ struct paca_struct *p;
unsigned int cpu;
+ u64 delay_us;
for_each_cpu(cpu, mask) {
- if (cpu == smp_processor_id())
+ if (cpu == smp_processor_id()) {
handle_backtrace_ipi(NULL);
- else
- smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
- }
+ continue;
+ }
- for_each_cpu(cpu, mask) {
- struct paca_struct *p = paca_ptrs[cpu];
+ delay_us = 5 * USEC_PER_SEC;
+
+ if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
+ // Now wait up to 5s for the other CPU to do its backtrace
+ while (cpumask_test_cpu(cpu, mask) && delay_us) {
+ udelay(1);
+ delay_us--;
+ }
+
+ // Other CPU cleared itself from the mask
+ if (delay_us)
+ continue;
+ }
+
+ p = paca_ptrs[cpu];
cpumask_clear_cpu(cpu, mask);
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index a552c9e68d7e..bf4ae0f0e36c 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -114,7 +114,8 @@ SYSCALL_DEFINE0(switch_endian)
{
struct thread_info *ti;
- current->thread.regs->msr ^= MSR_LE;
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr ^ MSR_LE);
/*
* Set TIF_RESTOREALL so that r3 isn't clobbered on return to
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 2e68fbb57cc6..aef2a290e71a 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -522,7 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 2e08640bb3b4..5ff0e55d0db1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -843,14 +843,14 @@ static int register_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -858,7 +858,7 @@ static int register_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
@@ -940,14 +940,14 @@ static int unregister_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -955,7 +955,7 @@ static int unregister_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
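
These hunks are a mechanical conversion from the open-coded sizeof division to ARRAY_SIZE(), which computes the same element count without repeating the element type and, in its kernel form, refuses to compile when handed a pointer rather than an array. A standalone illustration (the attribute entries are invented, not the real tables):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct device_attribute { const char *name; };

/* Illustrative entries only. */
static struct device_attribute ibm_common_attrs[] = {
	{ "attr_a" }, { "attr_b" }, { "attr_c" },
};

int main(void)
{
	/* Both forms yield 3 here, but ARRAY_SIZE does not repeat the type. */
	printf("%zu\n", sizeof(ibm_common_attrs) / sizeof(struct device_attribute));
	printf("%zu\n", ARRAY_SIZE(ibm_common_attrs));
	return 0;
}
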
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 6c31af7f4fa8..b9a047d92ec0 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -201,7 +201,7 @@ static int __init TAU_init(void)
tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
!strcmp(cur_cpu_spec->platform, "ppc750");
- tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
+ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
if (!tau_workq)
return -ENOMEM;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b67d93a609a2..e45ce427bffb 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -231,24 +231,13 @@ static u64 scan_dispatch_log(u64 stop_tb)
void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
- unsigned long save_irq_soft_mask = irq_soft_mask_return();
struct cpu_accounting_data *acct = &local_paca->accounting;
- /* We are called early in the exception entry, before
- * soft/hard_enabled are sync'ed to the expected state
- * for the exception. We are hard disabled but the PACA
- * needs to reflect that so various debug stuff doesn't
- * complain
- */
- irq_soft_mask_set(IRQS_DISABLED);
-
sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime);
acct->stime -= sst;
acct->utime -= ust;
acct->steal_time += ust + sst;
-
- irq_soft_mask_set(save_irq_soft_mask);
}
static inline u64 calculate_stolen_time(u64 stop_tb)
@@ -508,16 +497,6 @@ EXPORT_SYMBOL(profile_pc);
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
-static inline unsigned long test_irq_work_pending(void)
-{
- unsigned long x;
-
- asm volatile("lbz %0,%1(13)"
- : "=r" (x)
- : "i" (offsetof(struct paca_struct, irq_work_pending)));
- return x;
-}
-
static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index ffe9537195aa..d89c5df4f206 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -49,7 +49,7 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
- create_branch(&op, (struct ppc_inst *)ip, addr, link ? 1 : 0);
+ create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
return op;
}
@@ -79,7 +79,7 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
}
/* replace the text with the new text */
- if (patch_instruction((struct ppc_inst *)ip, new))
+ if (patch_instruction((u32 *)ip, new))
return -EPERM;
return 0;
@@ -94,7 +94,7 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr)
addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
- return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0;
+ return create_branch(&op, (u32 *)ip, addr, 0) == 0;
}
static int is_bl_op(struct ppc_inst op)
@@ -162,7 +162,7 @@ __ftrace_make_nop(struct module *mod,
#ifdef CONFIG_MPROFILE_KERNEL
/* When using -mkernel_profile there is no load to jump over */
- pop = ppc_inst(PPC_INST_NOP);
+ pop = ppc_inst(PPC_RAW_NOP());
if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
pr_err("Fetching instruction at %lx failed.\n", ip - 4);
@@ -170,7 +170,7 @@ __ftrace_make_nop(struct module *mod,
}
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_MFLR)) &&
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
!ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
pr_err("Unexpected instruction %s around bl _mcount\n",
ppc_inst_as_str(op));
@@ -203,12 +203,12 @@ __ftrace_make_nop(struct module *mod,
}
if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
- pr_err("Expected %08x found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
+ pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
return -EINVAL;
}
#endif /* CONFIG_MPROFILE_KERNEL */
- if (patch_instruction((struct ppc_inst *)ip, pop)) {
+ if (patch_instruction((u32 *)ip, pop)) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -278,9 +278,9 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- op = ppc_inst(PPC_INST_NOP);
+ op = ppc_inst(PPC_RAW_NOP());
- if (patch_instruction((struct ppc_inst *)ip, op))
+ if (patch_instruction((u32 *)ip, op))
return -EPERM;
return 0;
@@ -380,7 +380,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
return -1;
}
- if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) {
+ if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
return -1;
}
@@ -424,7 +424,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
}
- if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) {
+ if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -446,7 +446,7 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
- new = ppc_inst(PPC_INST_NOP);
+ new = ppc_inst(PPC_RAW_NOP());
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
return __ftrace_make_nop_kernel(rec, addr);
@@ -510,7 +510,7 @@ static int
expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
/* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP)))
+ if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
return 0;
return 1;
}
@@ -589,14 +589,14 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int err;
struct ppc_inst op;
- unsigned long ip = rec->ip;
+ u32 *ip = (u32 *)rec->ip;
/* read where this goes */
- if (copy_inst_from_kernel_nofault(&op, (void *)ip))
+ if (copy_inst_from_kernel_nofault(&op, ip))
return -EFAULT;
/* It should be pointing to a nop */
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
return -EINVAL;
}
@@ -608,8 +608,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* create the branch to the trampoline */
- err = create_branch(&op, (struct ppc_inst *)ip,
- rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ err = create_branch(&op, ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
if (err) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -617,7 +616,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
pr_devel("write to %lx\n", rec->ip);
- if (patch_instruction((struct ppc_inst *)ip, op))
+ if (patch_instruction(ip, op))
return -EPERM;
return 0;
@@ -653,7 +652,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
return -EFAULT;
}
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
return -EINVAL;
}
@@ -684,7 +683,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
- old = ppc_inst(PPC_INST_NOP);
+ old = ppc_inst(PPC_RAW_NOP());
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
@@ -762,7 +761,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
/* The new target may be within range */
if (test_24bit_addr(ip, addr)) {
/* within range */
- if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -790,12 +789,12 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
/* Ensure branch is within 24 bits */
- if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
+ if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("Branch out of range\n");
return -EINVAL;
}
- if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -851,7 +850,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
struct ppc_inst old, new;
int ret;
- old = ppc_inst_read((struct ppc_inst *)&ftrace_call);
+ old = ppc_inst_read((u32 *)&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
@@ -859,7 +858,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- old = ppc_inst_read((struct ppc_inst *)&ftrace_regs_call);
+ old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
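
test_24bit_addr() and create_branch() both rest on the same ISA fact: a b/bl instruction carries a signed 24-bit word offset, so the target must be 4-byte aligned and within roughly +/-32 MB of the branch site. A standalone sketch of that check and encoding (not the kernel's create_branch(), whose argument types this patch is changing):

#include <stdint.h>

/* Returns the encoded b/bl instruction, or 0 if the target is unreachable. */
static uint32_t sketch_branch(uint64_t pc, uint64_t target, int link)
{
	int64_t offset = (int64_t)(target - pc);

	/* Must fit a signed 26-bit byte offset and be word aligned. */
	if (offset < -0x2000000 || offset > 0x1fffffc || (offset & 3))
		return 0;

	/* Primary opcode 18; LI field masked by 0x03fffffc; LK is bit 0x1. */
	return 0x48000000u | ((uint32_t)offset & 0x03fffffcu) | (link ? 1 : 0);
}
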
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b4ab95c9e94a..dfbce527c98e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -67,6 +67,7 @@
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
+#include <asm/disassemble.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -427,7 +428,7 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
return;
nonrecoverable:
- regs->msr &= ~MSR_RI;
+ regs_set_return_msr(regs, regs->msr & ~MSR_RI);
#endif
}
DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
@@ -537,11 +538,11 @@ static inline int check_io_access(struct pt_regs *regs)
* For the debug message, we look at the preceding
* load or store.
*/
- if (*nip == PPC_INST_NOP)
+ if (*nip == PPC_RAW_NOP())
nip -= 2;
- else if (*nip == PPC_INST_ISYNC)
+ else if (*nip == PPC_RAW_ISYNC())
--nip;
- if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
+ if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
unsigned int rb;
--nip;
@@ -549,8 +550,8 @@ static inline int check_io_access(struct pt_regs *regs)
printk(KERN_DEBUG "%s bad port %lx at %p\n",
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
@@ -586,8 +587,8 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_BOUNDARY SRR1_BOUNDARY
#define single_stepping(regs) ((regs)->msr & MSR_SE)
-#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
-#define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE)
+#define clear_single_step(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
+#define clear_br_trace(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
#endif
#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4)
@@ -1031,7 +1032,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
#endif /* !__LITTLE_ENDIAN__ */
/* Go to next instruction */
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
}
#endif /* CONFIG_VSX */
@@ -1476,7 +1477,7 @@ static void do_program_check(struct pt_regs *regs)
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return;
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
@@ -1538,7 +1539,7 @@ static void do_program_check(struct pt_regs *regs)
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
case 0:
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
return;
case -EFAULT:
@@ -1566,7 +1567,7 @@ DEFINE_INTERRUPT_HANDLER(program_check_exception)
*/
DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
{
- regs->msr |= REASON_ILLEGAL;
+ regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
do_program_check(regs);
}
@@ -1593,7 +1594,7 @@ DEFINE_INTERRUPT_HANDLER(alignment_exception)
if (fixed == 1) {
/* skip over emulated instruction */
- regs->nip += inst_length(reason);
+ regs_add_return_ip(regs, inst_length(reason));
emulate_single_step(regs);
return;
}
@@ -1659,7 +1660,7 @@ static void tm_unavailable(struct pt_regs *regs)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (user_mode(regs)) {
current->thread.load_tm++;
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
tm_enable();
tm_restore_sprs(&current->thread);
return;
@@ -1751,7 +1752,7 @@ DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
pr_err("DSCR based mfspr emulation failed\n");
return;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
}
return;
@@ -1948,7 +1949,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
*/
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM flag is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -1969,7 +1970,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
* instead of stopping here when hitting a BT
*/
if (debug_status & DBSR_BT) {
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable BT */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
@@ -1980,7 +1981,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_BT;
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
return;
}
@@ -1994,7 +1995,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
if (debugger_sstep(regs))
return;
} else if (debug_status & DBSR_IC) { /* Instruction complete */
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable instruction completion */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
@@ -2016,7 +2017,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
current->thread.debug.dbcr0 &= ~DBCR0_IC;
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM bit is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -2044,7 +2045,7 @@ DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
PPC_WARN_EMULATED(altivec, regs);
err = emulate_altivec(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2109,7 +2110,7 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
err = do_spe_mathemu(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2140,10 +2141,10 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
giveup_spe(current);
preempt_enable();
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
err = speround_handler(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
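
With <asm/disassemble.h> included, check_io_access() can test the primary opcode through get_op() instead of open-coding the shift; the primary opcode of any 32-bit PowerPC instruction is its top 6 bits. A trivial standalone equivalent:

#include <stdint.h>
#include <stdio.h>

/* Primary opcode = top 6 bits of the 32-bit instruction (cf. get_op()). */
static inline unsigned int sketch_get_op(uint32_t inst)
{
	return inst >> 26;
}

int main(void)
{
	uint32_t nop = 0x60000000;	/* ori r0,r0,0 */

	printf("%u\n", sketch_get_op(nop));	/* prints 24 (ori) */
	return 0;
}
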
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 9356b60d6030..8513aa49614e 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -296,3 +296,42 @@ void __init udbg_init_40x_realmode(void)
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_MICROWATT
+
+#define UDBG_UART_MW_ADDR ((void __iomem *)0xc0002000)
+
+static u8 udbg_uart_in_isa300_rm(unsigned int reg)
+{
+ uint64_t msr = mfmsr();
+ uint8_t c;
+
+ mtmsr(msr & ~(MSR_EE|MSR_DR));
+ isync();
+ eieio();
+ c = __raw_rm_readb(UDBG_UART_MW_ADDR + (reg << 2));
+ mtmsr(msr);
+ isync();
+ return c;
+}
+
+static void udbg_uart_out_isa300_rm(unsigned int reg, u8 val)
+{
+ uint64_t msr = mfmsr();
+
+ mtmsr(msr & ~(MSR_EE|MSR_DR));
+ isync();
+ eieio();
+ __raw_rm_writeb(val, UDBG_UART_MW_ADDR + (reg << 2));
+ mtmsr(msr);
+ isync();
+}
+
+void __init udbg_init_debug_microwatt(void)
+{
+ udbg_uart_in = udbg_uart_in_isa300_rm;
+ udbg_uart_out = udbg_uart_out_isa300_rm;
+ udbg_use_uart();
+}
+
+#endif /* CONFIG_PPC_EARLY_DEBUG_MICROWATT */
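
The new Microwatt accessors only move raw bytes to and from the UART (at a 4-byte register stride, with MSR[EE] and MSR[DR] cleared so the access goes out untranslated); udbg_use_uart() then layers character output on top of the installed hooks. A simplified sketch of that consumer side, using standard 16550 register numbers that are assumptions here rather than anything taken from this patch:

#include <stdint.h>

#define UART_THR	0	/* transmit holding register */
#define UART_LSR	5	/* line status register */
#define UART_LSR_THRE	0x20	/* transmitter holding register empty */

/* Stand-ins for the udbg_uart_in/udbg_uart_out hooks the patch installs. */
static uint8_t (*uart_in)(unsigned int reg);
static void (*uart_out)(unsigned int reg, uint8_t val);

static void sketch_uart_putc(char c)
{
	if (!uart_in || !uart_out)
		return;
	/* Poll until the transmitter can take another byte, then send it. */
	while (!(uart_in(UART_LSR) & UART_LSR_THRE))
		;
	uart_out(UART_THR, (uint8_t)c);
}
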
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 186f69b11e94..c6975467d9ff 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
return -EINVAL;
if (cpu_has_feature(CPU_FTR_ARCH_31) &&
- ppc_inst_prefixed(auprobe->insn) &&
+ ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
(addr & 0x3f) == 60) {
pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
return -EINVAL;
@@ -62,7 +62,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
autask->saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
- regs->nip = current->utask->xol_vaddr;
+ regs_set_return_ip(regs, current->utask->xol_vaddr);
user_enable_single_step(current);
return 0;
@@ -119,7 +119,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* support doesn't exist and have to fix-up the next instruction
* to be executed.
*/
- regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, &auprobe->insn);
+ regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
user_disable_single_step(current);
return 0;
@@ -182,7 +182,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, ppc_inst_read(&auprobe->insn));
+ ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
if (ret > 0)
return true;
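
The (addr & 0x3f) == 60 test reflects that ISA v3.1 prefixed instructions are 8 bytes long and must not cross a 64-byte boundary, and byte offset 60 is the one placement inside a 64-byte block where an 8-byte instruction would straddle it. The same arithmetic as a tiny standalone check:

#include <stdbool.h>
#include <stdint.h>

/* True if an 8-byte (prefixed) instruction at addr would straddle a
 * 64-byte boundary, i.e. it starts at the last word of a 64B block.
 * e.g. prefixed_crosses_64b(0x1000003c) == true. */
static bool prefixed_crosses_64b(uint64_t addr)
{
	return (addr & 0x3f) == 60;
}
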
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index f5a52f444e36..fc120fac1910 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -73,6 +73,10 @@ _GLOBAL(load_up_altivec)
addi r5,r4,THREAD /* Get THREAD */
oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
#endif
li r4,1
stb r4,THREAD_LOAD_VEC(r5)
@@ -131,7 +135,9 @@ _GLOBAL(load_up_vsx)
/* enable use of VSX after return */
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
- b fast_interrupt_return
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+ b fast_interrupt_return_srr
#endif /* CONFIG_VSX */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 72fa3c00229a..40bdefe9caa7 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,22 @@
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
+#define SOFT_MASK_TABLE(align) \
+ . = ALIGN(align); \
+ __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
+ __start___soft_mask_table = .; \
+ KEEP(*(__soft_mask_table)) \
+ __stop___soft_mask_table = .; \
+ }
+
+#define RESTART_TABLE(align) \
+ . = ALIGN(align); \
+ __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
+ __start___restart_table = .; \
+ KEEP(*(__restart_table)) \
+ __stop___restart_table = .; \
+ }
+
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
@@ -124,6 +140,9 @@ SECTIONS
RO_DATA(PAGE_SIZE)
#ifdef CONFIG_PPC64
+ SOFT_MASK_TABLE(8)
+ RESTART_TABLE(8)
+
. = ALIGN(8);
__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
__start___stf_entry_barrier_fixup = .;
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index c9a8f4781a10..a165635fd214 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -24,6 +24,7 @@
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
+#include <linux/processor.h>
#include <linux/smp.h>
#include <asm/interrupt.h>
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 56da5eb2b923..48525e8b5730 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -68,11 +68,11 @@ void machine_kexec_cleanup(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
VMCOREINFO_SYMBOL(contig_page_data);
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index 0196d0c211ac..10f997e6bb95 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -105,8 +105,8 @@ void crash_ipi_callback(struct pt_regs *regs)
static void crash_kexec_prepare_cpus(int cpu)
{
unsigned int msecs;
- unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
- int tries = 0;
+ volatile unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+ volatile int tries = 0;
int (*old_handler)(struct pt_regs *regs);
printk(KERN_EMERG "Sending IPI to other CPUs\n");
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 2bfeaa13befb..583c14ef596e 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -6,7 +6,7 @@
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
KVM := ../../../virt/kvm
-common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o
+common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
@@ -57,6 +57,7 @@ kvm-pr-y := \
book3s_32_mmu.o
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+ book3s_64_entry.o \
tm.o
ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -86,6 +87,7 @@ kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_hmi.o \
+ book3s_hv_p9_entry.o \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_hv_ras.o \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 2b691f4d1f26..79833f78d1da 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -38,37 +38,66 @@
/* #define EXIT_DEBUG */
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("exits", sum_exits),
- VCPU_STAT("mmio", mmio_exits),
- VCPU_STAT("sig", signal_exits),
- VCPU_STAT("sysc", syscall_exits),
- VCPU_STAT("inst_emu", emulated_inst_exits),
- VCPU_STAT("dec", dec_exits),
- VCPU_STAT("ext_intr", ext_intr_exits),
- VCPU_STAT("queue_intr", queue_intr),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("halt_wait_ns", halt_wait_ns),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_successful_wait", halt_successful_wait),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("pf_storage", pf_storage),
- VCPU_STAT("sp_storage", sp_storage),
- VCPU_STAT("pf_instruc", pf_instruc),
- VCPU_STAT("sp_instruc", sp_instruc),
- VCPU_STAT("ld", ld),
- VCPU_STAT("ld_slow", ld_slow),
- VCPU_STAT("st", st),
- VCPU_STAT("st_slow", st_slow),
- VCPU_STAT("pthru_all", pthru_all),
- VCPU_STAT("pthru_host", pthru_host),
- VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
- VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
- VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_TIME_NSEC(VCPU, halt_wait_ns),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pf_storage),
+ STATS_DESC_COUNTER(VCPU, pf_instruc),
+ STATS_DESC_COUNTER(VCPU, sp_storage),
+ STATS_DESC_COUNTER(VCPU, sp_instruc),
+ STATS_DESC_COUNTER(VCPU, queue_intr),
+ STATS_DESC_COUNTER(VCPU, ld_slow),
+ STATS_DESC_COUNTER(VCPU, st_slow),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
@@ -171,6 +200,12 @@ void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
+{
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
+}
+EXPORT_SYMBOL(kvmppc_core_queue_syscall);
+
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
@@ -1044,13 +1079,10 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
if (xics_on_xive()) {
- kvmppc_xive_init_module();
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
- if (kvmppc_xive_native_supported()) {
- kvmppc_xive_native_init_module();
+ if (kvmppc_xive_native_supported())
kvm_register_device_ops(&kvm_xive_native_ops,
KVM_DEV_TYPE_XIVE);
- }
} else
#endif
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
@@ -1060,12 +1092,6 @@ static int kvmppc_book3s_init(void)
static void kvmppc_book3s_exit(void)
{
-#ifdef CONFIG_KVM_XICS
- if (xics_on_xive()) {
- kvmppc_xive_exit_module();
- kvmppc_xive_native_exit_module();
- }
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
#endif
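
The static_asserts pin the new descriptor tables to the stat structures: every u64 counter in struct kvm_vm_stat and struct kvm_vcpu_stat needs exactly one STATS_DESC_* entry, so adding a field without a descriptor (or the reverse) fails the build. A reduced standalone model of that invariant, with invented field names:

#include <assert.h>
#include <stdint.h>

struct sketch_vcpu_stat {
	uint64_t sum_exits;
	uint64_t mmio_exits;
	uint64_t signal_exits;
};

/* One name per counter, standing in for the STATS_DESC_* entries. */
static const char *const sketch_desc[] = {
	"sum_exits", "mmio_exits", "signal_exits",
};

/* Same check as the patch: descriptors and u64 fields must stay in sync. */
static_assert(sizeof(sketch_desc) / sizeof(sketch_desc[0]) ==
	      sizeof(struct sketch_vcpu_stat) / sizeof(uint64_t),
	      "descriptor table out of sync with stat struct");
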
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index e8e7b2c530d1..4b3a8d80cfa3 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -353,9 +353,6 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
preempt_enable();
}
-/* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
-
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
new file mode 100644
index 000000000000..983b8c18bc31
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+#include <asm/code-patching-asm.h>
+#include <asm/exception-64s.h>
+#include <asm/export.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_book3s_asm.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/ultravisor-api.h>
+
+/*
+ * These are branched to from interrupt handlers in exception-64s.S which set
+ * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
+ */
+
+/*
+ * This is a hcall, so register convention is as
+ * Documentation/powerpc/papr_hcalls.rst.
+ *
+ * This may also be a syscall from PR-KVM userspace that is to be
+ * reflected to the PR guest kernel, so registers may be set up for
+ * a system call rather than hcall. We don't currently clobber
+ * anything here, but the 0xc00 handler has already clobbered CTR
+ * and CR0, so PR-KVM can not support a guest kernel that preserves
+ * those registers across its system calls.
+ *
+ * The state of registers is as kvmppc_interrupt, except CFAR is not
+ * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
+ */
+.global kvmppc_hcall
+.balign IFETCH_ALIGN_BYTES
+kvmppc_hcall:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_hcall
+#endif
+ ld r10,PACA_EXGEN+EX_R13(r13)
+ SET_SCRATCH0(r10)
+ li r10,0xc00
+ /* Now we look like kvmppc_interrupt */
+ li r11,PACA_EXGEN
+ b .Lgot_save_area
+
+/*
+ * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
+ * call convention:
+ *
+ * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
+ * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
+ * guest R13 also saved in SCRATCH0
+ * R13 = PACA
+ * R11 = (H)SRR0
+ * R12 = (H)SRR1
+ * R9 = guest CR
+ * PPR is set to medium
+ *
+ * With the addition for KVM:
+ * R10 = trap vector
+ */
+.global kvmppc_interrupt
+.balign IFETCH_ALIGN_BYTES
+kvmppc_interrupt:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ std r10,HSTATE_SCRATCH0(r13)
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_interrupt
+ ld r10,HSTATE_SCRATCH0(r13)
+#endif
+ li r11,PACA_EXGEN
+ cmpdi r10,0x200
+ bgt+ .Lgot_save_area
+ li r11,PACA_EXMC
+ beq .Lgot_save_area
+ li r11,PACA_EXNMI
+.Lgot_save_area:
+ add r11,r11,r13
+BEGIN_FTR_SECTION
+ ld r12,EX_CFAR(r11)
+ std r12,HSTATE_CFAR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r12,EX_CTR(r11)
+ mtctr r12
+BEGIN_FTR_SECTION
+ ld r12,EX_PPR(r11)
+ std r12,HSTATE_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ld r12,EX_R12(r11)
+ std r12,HSTATE_SCRATCH0(r13)
+ sldi r12,r9,32
+ or r12,r12,r10
+ ld r9,EX_R9(r11)
+ ld r10,EX_R10(r11)
+ ld r11,EX_R11(r11)
+
+ /*
+ * Hcalls and other interrupts come here after normalising register
+ * contents and save locations:
+ *
+ * R12 = (guest CR << 32) | interrupt vector
+ * R13 = PACA
+ * guest R12 saved in shadow HSTATE_SCRATCH0
+ * guest R13 saved in SPRN_SCRATCH0
+ */
+ std r9,HSTATE_SCRATCH2(r13)
+ lbz r9,HSTATE_IN_GUEST(r13)
+ cmpwi r9,KVM_GUEST_MODE_SKIP
+ beq- .Lmaybe_skip
+.Lno_skip:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ cmpwi r9,KVM_GUEST_MODE_GUEST
+ beq kvmppc_interrupt_pr
+#endif
+ b kvmppc_interrupt_hv
+#else
+ b kvmppc_interrupt_pr
+#endif
+
+/*
+ * "Skip" interrupts are part of a trick KVM uses a with hash guests to load
+ * the faulting instruction in guest memory from the the hypervisor without
+ * walking page tables.
+ *
+ * When the guest takes a fault that requires the hypervisor to load the
+ * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
+ * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
+ * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
+ * but loads and stores will access the guest context. This is used to load
+ * the faulting instruction using the faulting guest effective address.
+ *
+ * However the guest context may not be able to translate, or it may cause a
+ * machine check or other issue, which results in a fault in the host
+ * (even with KVM-HV).
+ *
+ * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
+ * are (or are likely) caused by that load, the instruction is skipped by
+ * simply returning with the PC advanced by 4; the caller then notices the
+ * load did not execute and falls back to the slow path, which walks the
+ * page tables to read guest memory.
+ */
+.Lmaybe_skip:
+ cmpwi r12,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_STORAGE
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_SEGMENT
+ beq 1f
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* HSRR interrupts get 2 added to interrupt number */
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
+ beq 2f
+#endif
+ b .Lno_skip
+1: mfspr r9,SPRN_SRR0
+ addi r9,r9,4
+ mtspr SPRN_SRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ RFI_TO_KERNEL
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+2: mfspr r9,SPRN_HSRR0
+ addi r9,r9,4
+ mtspr SPRN_HSRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ HRFI_TO_KERNEL
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+/* Stack frame offsets for kvmppc_p9_enter_guest */
+#define SFS (144 + STACK_FRAME_MIN_SIZE)
+#define STACK_SLOT_NVGPRS (SFS - 144) /* 18 gprs */
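(The 144 bytes of this slot hold the 18 non-volatile GPRs r14-r31 spilled below, at 8 bytes each.)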
+
+/*
+ * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
+ *
+ * Enter the guest on a ISAv3.0 or later system.
+ */
+.balign IFETCH_ALIGN_BYTES
+_GLOBAL(kvmppc_p9_enter_guest)
+EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
+ mflr r0
+ std r0,PPC_LR_STKOFF(r1)
+ stdu r1,-SFS(r1)
+
+ std r1,HSTATE_HOST_R1(r13)
+
+ mfcr r4
+ stw r4,SFS+8(r1)
+
+ reg = 14
+ .rept 18
+ std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_LR(r3)
+ mtlr r4
+ ld r4,VCPU_CTR(r3)
+ mtctr r4
+ ld r4,VCPU_XER(r3)
+ mtspr SPRN_XER,r4
+
+ ld r1,VCPU_CR(r3)
+
+BEGIN_FTR_SECTION
+ ld r4,VCPU_CFAR(r3)
+ mtspr SPRN_CFAR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ ld r4,VCPU_PPR(r3)
+ mtspr SPRN_PPR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ reg = 4
+ .rept 28
+ ld reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_KVM(r3)
+ lbz r4,KVM_SECURE_GUEST(r4)
+ cmpdi r4,0
+ ld r4,VCPU_GPR(R4)(r3)
+ bne .Lret_to_ultra
+
+ mtcr r1
+
+ ld r0,VCPU_GPR(R0)(r3)
+ ld r1,VCPU_GPR(R1)(r3)
+ ld r2,VCPU_GPR(R2)(r3)
+ ld r3,VCPU_GPR(R3)(r3)
+
+ HRFI_TO_GUEST
+ b .
+
+ /*
+ * Use the UV_RETURN ultracall to return control to the Ultravisor
+ * after processing a hypercall or interrupt that was forwarded
+ * (a.k.a. reflected) to the Hypervisor.
+ *
+ * All registers have already been reloaded except those the ucall requires:
+ * R0 = hcall result
+ * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
+ * R3 = UV_RETURN
+ */
+.Lret_to_ultra:
+ mtcr r1
+ ld r1,VCPU_GPR(R1)(r3)
+
+ ld r0,VCPU_GPR(R3)(r3)
+ mfspr r2,SPRN_SRR1
+ LOAD_REG_IMMEDIATE(r3, UV_RETURN)
+ sc 2
+
+/*
+ * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
+ * above if the interrupt was taken for a guest that was entered via
+ * kvmppc_p9_enter_guest().
+ *
+ * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
+ * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
+ * establishes the host stack and registers to return from the
+ * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
+ * (SPRs and FP, VEC, etc).
+ */
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_hcall:
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
+ li r10,0xc00
+ std r10,HSTATE_SCRATCH0(r13)
+
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_interrupt:
+ /*
+ * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
+ * hypervisor, that means we can't return from the entry stack.
+ */
+ rldicl. r10,r12,64-MSR_HV_LG,63
+ bne- kvmppc_p9_bad_interrupt
+
+ std r1,HSTATE_SCRATCH1(r13)
+ std r3,HSTATE_SCRATCH2(r13)
+ ld r1,HSTATE_HOST_R1(r13)
+ ld r3,HSTATE_KVM_VCPU(r13)
+
+ std r9,VCPU_CR(r3)
+
+1:
+ std r11,VCPU_PC(r3)
+ std r12,VCPU_MSR(r3)
+
+ reg = 14
+ .rept 18
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ /* r1, r3, r9-r13 are saved to vcpu by C code */
+ std r0,VCPU_GPR(R0)(r3)
+ std r2,VCPU_GPR(R2)(r3)
+ reg = 4
+ .rept 5
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ ld r2,PACATOC(r13)
+
+ mflr r4
+ std r4,VCPU_LR(r3)
+ mfspr r4,SPRN_XER
+ std r4,VCPU_XER(r3)
+
+ reg = 14
+ .rept 18
+ ld reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ lwz r4,SFS+8(r1)
+ mtcr r4
+
+ /*
+ * Flush the link stack here, before executing the first blr on the
+ * way out of the guest.
+ *
+ * The link stack won't match coming out of the guest anyway so the
+ * only cost is the flush itself. The call clobbers r0.
+ */
+1: nop
+ patch_site 1b patch__call_kvm_flush_link_stack_p9
+
+ addi r1,r1,SFS
+ ld r0,PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+
+/*
+ * Took an interrupt somewhere right before HRFID to guest, so registers are
+ * in a bad way. Restore enough state to run host virtual-mode code and run
+ * the Linux interrupt handler (SRESET or MCE) to print something useful.
+ *
+ * We could be really clever and save all host registers in known locations
+ * before setting HSTATE_IN_GUEST, then restoring them all here, and setting
+ * return address to a fixup that sets them up again. But that's a lot of
+ * effort for a small bit of code. Lots of other things to do first.
+ */
+kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+ /*
+ * Hash host doesn't try to recover MMU (requires host SLB reload)
+ */
+ b .
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+ /*
+ * Clean up guest registers to give host a chance to run.
+ */
+ li r10,0
+ mtspr SPRN_AMR,r10
+ mtspr SPRN_IAMR,r10
+ mtspr SPRN_CIABR,r10
+ mtspr SPRN_DAWRX0,r10
+BEGIN_FTR_SECTION
+ mtspr SPRN_DAWRX1,r10
+END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
+ mtspr SPRN_PID,r10
+
+ /*
+ * Switch to host MMU mode
+ */
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ lwz r10, KVM_HOST_LPID(r10)
+ mtspr SPRN_LPID,r10
+
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ ld r10, KVM_HOST_LPCR(r10)
+ mtspr SPRN_LPCR,r10
+
+ /*
+ * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
+ * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
+ */
+ li r10,KVM_GUEST_MODE_NONE
+ stb r10,HSTATE_IN_GUEST(r13)
+ li r10,MSR_RI
+ andc r12,r12,r10
+
+ /*
+ * Go back to interrupt handler. MCE and SRESET have their specific
+ * PACA save area so they should be used directly. They set up their
+ * own stack. The other handlers all use EXGEN. They will use the
+ * guest r1 if it looks like a kernel stack, so just load the
+ * emergency stack and go to program check for all other interrupts.
+ */
+ ld r10,HSTATE_SCRATCH0(r13)
+ cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq machine_check_common
+
+ cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
+ beq system_reset_common
+
+ b .
+#endif
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 2d9193cd73be..c63e263312a4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -840,7 +840,7 @@ bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
kvm_unmap_radix(kvm, range->slot, gfn);
} else {
for (gfn = range->start; gfn < range->end; gfn++)
- kvm_unmap_rmapp(kvm, range->slot, range->start);
+ kvm_unmap_rmapp(kvm, range->slot, gfn);
}
return false;
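In other words, the old loop passed range->start on every iteration, so only the first GFN of the range was ever unmapped; using the loop variable gfn flushes the whole range.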
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index d909c069363e..b5905ae4377c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -21,6 +21,7 @@
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
+#include <asm/plpar_wrappers.h>
/*
* Supported radix tree geometry.
@@ -318,9 +319,19 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
}
psi = shift_to_mmu_psize(pshift);
- rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
- lpid, rb);
+
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ } else {
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB,
+ psize_to_rpti_pgsize(psi),
+ addr, addr + psize);
+ }
+
if (rc)
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
@@ -334,8 +345,14 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
+ 0, -1UL);
if (rc)
pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 083a4e037718..dc6591548f0c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -391,10 +391,6 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -489,10 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
bool prereg = false;
struct kvmppc_spapr_tce_iommu_table *stit;
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
/*
* used to check for invalidations in progress
*/
@@ -602,10 +594,6 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long i, ret;
struct kvmppc_spapr_tce_iommu_table *stit;
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 28a80d240b76..1d1fcc290fca 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -76,6 +76,7 @@
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
+#include <asm/plpar_wrappers.h>
#include "book3s.h"
@@ -103,13 +104,9 @@ static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
-static bool indep_threads_mode = true;
-module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
-
static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");
+MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");
#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
@@ -134,9 +131,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
return kvm->arch.nested_enable && kvm_is_radix(kvm);
}
-/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix __read_mostly;
-
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
@@ -236,7 +230,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp))
- ++vcpu->stat.halt_wakeup;
+ ++vcpu->stat.generic.halt_wakeup;
cpu = READ_ONCE(vcpu->arch.thread_cpu);
if (cpu >= 0 && kvmppc_ipi_thread(cpu))
@@ -807,7 +801,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
* KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
* Keep this in synch with kvmppc_filter_guest_lpcr_hv.
*/
- if (mflags != 0 && mflags != 3)
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
+ kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
return H_UNSUPPORTED_FLAG_START;
return H_TOO_HARD;
default:
@@ -899,6 +894,10 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
* H_SUCCESS if the source vcore wasn't idle (e.g. if it may
* have useful work to do and should not confer) so we don't
* recheck that here.
+ *
+ * For the P9 case of a single vcpu per vcore, the real mode
+ * handler is not called, but no other threads are in the source
+ * vcore anyway.
*/
spin_lock(&vcore->lock);
@@ -924,8 +923,71 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
return yield_count;
}
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+ unsigned long type = kvmppc_get_gpr(vcpu, 6);
+ unsigned long pid, pg_sizes, start, end;
+
+ /*
+ * The partition-scoped invalidations aren't handled here in L0.
+ */
+ if (type & H_RPTI_TYPE_NESTED)
+ return RESUME_HOST;
+
+ pid = kvmppc_get_gpr(vcpu, 4);
+ pg_sizes = kvmppc_get_gpr(vcpu, 7);
+ start = kvmppc_get_gpr(vcpu, 8);
+ end = kvmppc_get_gpr(vcpu, 9);
+
+ do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
+ type, pg_sizes, start, end);
+
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ return RESUME_GUEST;
+}
+
+static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
+ unsigned long id, unsigned long target,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ if (!kvm_is_radix(vcpu->kvm))
+ return H_UNSUPPORTED;
+
+ if (end < start)
+ return H_P5;
+
+ /*
+ * Partition-scoped invalidation for nested guests.
+ */
+ if (type & H_RPTI_TYPE_NESTED) {
+ if (!nesting_enabled(vcpu->kvm))
+ return H_FUNCTION;
+
+ /* Support only cores as target */
+ if (target != H_RPTI_TARGET_CMMU)
+ return H_P2;
+
+ return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+ start, end);
+ }
+
+ /*
+ * Process-scoped invalidation for L1 guests.
+ */
+ do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
+ type, pg_sizes, start, end);
+ return H_SUCCESS;
+}
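For orientation, the GPR layout visible in the dispatch below (r4..r9 carrying id, target, type, pg_sizes, start and end) matches the pseries_rpt_invalidate() wrapper used on the guest/nested side elsewhere in this patch. A minimal sketch of an L1 guest requesting a full process-scoped TLB flush through that wrapper (the helper name is invented for illustration) could look like:

	/*
	 * Sketch only (assumes <asm/hvcall.h> and <asm/plpar_wrappers.h>):
	 * ask the hypervisor to drop every TLB entry translated under one
	 * guest PID, for all page sizes. The argument order mirrors GPRs
	 * r4..r9 of the H_RPT_INVALIDATE hcall.
	 */
	static long flush_all_for_pid(unsigned long pid)
	{
		return pseries_rpt_invalidate(pid, H_RPTI_TARGET_CMMU,
					      H_RPTI_TYPE_TLB,
					      H_RPTI_PAGE_ALL, 0, -1UL);
	}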
+
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
int yield_count;
@@ -937,11 +999,57 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
switch (req) {
+ case H_REMOVE:
+ ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_ENTER:
+ ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_READ:
+ ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_MOD:
+ ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_REF:
+ ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_PROTECT:
+ ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_BULK_REMOVE:
+ ret = kvmppc_h_bulk_remove(vcpu);
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+
case H_CEDE:
break;
case H_PROD:
target = kvmppc_get_gpr(vcpu, 4);
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
@@ -955,7 +1063,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
target = kvmppc_get_gpr(vcpu, 4);
if (target == -1)
break;
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
@@ -971,12 +1079,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 6));
break;
case H_RTAS:
- if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ if (list_empty(&kvm->arch.rtas_tokens))
return RESUME_HOST;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
+ idx = srcu_read_lock(&kvm->srcu);
rc = kvmppc_rtas_hcall(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ srcu_read_unlock(&kvm->srcu, idx);
if (rc == -ENOENT)
return RESUME_HOST;
@@ -1060,15 +1168,23 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
ret = H_HARDWARE;
break;
+ case H_RPT_INVALIDATE:
+ ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7),
+ kvmppc_get_gpr(vcpu, 8),
+ kvmppc_get_gpr(vcpu, 9));
+ break;
case H_SET_PARTITION_TABLE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_set_partition_table(vcpu);
break;
case H_ENTER_NESTED:
ret = H_FUNCTION;
- if (!nesting_enabled(vcpu->kvm))
+ if (!nesting_enabled(kvm))
break;
ret = kvmhv_enter_nested_guest(vcpu);
if (ret == H_INTERRUPT) {
@@ -1083,12 +1199,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
break;
case H_TLB_INVALIDATE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_do_nested_tlbie(vcpu);
break;
case H_COPY_TOFROM_GUEST:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu);
break;
case H_PAGE_INIT:
@@ -1099,7 +1215,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_PAGE_IN:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_page_in(vcpu->kvm,
+ ret = kvmppc_h_svm_page_in(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
@@ -1107,7 +1223,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_PAGE_OUT:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_page_out(vcpu->kvm,
+ ret = kvmppc_h_svm_page_out(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
@@ -1115,12 +1231,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_INIT_START:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_start(vcpu->kvm);
+ ret = kvmppc_h_svm_init_start(kvm);
break;
case H_SVM_INIT_DONE:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_done(vcpu->kvm);
+ ret = kvmppc_h_svm_init_done(kvm);
break;
case H_SVM_INIT_ABORT:
/*
@@ -1130,24 +1246,26 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
* Instead the kvm->arch.secure_guest flag is checked inside
* kvmppc_h_svm_init_abort().
*/
- ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+ ret = kvmppc_h_svm_init_abort(kvm);
break;
default:
return RESUME_HOST;
}
+ WARN_ON_ONCE(ret == H_TOO_HARD);
kvmppc_set_gpr(vcpu, 3, ret);
vcpu->arch.hcall_needed = 0;
return RESUME_GUEST;
}
/*
- * Handle H_CEDE in the nested virtualization case where we haven't
- * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
+ * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
+ * handlers in book3s_hv_rmhandlers.S.
+ *
* This has to be done early, not in kvmppc_pseries_do_hcall(), so
* that the cede logic in kvmppc_run_single_vcpu() works properly.
*/
-static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
+static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.shregs.msr |= MSR_EE;
vcpu->arch.ceded = 1;
@@ -1178,6 +1296,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_XIRR_X:
#endif
case H_PAGE_INIT:
+ case H_RPT_INVALIDATE:
return 1;
}
@@ -1400,13 +1519,39 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
}
case BOOK3S_INTERRUPT_SYSCALL:
{
- /* hcall - punt to userspace */
int i;
- /* hypercall with MSR_PR has already been handled in rmode,
- * and never reaches here.
- */
+ if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+ * Guest userspace executed sc 1. This can only be
+ * reached by the P9 path because the old path
+ * handles this case in realmode hcall handlers.
+ */
+ if (!kvmhv_vcpu_is_radix(vcpu)) {
+ /*
+ * A guest could be running PR KVM, so this
+ * may be a PR KVM hcall. It must be reflected
+ * to the guest kernel as a sc interrupt.
+ */
+ kvmppc_core_queue_syscall(vcpu);
+ } else {
+ /*
+ * Radix guests can not run PR KVM or nested HV
+ * hash guests which might run PR KVM, so this
+ * is always a privilege fault. Send a program
+ * check to guest kernel.
+ */
+ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+ }
+ r = RESUME_GUEST;
+ break;
+ }
+ /*
+ * hcall - gather args and set exit_reason. This will next be
+ * handled by kvmppc_pseries_do_hcall which may be able to deal
+ * with it and resume guest, or may punt to userspace.
+ */
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
@@ -1419,20 +1564,102 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
* it is for an emulated I/O device or because the corresponding
- * host page has been paged out. Any other HDSI/HISI interrupts
- * have been handled already.
+ * host page has been paged out.
+ *
+ * Any other HDSI/HISI interrupts have been handled already for P7/8
+ * guests. For POWER9 hash guests not using rmhandlers, basic hash
+ * fault handling is done here.
*/
- case BOOK3S_INTERRUPT_H_DATA_STORAGE:
- r = RESUME_PAGE_FAULT;
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
+ unsigned long vsid;
+ long err;
+
+ if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
+ r = RESUME_GUEST; /* Just retry if it's the canary */
+ break;
+ }
+
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_DR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, true);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1 || err == -2) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, err);
+ r = RESUME_GUEST;
+ }
break;
- case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ }
+ case BOOK3S_INTERRUPT_H_INST_STORAGE: {
+ unsigned long vsid;
+ long err;
+
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
- vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
- r = RESUME_PAGE_FAULT;
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
+ kvmppc_core_queue_inst_storage(vcpu,
+ vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_IR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, false);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_inst_storage(vcpu, err);
+ r = RESUME_GUEST;
+ }
break;
+ }
+
/*
* This occurs if the guest executes an illegal instruction.
* If the guest debug is disabled, generate a program interrupt
@@ -1593,6 +1820,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0);
break;
+ case BOOK3S_INTERRUPT_SYSCALL:
+ {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /*
+ * The H_RPT_INVALIDATE hcalls issued by nested
+ * guests for process-scoped invalidations when
+ * GTSE=0, are handled here in L0.
+ */
+ if (req == H_RPT_INVALIDATE) {
+ r = kvmppc_nested_h_rpt_invalidate(vcpu);
+ break;
+ }
+
+ r = RESUME_HOST;
+ break;
+ }
default:
r = RESUME_HOST;
break;
@@ -1654,6 +1898,14 @@ unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr)
lpcr &= ~LPCR_AIL;
if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
+ /*
+ * On some POWER9s we force AIL off for radix guests to prevent
+ * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to the
+ * guest's PID, which can result in Q0 translations with LPID=0 PID=PIDR
+ * being cached, which the host TLB management does not expect.
+ */
+ if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ lpcr &= ~LPCR_AIL;
/*
* On POWER9, allow userspace to enable large decrementer for the
@@ -2233,7 +2485,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*/
static int threads_per_vcore(struct kvm *kvm)
{
- if (kvm->arch.threads_indep)
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return 1;
return threads_per_subcore;
}
@@ -2657,7 +2909,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
cpumask_t *cpu_in_guest;
int i;
- cpu = cpu_first_thread_sibling(cpu);
+ cpu = cpu_first_tlb_thread_sibling(cpu);
if (nested) {
cpumask_set_cpu(cpu, &nested->need_tlb_flush);
cpu_in_guest = &nested->cpu_in_guest;
@@ -2671,9 +2923,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
* the other side is the first smp_mb() in kvmppc_run_core().
*/
smp_mb();
- for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, cpu_in_guest))
- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step())
+ if (cpumask_test_cpu(i, cpu_in_guest))
+ smp_call_function_single(i, do_nothing, NULL, 1);
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2704,8 +2957,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
*/
if (prev_cpu != pcpu) {
if (prev_cpu >= 0 &&
- cpu_first_thread_sibling(prev_cpu) !=
- cpu_first_thread_sibling(pcpu))
+ cpu_first_tlb_thread_sibling(prev_cpu) !=
+ cpu_first_tlb_thread_sibling(pcpu))
radix_flush_cpu(kvm, prev_cpu, vcpu);
if (nested)
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
@@ -2967,9 +3220,6 @@ static void prepare_threads(struct kvmppc_vcore *vc)
for_each_runnable_thread(i, vcpu, vc) {
if (signal_pending(vcpu->arch.run_task))
vcpu->arch.ret = -EINTR;
- else if (no_mixing_hpt_and_radix &&
- kvm_is_radix(vc->kvm) != radix_enabled())
- vcpu->arch.ret = -EINVAL;
else if (vcpu->arch.vpa.update_pending ||
vcpu->arch.slb_shadow.update_pending ||
vcpu->arch.dtl.update_pending)
@@ -3176,6 +3426,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
int trap;
bool is_power8;
+ if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
+ return;
+
/*
* Remove from the list any threads that have a signal pending
* or need a VPA update done
@@ -3203,9 +3456,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Make sure we are running on primary threads, and that secondary
* threads are offline. Also check if the number of threads in this
* guest is greater than the current system threads per guest.
- * On POWER9, we need to be not in independent-threads mode if
- * this is a HPT guest on a radix host machine where the
- * CPU threads may not be in different MMU modes.
*/
if ((controlled_threads > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
@@ -3230,18 +3480,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
collect_piggybacks(&core_info, target_threads);
/*
- * On radix, arrange for TLB flushing if necessary.
- * This has to be done before disabling interrupts since
- * it uses smp_call_function().
- */
- pcpu = smp_processor_id();
- if (kvm_is_radix(vc->kvm)) {
- for (sub = 0; sub < core_info.n_subcores; ++sub)
- for_each_runnable_thread(i, vcpu, core_info.vc[sub])
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
- }
-
- /*
* Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s).
@@ -3273,8 +3511,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
cmd_bit = stat_bit = 0;
split = core_info.n_subcores;
sip = NULL;
- is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
- && !cpu_has_feature(CPU_FTR_ARCH_300);
+ is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
if (split > 1) {
sip = &split_info;
@@ -3478,184 +3715,113 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 1);
}
-/*
- * Load up hypervisor-mode registers on P9.
- */
-static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
- unsigned long lpcr)
+static void load_spr_state(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
- s64 hdec;
- u64 tb, purr, spurr;
- int trap;
- unsigned long host_hfscr = mfspr(SPRN_HFSCR);
- unsigned long host_ciabr = mfspr(SPRN_CIABR);
- unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
- unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
- unsigned long host_psscr = mfspr(SPRN_PSSCR);
- unsigned long host_pidr = mfspr(SPRN_PID);
- unsigned long host_dawr1 = 0;
- unsigned long host_dawrx1 = 0;
-
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- host_dawr1 = mfspr(SPRN_DAWR1);
- host_dawrx1 = mfspr(SPRN_DAWRX1);
- }
+ mtspr(SPRN_DSCR, vcpu->arch.dscr);
+ mtspr(SPRN_IAMR, vcpu->arch.iamr);
+ mtspr(SPRN_PSPB, vcpu->arch.pspb);
+ mtspr(SPRN_FSCR, vcpu->arch.fscr);
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+ mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+ mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+ mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ mtspr(SPRN_WORT, vcpu->arch.wort);
+ mtspr(SPRN_TIDR, vcpu->arch.tid);
+ mtspr(SPRN_AMR, vcpu->arch.amr);
+ mtspr(SPRN_UAMOR, vcpu->arch.uamor);
/*
- * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
- * so set HDICE before writing HDEC.
+ * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
+ * clear (or hstate set appropriately to catch those registers
+ * being clobbered if we take an MCE or SRESET), so those are done
+ * later.
*/
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
- isync();
-
- hdec = time_limit - mftb();
- if (hdec < 0) {
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
- isync();
- return BOOK3S_INTERRUPT_HV_DECREMENTER;
- }
- mtspr(SPRN_HDEC, hdec);
-
- if (vc->tb_offset) {
- u64 new_tb = mftb() + vc->tb_offset;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = vc->tb_offset;
- }
-
- if (vc->pcr)
- mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
- mtspr(SPRN_DPDES, vc->dpdes);
- mtspr(SPRN_VTB, vc->vtb);
-
- local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
- local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, vcpu->arch.purr);
- mtspr(SPRN_SPURR, vcpu->arch.spurr);
-
- if (dawr_enabled()) {
- mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
- mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
- mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
- }
- }
- mtspr(SPRN_CIABR, vcpu->arch.ciabr);
- mtspr(SPRN_IC, vcpu->arch.ic);
- mtspr(SPRN_PID, vcpu->arch.pid);
-
- mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
- mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
-
- mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
- mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
- mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
- mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
-
- mtspr(SPRN_AMOR, ~0UL);
-
- mtspr(SPRN_LPCR, lpcr);
- isync();
-
- kvmppc_xive_push_vcpu(vcpu);
-
- mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
- mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
-
- trap = __kvmhv_vcpu_entry_p9(vcpu);
-
- /* Advance host PURR/SPURR by the amount used by guest */
- purr = mfspr(SPRN_PURR);
- spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
- purr - vcpu->arch.purr);
- mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
- spurr - vcpu->arch.spurr);
- vcpu->arch.purr = purr;
- vcpu->arch.spurr = spurr;
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+}
- vcpu->arch.ic = mfspr(SPRN_IC);
- vcpu->arch.pid = mfspr(SPRN_PID);
- vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+static void store_spr_state(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
- vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
- vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
- vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
- vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+ vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ vcpu->arch.pspb = mfspr(SPRN_PSPB);
+ vcpu->arch.fscr = mfspr(SPRN_FSCR);
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+ vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ vcpu->arch.wort = mfspr(SPRN_WORT);
+ vcpu->arch.tid = mfspr(SPRN_TIDR);
+ vcpu->arch.amr = mfspr(SPRN_AMR);
+ vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+ vcpu->arch.dscr = mfspr(SPRN_DSCR);
+}
- /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
- mtspr(SPRN_PSSCR, host_psscr |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
- mtspr(SPRN_HFSCR, host_hfscr);
- mtspr(SPRN_CIABR, host_ciabr);
- mtspr(SPRN_DAWR0, host_dawr0);
- mtspr(SPRN_DAWRX0, host_dawrx0);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, host_dawr1);
- mtspr(SPRN_DAWRX1, host_dawrx1);
- }
- mtspr(SPRN_PID, host_pidr);
+/*
+ * Privileged (non-hypervisor) host registers to save.
+ */
+struct p9_host_os_sprs {
+ unsigned long dscr;
+ unsigned long tidr;
+ unsigned long iamr;
+ unsigned long amr;
+ unsigned long fscr;
+};
- /*
- * Since this is radix, do a eieio; tlbsync; ptesync sequence in
- * case we interrupted the guest between a tlbie and a ptesync.
- */
- asm volatile("eieio; tlbsync; ptesync");
+static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
+{
+ host_os_sprs->dscr = mfspr(SPRN_DSCR);
+ host_os_sprs->tidr = mfspr(SPRN_TIDR);
+ host_os_sprs->iamr = mfspr(SPRN_IAMR);
+ host_os_sprs->amr = mfspr(SPRN_AMR);
+ host_os_sprs->fscr = mfspr(SPRN_FSCR);
+}
- /*
- * cp_abort is required if the processor supports local copy-paste
- * to clear the copy buffer that was under control of the guest.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- asm volatile(PPC_CP_ABORT);
+/* vcpu guest regs must already be saved */
+static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ mtspr(SPRN_PSPB, 0);
+ mtspr(SPRN_WORT, 0);
+ mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
- isync();
+ mtspr(SPRN_DSCR, host_os_sprs->dscr);
+ mtspr(SPRN_TIDR, host_os_sprs->tidr);
+ mtspr(SPRN_IAMR, host_os_sprs->iamr);
- vc->dpdes = mfspr(SPRN_DPDES);
- vc->vtb = mfspr(SPRN_VTB);
- mtspr(SPRN_DPDES, 0);
- if (vc->pcr)
- mtspr(SPRN_PCR, PCR_MASK);
+ if (host_os_sprs->amr != vcpu->arch.amr)
+ mtspr(SPRN_AMR, host_os_sprs->amr);
- if (vc->tb_offset_applied) {
- u64 new_tb = mftb() - vc->tb_offset_applied;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = 0;
- }
+ if (host_os_sprs->fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, host_os_sprs->fscr);
- mtspr(SPRN_HDEC, 0x7fffffff);
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+ /* Save guest CTRL register, set runlatch to 1 */
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, 1);
+}
- return trap;
+static inline bool hcall_is_xics(unsigned long req)
+{
+ return req == H_EOI || req == H_CPPR || req == H_IPI ||
+ req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
}
/*
- * Virtual-mode guest entry for POWER9 and later when the host and
- * guest are both using the radix MMU. The LPIDR has already been set.
+ * Guest entry for POWER9 and later CPUs.
*/
static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
- unsigned long host_dscr = mfspr(SPRN_DSCR);
- unsigned long host_tidr = mfspr(SPRN_TIDR);
- unsigned long host_iamr = mfspr(SPRN_IAMR);
- unsigned long host_amr = mfspr(SPRN_AMR);
- unsigned long host_fscr = mfspr(SPRN_FSCR);
+ struct p9_host_os_sprs host_os_sprs;
s64 dec;
u64 tb;
int trap, save_pmu;
+ WARN_ON_ONCE(vcpu->arch.ceded);
+
dec = mfspr(SPRN_DEC);
tb = mftb();
if (dec < 0)
@@ -3664,7 +3830,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
if (local_paca->kvm_hstate.dec_expires < time_limit)
time_limit = local_paca->kvm_hstate.dec_expires;
- vcpu->arch.ceded = 0;
+ save_p9_host_os_sprs(&host_os_sprs);
kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
@@ -3693,24 +3859,20 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
#endif
mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
- mtspr(SPRN_DSCR, vcpu->arch.dscr);
- mtspr(SPRN_IAMR, vcpu->arch.iamr);
- mtspr(SPRN_PSPB, vcpu->arch.pspb);
- mtspr(SPRN_FSCR, vcpu->arch.fscr);
- mtspr(SPRN_TAR, vcpu->arch.tar);
- mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
- mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
- mtspr(SPRN_BESCR, vcpu->arch.bescr);
- mtspr(SPRN_WORT, vcpu->arch.wort);
- mtspr(SPRN_TIDR, vcpu->arch.tid);
- mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
- mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
- mtspr(SPRN_AMR, vcpu->arch.amr);
- mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+ load_spr_state(vcpu);
+ /*
+ * When setting DEC, we must always deal with irq_work_raise via NMI vs
+ * setting DEC. The problem occurs right as we switch into guest mode
+ * if an NMI hits and sets pending work and sets DEC, then that will
+ * apply to the guest and not bring us back to the host.
+ *
+ * irq_work_raise could check a flag (or possibly LPCR[HDICE] for
+ * example) and set HDEC to 1? That wouldn't solve the nested hv
+ * case which needs to abort the hcall or zero the time limit.
+ *
+ * XXX: Another day's problem.
+ */
mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
if (kvmhv_on_pseries()) {
@@ -3718,7 +3880,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
* We need to save and restore the guest visible part of the
* psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
* doesn't do this for us. Note only required if pseries since
- * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
+ * this is done in kvmhv_vcpu_entry_p9() below otherwise.
*/
unsigned long host_psscr;
/* call our hypervisor to load up HV regs and go */
@@ -3738,6 +3900,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
hvregs.vcpu_token = vcpu->vcpu_id;
}
hvregs.hdec_expiry = time_limit;
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
__pa(&vcpu->arch.regs));
kvmhv_restore_hv_return_state(vcpu, &hvregs);
@@ -3750,15 +3914,41 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
- kvmppc_nested_cede(vcpu);
+ kvmppc_cede(vcpu);
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
} else {
- trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
+ kvmppc_xive_push_vcpu(vcpu);
+ trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ !(vcpu->arch.shregs.msr & MSR_PR)) {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /* H_CEDE has to be handled now, not later */
+ if (req == H_CEDE) {
+ kvmppc_cede(vcpu);
+ kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+ kvmppc_set_gpr(vcpu, 3, 0);
+ trap = 0;
+
+ /* XICS hcalls must be handled before xive is pulled */
+ } else if (hcall_is_xics(req)) {
+ int ret;
+
+ ret = kvmppc_xive_xics_hcall(vcpu, req);
+ if (ret != H_TOO_HARD) {
+ kvmppc_set_gpr(vcpu, 3, ret);
+ trap = 0;
+ }
+ }
+ }
+ kvmppc_xive_pull_vcpu(vcpu);
+
+ if (kvm_is_radix(vcpu->kvm))
+ vcpu->arch.slb_max = 0;
}
- vcpu->arch.slb_max = 0;
dec = mfspr(SPRN_DEC);
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
@@ -3766,36 +3956,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.dec_expires = dec + tb;
vcpu->cpu = -1;
vcpu->arch.thread_cpu = -1;
- /* Save guest CTRL register, set runlatch to 1 */
- vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
- vcpu->arch.iamr = mfspr(SPRN_IAMR);
- vcpu->arch.pspb = mfspr(SPRN_PSPB);
- vcpu->arch.fscr = mfspr(SPRN_FSCR);
- vcpu->arch.tar = mfspr(SPRN_TAR);
- vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
- vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
- vcpu->arch.bescr = mfspr(SPRN_BESCR);
- vcpu->arch.wort = mfspr(SPRN_WORT);
- vcpu->arch.tid = mfspr(SPRN_TIDR);
- vcpu->arch.amr = mfspr(SPRN_AMR);
- vcpu->arch.uamor = mfspr(SPRN_UAMOR);
- vcpu->arch.dscr = mfspr(SPRN_DSCR);
+ store_spr_state(vcpu);
- mtspr(SPRN_PSPB, 0);
- mtspr(SPRN_WORT, 0);
- mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_DSCR, host_dscr);
- mtspr(SPRN_TIDR, host_tidr);
- mtspr(SPRN_IAMR, host_iamr);
-
- if (host_amr != vcpu->arch.amr)
- mtspr(SPRN_AMR, host_amr);
-
- if (host_fscr != vcpu->arch.fscr)
- mtspr(SPRN_FSCR, host_fscr);
+ restore_p9_host_os_sprs(vcpu, &host_os_sprs);
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
store_fp_state(&vcpu->arch.fp);
@@ -3825,6 +3989,9 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vc->in_guest = 0;
mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+ /* We may have raced with new irq work */
+ if (test_irq_work_pending())
+ set_dec(1);
mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
kvmhv_load_host_pmu();
@@ -3925,7 +4092,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
cur = start_poll = ktime_get();
if (vc->halt_poll_ns) {
ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
- ++vc->runner->stat.halt_attempted_poll;
+ ++vc->runner->stat.generic.halt_attempted_poll;
vc->vcore_state = VCORE_POLLING;
spin_unlock(&vc->lock);
@@ -3936,13 +4103,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
break;
}
cur = ktime_get();
- } while (single_task_running() && ktime_before(cur, stop));
+ } while (kvm_vcpu_can_poll(cur, stop));
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
if (!do_sleep) {
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
}
@@ -3954,7 +4121,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
do_sleep = 0;
/* If we polled, count this as a successful poll */
if (vc->halt_poll_ns)
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
@@ -3981,13 +4148,13 @@ out:
ktime_to_ns(cur) - ktime_to_ns(start_wait);
/* Attribute failed poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_fail_ns +=
+ vc->runner->stat.generic.halt_poll_fail_ns +=
ktime_to_ns(start_wait) -
ktime_to_ns(start_poll);
} else {
/* Attribute successful poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_success_ns +=
+ vc->runner->stat.generic.halt_poll_success_ns +=
ktime_to_ns(cur) -
ktime_to_ns(start_poll);
}
@@ -4014,7 +4181,6 @@ out:
/*
* This never fails for a radix guest, as none of the operations it does
* for a radix guest can fail or have a way to report failure.
- * kvmhv_run_single_vcpu() relies on this fact.
*/
static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
{
@@ -4170,7 +4336,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
{
struct kvm_run *run = vcpu->run;
int trap, r, pcpu;
- int srcu_idx, lpid;
+ int srcu_idx;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4193,8 +4359,15 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vc->runner = vcpu;
/* See if the MMU is ready to go */
- if (!kvm->arch.mmu_ready)
- kvmhv_setup_mmu(vcpu);
+ if (!kvm->arch.mmu_ready) {
+ r = kvmhv_setup_mmu(vcpu);
+ if (r) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ vcpu->arch.ret = r;
+ return r;
+ }
+ }
if (need_resched())
cond_resched();
@@ -4207,7 +4380,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
preempt_disable();
pcpu = smp_processor_id();
vc->pcpu = pcpu;
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+ if (kvm_is_radix(kvm))
+ kvmppc_prepare_radix_vcpu(vcpu, pcpu);
local_irq_disable();
hard_irq_disable();
@@ -4244,13 +4418,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vc->vcore_state = VCORE_RUNNING;
trace_kvmppc_run_core(vc, 0);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
- mtspr(SPRN_LPID, lpid);
- isync();
- kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
- }
-
guest_enter_irqoff();
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -4269,11 +4436,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
srcu_read_unlock(&kvm->srcu, srcu_idx);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- mtspr(SPRN_LPID, kvm->arch.host_lpid);
- isync();
- }
-
set_irq_happened(trap);
kvmppc_set_host_core(pcpu);
@@ -4419,19 +4581,23 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- /*
- * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu
- * path, which also handles hash and dependent threads mode.
- */
- if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
- !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
r = kvmppc_run_vcpu(vcpu);
- if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
- !(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+ if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+ * These should have been caught and reflected
+ * into the guest by now. Final sanity check:
+ * don't allow userspace to execute hcalls in
+ * the hypervisor.
+ */
+ r = RESUME_GUEST;
+ continue;
+ }
trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
trace_kvm_hcall_exit(vcpu, r);
@@ -4455,12 +4621,14 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
mtspr(SPRN_EBBRR, ebb_regs[1]);
mtspr(SPRN_BESCR, ebb_regs[2]);
mtspr(SPRN_TAR, user_tar);
- mtspr(SPRN_FSCR, current->thread.fscr);
}
mtspr(SPRN_VRSAVE, user_vrsave);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&kvm->arch.vcpus_running);
+
+ srr_regs_clobbered();
+
return r;
}
@@ -4759,8 +4927,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
mmap_read_lock(kvm->mm);
- vma = find_vma(kvm->mm, hva);
- if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
+ vma = vma_lookup(kvm->mm, hva);
+ if (!vma || (vma->vm_flags & VM_IO))
goto up_out;
psize = vma_kernel_pagesize(vma);
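The switch to vma_lookup() folds the old vm_start check into the lookup itself: it only returns a VMA that actually contains the address. Roughly, as a sketch of the semantics rather than the mm/ implementation:

	/* Sketch: what vma_lookup() guarantees relative to find_vma(). */
	static struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
							unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* find_vma() may return the next VMA above addr; reject it. */
		if (vma && addr < vma->vm_start)
			vma = NULL;
		return vma;
	}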
@@ -5039,18 +5207,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
- * On POWER9, we only need to do this if the "indep_threads_mode"
- * module parameter has been set to N.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
- pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
- kvm->arch.threads_indep = true;
- } else {
- kvm->arch.threads_indep = indep_threads_mode;
- }
- }
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_activated();
/*
@@ -5091,7 +5249,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
debugfs_remove_recursive(kvm->arch.debugfs_dir);
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
@@ -5512,7 +5670,9 @@ static int kvmhv_enable_nested(struct kvm *kvm)
{
if (!nested)
return -EPERM;
- if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+ if (!radix_enabled())
return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
@@ -5675,11 +5835,25 @@ static int kvmhv_enable_dawr1(struct kvm *kvm)
static bool kvmppc_hash_v3_possible(void)
{
- if (radix_enabled() && no_mixing_hpt_and_radix)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return false;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
return false;
- return cpu_has_feature(CPU_FTR_ARCH_300) &&
- cpu_has_feature(CPU_FTR_HVMODE);
+ /*
+ * POWER9 chips before version 2.02 can't have some threads in
+ * HPT mode and some in radix mode on the same core.
+ */
+ if (radix_enabled()) {
+ unsigned int pvr = mfspr(SPRN_PVR);
+ if ((pvr >> 16) == PVR_POWER9 &&
+ (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+ ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+ return false;
+ }
+
+ return true;
}
static struct kvmppc_ops kvm_ops_hv = {
@@ -5823,18 +5997,6 @@ static int kvmppc_book3s_init_hv(void)
if (kvmppc_radix_possible())
r = kvmppc_radix_init();
- /*
- * POWER9 chips before version 2.02 can't have some threads in
- * HPT mode and some in radix mode on the same core.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- unsigned int pvr = mfspr(SPRN_PVR);
- if ((pvr >> 16) == PVR_POWER9 &&
- (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
- ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
- no_mixing_hpt_and_radix = true;
- }
-
r = kvmppc_uvmem_init();
if (r < 0)
pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7a0e33a9c980..be8ef1c5b1bf 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -35,21 +35,6 @@
#include "book3s_xive.h"
/*
- * The XIVE module will populate these when it loads
- */
-unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
-EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
-
-/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
* should be power of 2.
*/
@@ -196,16 +181,9 @@ int kvmppc_hwrng_present(void)
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
-long kvmppc_h_random(struct kvm_vcpu *vcpu)
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
- int r;
-
- /* Only need to do the expensive mfmsr() on radix */
- if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
- r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
- else
- r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
- if (r)
+ if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
return H_SUCCESS;
return H_HARDWARE;
@@ -221,15 +199,6 @@ void kvmhv_rm_send_ipi(int cpu)
void __iomem *xics_phys;
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
- /* For a nested hypervisor, use the XICS via hcall */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
- IPI_PRIORITY);
- return;
- }
-
/* On POWER9 we can use msgsnd for any destination cpu. */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -442,19 +411,12 @@ static long kvmppc_read_one_intr(bool *again)
return 1;
/* Now read the interrupt from the ICP */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
- xirr = cpu_to_be32(retbuf[0]);
- } else {
- xics_phys = local_paca->kvm_hstate.xics_phys;
- rc = 0;
- if (!xics_phys)
- rc = opal_int_get_xirr(&xirr, false);
- else
- xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
- }
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ rc = 0;
+ if (!xics_phys)
+ rc = opal_int_get_xirr(&xirr, false);
+ else
+ xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
if (rc < 0)
return 1;
@@ -483,13 +445,7 @@ static long kvmppc_read_one_intr(bool *again)
*/
if (xisr == XICS_IPI) {
rc = 0;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(), 0xff);
- plpar_hcall_raw(H_EOI, retbuf, h_xirr);
- } else if (xics_phys) {
+ if (xics_phys) {
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
@@ -515,13 +471,7 @@ static long kvmppc_read_one_intr(bool *again)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(),
- IPI_PRIORITY);
- } else if (xics_phys)
+ if (xics_phys)
__raw_rm_writeb(IPI_PRIORITY,
xics_phys + XICS_MFRR);
else
@@ -541,22 +491,13 @@ static long kvmppc_read_one_intr(bool *again)
}
#ifdef CONFIG_KVM_XICS
-static inline bool is_rm(void)
-{
- return !(mfmsr() & MSR_DR);
-}
-
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_xirr(vcpu);
+ else
return xics_rm_h_xirr(vcpu);
}
@@ -565,13 +506,9 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
vcpu->arch.regs.gpr[5] = get_tb();
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_xirr(vcpu);
+ else
return xics_rm_h_xirr(vcpu);
}
@@ -579,13 +516,9 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipoll(vcpu, server);
- if (unlikely(!__xive_vm_h_ipoll))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipoll(vcpu, server);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_ipoll(vcpu, server);
+ else
return H_TOO_HARD;
}
@@ -594,13 +527,9 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipi(vcpu, server, mfrr);
- if (unlikely(!__xive_vm_h_ipi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipi(vcpu, server, mfrr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_ipi(vcpu, server, mfrr);
+ else
return xics_rm_h_ipi(vcpu, server, mfrr);
}
@@ -608,13 +537,9 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_cppr(vcpu, cppr);
- if (unlikely(!__xive_vm_h_cppr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_cppr(vcpu, cppr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_cppr(vcpu, cppr);
+ else
return xics_rm_h_cppr(vcpu, cppr);
}
@@ -622,13 +547,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_eoi(vcpu, xirr);
- if (unlikely(!__xive_vm_h_eoi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_eoi(vcpu, xirr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_eoi(vcpu, xirr);
+ else
return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
@@ -800,7 +721,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
* Thus we make all 4 threads use the same bit.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu = cpu_first_thread_sibling(pcpu);
+ pcpu = cpu_first_tlb_thread_sibling(pcpu);
if (nested)
need_tlb_flush = &nested->need_tlb_flush;
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 327417d79eac..4444f83cb133 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -58,7 +58,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* Put whatever is in the decrementer into the
* hypervisor decrementer.
- * Because of a hardware deviation in P8 and P9,
+ * Because of a hardware deviation in P8,
* we need to set LPCR[HDICE] before writing HDEC.
*/
ld r5, HSTATE_KVM_VCORE(r13)
@@ -67,15 +67,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
ori r8, r9, LPCR_HDICE
mtspr SPRN_LPCR, r8
isync
- andis. r0, r9, LPCR_LD@h
mfspr r8,SPRN_DEC
mftb r7
-BEGIN_FTR_SECTION
- /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
- bne 32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r8,r8
-32: mtspr SPRN_HDEC,r8
+ mtspr SPRN_HDEC,r8
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 60724f674421..8543ad538b0c 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -19,6 +19,7 @@
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
static struct patb_entry *pseries_partition_tb;
@@ -53,7 +54,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->dawrx1 = vcpu->arch.dawrx1;
}
-static void byteswap_pt_regs(struct pt_regs *regs)
+/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
+static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
unsigned long *addr = (unsigned long *) regs;
@@ -467,8 +469,15 @@ static void kvmhv_flush_lpid(unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
+ H_RPTI_TYPE_PAT,
+ H_RPTI_PAGE_ALL, 0, -1UL);
if (rc)
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
@@ -1214,6 +1223,113 @@ long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
return H_SUCCESS;
}
+static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
+ unsigned long lpid, unsigned long ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (gp) {
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ kvmhv_put_nested(gp);
+ }
+ return H_SUCCESS;
+}
+
+/*
+ * Number of pages above which we invalidate the entire LPID rather than
+ * flush individual pages.
+ */
+static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;
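+/*
+ * Worked example (assuming the 64K page size is selected in pg_sizes):
+ * a 4MB range is 4MB >> 16 = 64 pages, which exceeds the ceiling of 33,
+ * so the whole LPID is flushed; a 2MB range is 32 pages and is flushed
+ * page by page.
+ */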
+
+static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
+ unsigned long lpid,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end)
+{
+ int ret = H_P4;
+ unsigned long addr, nr_pages;
+ struct mmu_psize_def *def;
+ unsigned long psize, ap, page_size;
+ bool flush_lpid;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
+ if (flush_lpid)
+ return do_tlb_invalidate_nested_all(vcpu, lpid,
+ RIC_FLUSH_TLB);
+ addr = start;
+ ap = mmu_get_ap(psize);
+ page_size = 1UL << def->shift;
+ do {
+ ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
+ get_epn(addr));
+ if (ret)
+ return H_P4;
+ addr += page_size;
+ } while (addr < end);
+ }
+ return ret;
+}
+
+/*
+ * Performs partition-scoped invalidations for nested guests
+ * as part of the H_RPT_INVALIDATE hcall.
+ */
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ /*
+ * If the L2 lpid isn't valid, we need to return H_PARAMETER.
+ *
+ * However, nested KVM issues an L2 lpid flush call when creating
+ * partition table entries for L2. This happens even before the
+ * corresponding shadow lpid is created in the HV, which happens in
+ * the H_ENTER_NESTED call. Since we can't differentiate this case from
+ * the invalid case, we ignore such flush requests and return success.
+ */
+ if (!kvmhv_find_nested(vcpu->kvm, lpid))
+ return H_SUCCESS;
+
+ /*
+ * A flush all request can be handled by a full lpid flush only.
+ */
+ if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
+
+ /*
+ * We don't need to handle a PWC flush like the process table here,
+ * because the intermediate partition-scoped table in a nested guest
+ * doesn't really have a PWC. The only level with a PWC is L0, and for a
+ * nested invalidate at L0 we always do kvm_flush_lpid(), which does
+ * radix__flush_all_lpid(). For a range invalidate at any level, we
+ * are not removing the higher-level page tables and hence there is
+ * no PWC invalidate needed.
+ *
+ * if (type & H_RPTI_TYPE_PWC) {
+ * ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
+ * if (ret)
+ * return H_P4;
+ * }
+ */
+
+ if (start == 0 && end == -1)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
+
+ if (type & H_RPTI_TYPE_TLB)
+ return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
+ start, end);
+ return H_SUCCESS;
+}
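+/*
+ * Illustrative call (hypothetical values, for explanation only): a TLB-only
+ * invalidation of the range 0x10000..0x20000 for every page size would be
+ *
+ *	do_h_rpt_invalidate_pat(vcpu, lpid, H_RPTI_TYPE_TLB,
+ *				H_RPTI_PAGE_ALL, 0x10000, 0x20000);
+ *
+ * whereas a type containing H_RPTI_TYPE_NESTED_ALL, or start == 0 and
+ * end == -1, falls back to a full LPID flush.
+ */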
+
/* Used to convert a nested guest real address to a L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp,
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
new file mode 100644
index 000000000000..83f592eadcd2
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <asm/asm-prototypes.h>
+#include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 tb = mftb() - vc->tb_offset_applied;
+
+ vcpu->arch.cur_activity = next;
+ vcpu->arch.cur_tb_start = tb;
+}
+
+static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ struct kvmhv_tb_accumulator *curr;
+ u64 tb = mftb() - vc->tb_offset_applied;
+ u64 prev_tb;
+ u64 delta;
+ u64 seq;
+
+ curr = vcpu->arch.cur_activity;
+ vcpu->arch.cur_activity = next;
+ prev_tb = vcpu->arch.cur_tb_start;
+ vcpu->arch.cur_tb_start = tb;
+
+ if (!curr)
+ return;
+
+ delta = tb - prev_tb;
+
+ seq = curr->seqcount;
+ curr->seqcount = seq + 1;
+ smp_wmb();
+ curr->tb_total += delta;
+ if (seq == 0 || delta < curr->tb_min)
+ curr->tb_min = delta;
+ if (delta > curr->tb_max)
+ curr->tb_max = delta;
+ smp_wmb();
+ curr->seqcount = seq + 2;
+}
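+/*
+ * Illustrative reader (hypothetical, for explanation only): the odd/even
+ * seqcount protocol above lets a consumer retry until it observes a stable
+ * snapshot of the accumulator, along the lines of:
+ *
+ *	do {
+ *		seq = READ_ONCE(acc->seqcount);
+ *		smp_rmb();
+ *		total = acc->tb_total;
+ *		min = acc->tb_min;
+ *		max = acc->tb_max;
+ *		smp_rmb();
+ *	} while ((seq & 1) || READ_ONCE(acc->seqcount) != seq);
+ */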
+
+#define start_timing(vcpu, next) __start_timing(vcpu, next)
+#define end_timing(vcpu) __start_timing(vcpu, NULL)
+#define accumulate_time(vcpu, next) __accumulate_time(vcpu, next)
+#else
+#define start_timing(vcpu, next) do {} while (0)
+#define end_timing(vcpu) do {} while (0)
+#define accumulate_time(vcpu, next) do {} while (0)
+#endif
+
+static inline void mfslb(unsigned int idx, u64 *slbee, u64 *slbev)
+{
+ asm volatile("slbmfev %0,%1" : "=r" (*slbev) : "r" (idx));
+ asm volatile("slbmfee %0,%1" : "=r" (*slbee) : "r" (idx));
+}
+
+static inline void mtslb(u64 slbee, u64 slbev)
+{
+ asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee));
+}
+
+static inline void clear_slb_entry(unsigned int idx)
+{
+ mtslb(idx, 0);
+}
+
+static inline void slb_clear_invalidate_partition(void)
+{
+ clear_slb_entry(0);
+ asm volatile(PPC_SLBIA(6));
+}
+
+/*
+ * Malicious or buggy radix guests may have inserted SLB entries
+ * (only 0..3 because radix always runs with UPRT=1), so these must
+ * be cleared here to avoid side-channels. slbmte is used rather
+ * than slbia, as it won't clear cached translations.
+ */
+static void radix_clear_slb(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ clear_slb_entry(i);
+}
+
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ u32 lpid;
+
+ lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+
+ /*
+ * All the isync()s are overkill but trivially follow the ISA
+ * requirements. Some can likely be replaced with a justification
+ * comment explaining why they are not needed.
+ */
+ isync();
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+ mtspr(SPRN_PID, vcpu->arch.pid);
+ isync();
+}
+
+static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ u32 lpid;
+ int i;
+
+ lpid = kvm->arch.lpid;
+
+ mtspr(SPRN_LPID, lpid);
+ mtspr(SPRN_LPCR, lpcr);
+ mtspr(SPRN_PID, vcpu->arch.pid);
+
+ for (i = 0; i < vcpu->arch.slb_max; i++)
+ mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
+
+ isync();
+}
+
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
+{
+ isync();
+ mtspr(SPRN_PID, pid);
+ isync();
+ mtspr(SPRN_LPID, kvm->arch.host_lpid);
+ isync();
+ mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+ isync();
+
+ if (!radix_enabled())
+ slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+ if (!radix_enabled()) {
+ /*
+ * A hash host could save and restore host SLB entries to
+ * reduce the SLB fault overhead of VM exits, but for now the
+ * existing code clears all entries and restores just the
+ * bolted ones when switching back to the host.
+ */
+ slb_clear_invalidate_partition();
+ }
+}
+
+static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+ if (kvm_is_radix(kvm)) {
+ radix_clear_slb();
+ } else {
+ int i;
+ int nr = 0;
+
+ /*
+ * This must run before switching to host (radix host can't
+ * access all SLBs).
+ */
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ u64 slbee, slbev;
+ mfslb(i, &slbee, &slbev);
+ if (slbee & SLB_ESID_V) {
+ vcpu->arch.slb[nr].orige = slbee | i;
+ vcpu->arch.slb[nr].origv = slbev;
+ nr++;
+ }
+ }
+ vcpu->arch.slb_max = nr;
+ slb_clear_invalidate_partition();
+ }
+}
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ s64 hdec;
+ u64 tb, purr, spurr;
+ u64 *exsave;
+ bool ri_set;
+ int trap;
+ unsigned long msr;
+ unsigned long host_hfscr;
+ unsigned long host_ciabr;
+ unsigned long host_dawr0;
+ unsigned long host_dawrx0;
+ unsigned long host_psscr;
+ unsigned long host_pidr;
+ unsigned long host_dawr1;
+ unsigned long host_dawrx1;
+
+ hdec = time_limit - mftb();
+ if (hdec < 0)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+
+ WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
+ WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
+
+ start_timing(vcpu, &vcpu->arch.rm_entry);
+
+ vcpu->arch.ceded = 0;
+
+ if (vc->tb_offset) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = vc->tb_offset;
+ }
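+ /*
+ * Worked example for the carry check above: TBU40 sets only the
+ * upper 40 bits and the low 24 bits keep ticking. If new_tb had
+ * low bits 0xfffff0 and the re-read tb shows 0x000010, the low
+ * bits wrapped across the write, so the upper bits are advanced
+ * by one unit (0x1000000).
+ */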
+
+ msr = mfmsr();
+
+ host_hfscr = mfspr(SPRN_HFSCR);
+ host_ciabr = mfspr(SPRN_CIABR);
+ host_dawr0 = mfspr(SPRN_DAWR0);
+ host_dawrx0 = mfspr(SPRN_DAWRX0);
+ host_psscr = mfspr(SPRN_PSSCR);
+ host_pidr = mfspr(SPRN_PID);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ host_dawr1 = mfspr(SPRN_DAWR1);
+ host_dawrx1 = mfspr(SPRN_DAWRX1);
+ }
+
+ if (vc->pcr)
+ mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
+ mtspr(SPRN_DPDES, vc->dpdes);
+ mtspr(SPRN_VTB, vc->vtb);
+
+ local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
+ local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, vcpu->arch.purr);
+ mtspr(SPRN_SPURR, vcpu->arch.spurr);
+
+ if (dawr_enabled()) {
+ mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+ mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+ mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+ }
+ }
+ mtspr(SPRN_CIABR, vcpu->arch.ciabr);
+ mtspr(SPRN_IC, vcpu->arch.ic);
+
+ mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+
+ mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
+
+ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+ mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
+
+ /*
+ * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
+ * Interrupt (HDSI) the HDSISR is not updated at all.
+ *
+ * To work around this we put a canary value into the HDSISR before
+ * returning to a guest and then check for this canary when we take an
+ * HDSI. If we find the canary on an HDSI, we know the hardware didn't
+ * update the HDSISR. In this case we return to the guest to retake the
+ * HDSI, which should correctly update the HDSISR on the second HDSI
+ * entry.
+ *
+ * Just do this on all p9 processors for now.
+ */
+ mtspr(SPRN_HDSISR, HDSISR_CANARY);
+
+ mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
+ mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
+ mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
+ mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
+
+ mtspr(SPRN_AMOR, ~0UL);
+
+ local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+
+ /*
+ * A hash host, hash guest, or radix guest with the prefetch bug all have
+ * to disable the MMU before switching to guest MMU state.
+ */
+ if (!radix_enabled() || !kvm_is_radix(kvm) ||
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
+ save_clear_host_mmu(kvm);
+
+ if (kvm_is_radix(kvm)) {
+ switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
+ if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(0, 1); /* clear RI */
+
+ } else {
+ switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
+ }
+
+ /* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
+ kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+
+ /*
+ * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
+ * so set guest LPCR (with HDICE) before writing HDEC.
+ */
+ mtspr(SPRN_HDEC, hdec);
+
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
+ mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+
+ accumulate_time(vcpu, &vcpu->arch.guest_time);
+
+ kvmppc_p9_enter_guest(vcpu);
+
+ accumulate_time(vcpu, &vcpu->arch.rm_intr);
+
+ /* XXX: Could get these from r11/12 and paca exsave instead */
+ vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
+ vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+
+ /* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
+ trap = local_paca->kvm_hstate.scratch0 & ~0x2;
+
+ /* HSRR interrupts leave MSR[RI] unchanged, SRR interrupts clear it. */
+ ri_set = false;
+ if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ if (trap != BOOK3S_INTERRUPT_SYSCALL &&
+ (vcpu->arch.shregs.msr & MSR_RI))
+ ri_set = true;
+ exsave = local_paca->exgen;
+ } else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
+ exsave = local_paca->exnmi;
+ } else { /* trap == 0x200 */
+ exsave = local_paca->exmc;
+ }
+
+ vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
+ vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
+
+ /*
+ * Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
+ * and hstate scratch (which we need to move into exsave to make
+ * this re-entrant vs. SRESET/MCE).
+ */
+ if (ri_set) {
+ if (unlikely(!(mfmsr() & MSR_RI))) {
+ __mtmsrd(MSR_RI, 1);
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(mfmsr() & MSR_RI);
+ __mtmsrd(MSR_RI, 1);
+ }
+
+ vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
+ vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
+ vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
+ vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
+ vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
+ vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
+ vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
+ vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
+
+ vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+
+ if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ kvmppc_realmode_machine_check(vcpu);
+
+ } else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
+ kvmppc_realmode_hmi_handler();
+
+ } else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) {
+ vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Softpatch interrupt for transactional memory emulation cases
+ * on POWER9 DD2.2. This is early in the guest exit path - we
+ * haven't saved registers or done a treclaim yet.
+ */
+ } else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ /*
+ * The cases we want to handle here are those where the guest
+ * is in real suspend mode and is trying to transition to
+ * transactional mode.
+ */
+ if (local_paca->kvm_hstate.fake_suspend &&
+ (vcpu->arch.shregs.msr & MSR_TS_S)) {
+ if (kvmhv_p9_tm_emulation_early(vcpu)) {
+ /* Prevent it being handled again. */
+ trap = 0;
+ }
+ }
+#endif
+ }
+
+ accumulate_time(vcpu, &vcpu->arch.rm_exit);
+
+ /* Advance host PURR/SPURR by the amount used by guest */
+ purr = mfspr(SPRN_PURR);
+ spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
+ purr - vcpu->arch.purr);
+ mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
+ spurr - vcpu->arch.spurr);
+ vcpu->arch.purr = purr;
+ vcpu->arch.spurr = spurr;
+
+ vcpu->arch.ic = mfspr(SPRN_IC);
+ vcpu->arch.pid = mfspr(SPRN_PID);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+
+ vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
+ vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+ mtspr(SPRN_PSSCR, host_psscr |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ mtspr(SPRN_HFSCR, host_hfscr);
+ mtspr(SPRN_CIABR, host_ciabr);
+ mtspr(SPRN_DAWR0, host_dawr0);
+ mtspr(SPRN_DAWRX0, host_dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, host_dawr1);
+ mtspr(SPRN_DAWRX1, host_dawrx1);
+ }
+
+ if (kvm_is_radix(kvm)) {
+ /*
+ * Since this is radix, do an eieio; tlbsync; ptesync sequence
+ * in case we interrupted the guest between a tlbie and a
+ * ptesync.
+ */
+ asm volatile("eieio; tlbsync; ptesync");
+ }
+
+ /*
+ * cp_abort is required if the processor supports local copy-paste
+ * to clear the copy buffer that was under control of the guest.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ asm volatile(PPC_CP_ABORT);
+
+ vc->dpdes = mfspr(SPRN_DPDES);
+ vc->vtb = mfspr(SPRN_VTB);
+ mtspr(SPRN_DPDES, 0);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, PCR_MASK);
+
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = 0;
+ }
+
+ mtspr(SPRN_HDEC, 0x7fffffff);
+
+ save_clear_guest_mmu(kvm, vcpu);
+ switch_mmu_to_host(kvm, host_pidr);
+ local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
+
+ /*
+ * If we are in real mode, only turn the MMU back on after the MMU
+ * context has been switched to the host, to avoid the
+ * P9_RADIX_PREFETCH_BUG.
+ */
+ __mtmsrd(msr, 0);
+
+ end_timing(vcpu);
+
+ return trap;
+}
+EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 7af7c70f1468..632b2545072b 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -23,20 +23,9 @@
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
{
- unsigned long addr = (unsigned long) x;
- pte_t *p;
- /*
- * assume we don't have huge pages in vmalloc space...
- * So don't worry about THP collapse/split. Called
- * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
- */
- p = find_init_mm_pte(addr, NULL);
- if (!p || !pte_present(*p))
- return NULL;
- addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
- return __va(addr);
+ return __va(ppc_find_vmap_phys((unsigned long)addr));
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
@@ -57,6 +46,10 @@ static int global_invalidates(struct kvm *kvm)
else
global = 1;
+ /* LPID has been switched to host if in virt mode so can't do local */
+ if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
+ global = 1;
+
if (!global) {
/* any other core might now have stale TLB entries... */
smp_wmb();
@@ -67,7 +60,7 @@ static int global_invalidates(struct kvm *kvm)
* so use the bit for the first thread to represent the core.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
- cpu = cpu_first_thread_sibling(cpu);
+ cpu = cpu_first_tlb_thread_sibling(cpu);
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
}
@@ -409,6 +402,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -553,6 +547,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_remove);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -671,6 +666,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn)
@@ -741,6 +737,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_protect);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -781,6 +778,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
}
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_read);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -829,6 +827,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -876,6 +875,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
unsigned long gpa, int writing, unsigned long *hpa,
@@ -1294,3 +1294,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
return -1; /* send fault up to host kernel mode */
}
+EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index c2c9c733f359..0a11ec88a0ae 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -141,13 +141,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
return;
}
- if (xive_enabled() && kvmhv_on_pseries()) {
- /* No XICS access or hypercalls available, too hard */
- this_icp->rm_action |= XICS_RM_KICK_VCPU;
- this_icp->rm_kick_target = vcpu;
- return;
- }
-
/*
* Check if the core is loaded,
* if not, find an available host core to post to wake the VCPU,
@@ -771,14 +764,6 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
void __iomem *xics_phys;
int64_t rc;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- iosync();
- plpar_hcall_raw(H_EOI, retbuf, hwirq);
- return;
- }
-
rc = pnv_opal_pci_msi_eoi(c, hwirq);
if (rc)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5e634db4809b..8dd437d7a2c6 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -25,18 +25,10 @@
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
-#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
-#include <asm/ultravisor-api.h>
-
-/* Sign-extend HDEC if not on POWER9 */
-#define EXTEND_HDEC(reg) \
-BEGIN_FTR_SECTION; \
- extsw reg, reg; \
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
@@ -44,9 +36,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define NAPPING_UNSPLIT 3
/* Stack frame offsets for kvmppc_hv_entry */
-#define SFS 208
+#define SFS 160
#define STACK_SLOT_TRAP (SFS-4)
-#define STACK_SLOT_SHORT_PATH (SFS-8)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
@@ -57,10 +48,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_HFSCR (SFS-72)
#define STACK_SLOT_AMR (SFS-80)
#define STACK_SLOT_UAMOR (SFS-88)
-#define STACK_SLOT_DAWR1 (SFS-96)
-#define STACK_SLOT_DAWRX1 (SFS-104)
-/* the following is used by the P9 short path */
-#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
+#define STACK_SLOT_FSCR (SFS-96)
/*
* Call kvmppc_hv_entry in real mode.
@@ -136,15 +124,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Return the trap number on this thread as the return value */
mr r3, r12
- /*
- * If we came back from the guest via a relocation-on interrupt,
- * we will be in virtual mode at this point, which makes it a
- * little easier to get back to the caller.
- */
- mfmsr r0
- andi. r0, r0, MSR_IR /* in real mode? */
- bne .Lvirt_return
-
/* RFI into the highmem handler */
mfmsr r6
li r0, MSR_RI
@@ -154,11 +133,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtsrr1 r7
RFI_TO_KERNEL
- /* Virtual-mode return */
-.Lvirt_return:
- mtlr r8
- blr
-
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -245,7 +219,7 @@ kvm_novcpu_wakeup:
/* See if our timeslice has expired (HDEC is negative) */
mfspr r0, SPRN_HDEC
- EXTEND_HDEC(r0)
+ extsw r0, r0
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
cmpdi r0, 0
blt kvm_novcpu_exit
@@ -347,10 +321,8 @@ kvm_secondary_got_guest:
lbz r4, HSTATE_PTID(r13)
cmpwi r4, 0
bne 63f
- LOAD_REG_ADDR(r6, decrementer_max)
- ld r6, 0(r6)
+ lis r6,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC, r6
-BEGIN_FTR_SECTION
/* and set per-LPAR registers, if doing dynamic micro-threading */
ld r6, HSTATE_SPLIT_MODE(r13)
cmpdi r6, 0
@@ -362,7 +334,6 @@ BEGIN_FTR_SECTION
ld r0, KVM_SPLIT_LDBAR(r6)
mtspr SPRN_LDBAR, r0
isync
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
63:
/* Order load of vcpu after load of vcore */
lwsync
@@ -433,7 +404,6 @@ kvm_no_guest:
blr
53:
-BEGIN_FTR_SECTION
HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
cmpdi r5, 0
@@ -448,14 +418,6 @@ BEGIN_FTR_SECTION
b kvm_unsplit_nap
60: HMT_MEDIUM
b kvm_secondary_got_guest
-FTR_SECTION_ELSE
- HMT_LOW
- ld r5, HSTATE_KVM_VCORE(r13)
- cmpdi r5, 0
- beq kvm_no_guest
- HMT_MEDIUM
- b kvm_secondary_got_guest
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
54: li r0, KVM_HWTHREAD_IN_KVM
stb r0, HSTATE_HWTHREAD_STATE(r13)
@@ -581,13 +543,11 @@ kvmppc_hv_entry:
bne 10f
lwz r7,KVM_LPID(r9)
-BEGIN_FTR_SECTION
ld r6,KVM_SDR1(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r0
ptesync
mtspr SPRN_SDR1,r6 /* switch to partition page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -668,16 +628,6 @@ kvmppc_got_guest:
/* Save host values of some registers */
BEGIN_FTR_SECTION
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- mfspr r7, SPRN_PID
- std r5, STACK_SLOT_TID(r1)
- std r6, STACK_SLOT_PSSCR(r1)
- std r7, STACK_SLOT_PID(r1)
- mfspr r5, SPRN_HFSCR
- std r5, STACK_SLOT_HFSCR(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_CIABR
mfspr r6, SPRN_DAWR0
mfspr r7, SPRN_DAWRX0
@@ -686,13 +636,9 @@ BEGIN_FTR_SECTION
std r6, STACK_SLOT_DAWR0(r1)
std r7, STACK_SLOT_DAWRX0(r1)
std r8, STACK_SLOT_IAMR(r1)
+ mfspr r5, SPRN_FSCR
+ std r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- mfspr r6, SPRN_DAWR1
- mfspr r7, SPRN_DAWRX1
- std r6, STACK_SLOT_DAWR1(r1)
- std r7, STACK_SLOT_DAWRX1(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
mfspr r5, SPRN_AMR
std r5, STACK_SLOT_AMR(r1)
@@ -710,13 +656,9 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -783,12 +725,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
ld r6, VCPU_DAWRX0(r4)
mtspr SPRN_DAWR0, r5
mtspr SPRN_DAWRX0, r6
-BEGIN_FTR_SECTION
- ld r5, VCPU_DAWR1(r4)
- ld r6, VCPU_DAWRX1(r4)
- mtspr SPRN_DAWR1, r5
- mtspr SPRN_DAWRX1, r6
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
1:
ld r7, VCPU_CIABR(r4)
ld r8, VCPU_TAR(r4)
@@ -806,7 +742,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
mtspr SPRN_BESCR, r6
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
-BEGIN_FTR_SECTION
/* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
@@ -817,18 +752,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_CSIGR, r7
mtspr SPRN_TACR, r8
nop
-FTR_SECTION_ELSE
- /* POWER9-only registers */
- ld r5, VCPU_TID(r4)
- ld r6, VCPU_PSSCR(r4)
- lbz r8, HSTATE_FAKE_SUSPEND(r13)
- oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
- rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
- ld r7, VCPU_HFSCR(r4)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:
ld r5, VCPU_SPRG0(r4)
@@ -898,23 +821,15 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/* Check if HDEC expires soon */
mfspr r3, SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
- ld r6, VCPU_KVM(r4)
- lbz r0, KVM_RADIX(r6)
- cmpwi r0, 0
- bne 9f
-
- /* For hash guest, clear out and reload the SLB */
-BEGIN_MMU_FTR_SECTION
- /* Radix host won't have populated the SLB, so no need to clear */
+ /* Clear out and reload the SLB */
li r6, 0
slbmte r6, r6
PPC_SLBIA(6)
ptesync
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
lwz r5,VCPU_SLB_MAX(r4)
@@ -929,96 +844,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
bdnz 1b
9:
-#ifdef CONFIG_KVM_XICS
- /* We are entering the guest on that thread, push VCPU to XIVE */
- ld r11, VCPU_XIVE_SAVED_STATE(r4)
- li r9, TM_QW1_OS
- lwz r8, VCPU_XIVE_CAM_WORD(r4)
- cmpwi r8, 0
- beq no_xive
- li r7, TM_QW1_OS + TM_WORD2
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdx r11,r9,r10
- stwx r8,r7,r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdcix r11,r9,r10
- stwcix r8,r7,r10
-3: li r9, 1
- stb r9, VCPU_XIVE_PUSHED(r4)
- eieio
-
- /*
- * We clear the irq_pending flag. There is a small chance of a
- * race vs. the escalation interrupt happening on another
- * processor setting it again, but the only consequence is to
- * cause a spurrious wakeup on the next H_CEDE which is not an
- * issue.
- */
- li r0,0
- stb r0, VCPU_IRQ_PENDING(r4)
-
- /*
- * In single escalation mode, if the escalation interrupt is
- * on, we mask it.
- */
- lbz r0, VCPU_XIVE_ESC_ON(r4)
- cmpwi cr1, r0,0
- beq cr1, 1f
- li r9, XIVE_ESB_SET_PQ_01
- beq 4f /* in real mode? */
- ld r10, VCPU_XIVE_ESC_VADDR(r4)
- ldx r0, r10, r9
- b 5f
-4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
- ldcix r0, r10, r9
-5: sync
-
- /* We have a possible subtle race here: The escalation interrupt might
- * have fired and be on its way to the host queue while we mask it,
- * and if we unmask it early enough (re-cede right away), there is
- * a theorical possibility that it fires again, thus landing in the
- * target queue more than once which is a big no-no.
- *
- * Fortunately, solving this is rather easy. If the above load setting
- * PQ to 01 returns a previous value where P is set, then we know the
- * escalation interrupt is somewhere on its way to the host. In that
- * case we simply don't clear the xive_esc_on flag below. It will be
- * eventually cleared by the handler for the escalation interrupt.
- *
- * Then, when doing a cede, we check that flag again before re-enabling
- * the escalation interrupt, and if set, we abort the cede.
- */
- andi. r0, r0, XIVE_ESB_VAL_P
- bne- 1f
-
- /* Now P is 0, we can clear the flag */
- li r0, 0
- stb r0, VCPU_XIVE_ESC_ON(r4)
-1:
-no_xive:
-#endif /* CONFIG_KVM_XICS */
-
- li r0, 0
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
/* Check if we can deliver an external or decrementer interrupt now */
ld r0, VCPU_PENDING_EXC(r4)
-BEGIN_FTR_SECTION
- /* On POWER9, also check for emulated doorbell interrupt */
- lbz r3, VCPU_DBELL_REQ(r4)
- or r0, r0, r3
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
cmpdi r0, 0
beq 71f
mr r3, r4
@@ -1030,7 +858,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
-fast_guest_entry_c:
ld r10, VCPU_PC(r4)
ld r11, VCPU_MSR(r4)
/* r11 = vcpu->arch.msr & ~MSR_HV */
@@ -1092,18 +919,8 @@ BEGIN_FTR_SECTION
mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-/* Move canary into DSISR to check for later */
-BEGIN_FTR_SECTION
- li r0, 0x7fff
- mtspr SPRN_HDSISR, r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
- ld r6, VCPU_KVM(r4)
- lbz r7, KVM_SECURE_GUEST(r6)
- cmpdi r7, 0
ld r6, VCPU_GPR(R6)(r4)
ld r7, VCPU_GPR(R7)(r4)
- bne ret_to_ultra
ld r0, VCPU_CR(r4)
mtcr r0
@@ -1114,117 +931,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r4, VCPU_GPR(R4)(r4)
HRFI_TO_GUEST
b .
-/*
- * Use UV_RETURN ultracall to return control back to the Ultravisor after
- * processing an hypercall or interrupt that was forwarded (a.k.a. reflected)
- * to the Hypervisor.
- *
- * All registers have already been loaded, except:
- * R0 = hcall result
- * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
- * R3 = UV_RETURN
- */
-ret_to_ultra:
- ld r0, VCPU_CR(r4)
- mtcr r0
-
- ld r0, VCPU_GPR(R3)(r4)
- mfspr r2, SPRN_SRR1
- li r3, 0
- ori r3, r3, UV_RETURN
- ld r4, VCPU_GPR(R4)(r4)
- sc 2
-
-/*
- * Enter the guest on a P9 or later system where we have exactly
- * one vcpu per vcore and we don't need to go to real mode
- * (which implies that host and guest are both using radix MMU mode).
- * r3 = vcpu pointer
- * Most SPRs and all the VSRs have been loaded already.
- */
-_GLOBAL(__kvmhv_vcpu_entry_p9)
-EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
- mflr r0
- std r0, PPC_LR_STKOFF(r1)
- stdu r1, -SFS(r1)
-
- li r0, 1
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
- std r3, HSTATE_KVM_VCPU(r13)
- mfcr r4
- stw r4, SFS+8(r1)
-
- std r1, HSTATE_HOST_R1(r13)
-
- reg = 14
- .rept 18
- std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, __VCPU_GPR(reg)(r3)
- reg = reg + 1
- .endr
-
- mfmsr r10
- std r10, HSTATE_HOST_MSR(r13)
-
- mr r4, r3
- b fast_guest_entry_c
-guest_exit_short_path:
- /*
- * Malicious or buggy radix guests may have inserted SLB entries
- * (only 0..3 because radix always runs with UPRT=1), so these must
- * be cleared here to avoid side-channels. slbmte is used rather
- * than slbia, as it won't clear cached translations.
- */
- li r0,0
- slbmte r0,r0
- li r4,1
- slbmte r0,r4
- li r4,2
- slbmte r0,r4
- li r4,3
- slbmte r0,r4
-
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- reg = 14
- .rept 18
- std reg, __VCPU_GPR(reg)(r9)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- lwz r4, SFS+8(r1)
- mtcr r4
-
- mr r3, r12 /* trap number */
-
- addi r1, r1, SFS
- ld r0, PPC_LR_STKOFF(r1)
- mtlr r0
-
- /* If we are in real mode, do a rfid to get back to the caller */
- mfmsr r4
- andi. r5, r4, MSR_IR
- bnelr
- rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
- mtspr SPRN_SRR0, r0
- ld r10, HSTATE_HOST_MSR(r13)
- rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtspr SPRN_SRR1, r10
- RFI_TO_KERNEL
- b .
secondary_too_late:
li r12, 0
@@ -1265,21 +971,16 @@ hdec_soon:
kvmppc_interrupt_hv:
/*
* Register contents:
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | interrupt vector
* R13 = PACA
* guest R12 saved in shadow VCPU SCRATCH0
* guest R13 saved in SPRN_SCRATCH0
+ * guest R9 saved in HSTATE_SCRATCH2
*/
- std r9, HSTATE_SCRATCH2(r13)
- lbz r9, HSTATE_IN_GUEST(r13)
- cmpwi r9, KVM_GUEST_MODE_HOST_HV
- beq kvmppc_bad_host_intr
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
- cmpwi r9, KVM_GUEST_MODE_GUEST
- ld r9, HSTATE_SCRATCH2(r13)
- beq kvmppc_interrupt_pr
-#endif
/* We're now back in the host but in guest MMU context */
+ cmpwi r9,KVM_GUEST_MODE_HOST_HV
+ beq kvmppc_bad_host_intr
li r9, KVM_GUEST_MODE_HOST_HV
stb r9, HSTATE_IN_GUEST(r13)
@@ -1397,7 +1098,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
bne 2f
mfspr r3,SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3,0
mr r4,r9
bge fast_guest_return
@@ -1409,14 +1110,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Hypervisor doorbell - exit only if host IPI flag set */
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
- /* always exit if we're running a nested guest */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
beq maybe_reenter_guest
@@ -1446,62 +1139,16 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
mr r4, r9
bl kvmhv_accumulate_time
#endif
-#ifdef CONFIG_KVM_XICS
- /* We are exiting, pull the VP from the XIVE */
- lbz r0, VCPU_XIVE_PUSHED(r9)
- cmpwi cr0, r0, 0
- beq 1f
- li r7, TM_SPC_PULL_OS_CTX
- li r6, TM_QW1_OS
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzx r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldx r11, r6, r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzcix r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldcix r11, r6, r10
-3: std r11, VCPU_XIVE_SAVED_STATE(r9)
- /* Fixup some of the state for the next load */
- li r10, 0
- li r0, 0xff
- stb r10, VCPU_XIVE_PUSHED(r9)
- stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
- stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
- eieio
-1:
-#endif /* CONFIG_KVM_XICS */
/*
* Possibly flush the link stack here, before we do a blr in
- * guest_exit_short_path.
+ * kvmhv_switch_to_host.
*/
1: nop
patch_site 1b patch__call_kvm_flush_link_stack
- /* If we came in through the P9 short path, go back out to C now */
- lwz r0, STACK_SLOT_SHORT_PATH(r1)
- cmpwi r0, 0
- bne guest_exit_short_path
-
/* For hash guest, read the guest SLB and save it away */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
li r5, 0
- cmpwi r0, 0
- bne 0f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
@@ -1525,9 +1172,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r5,VCPU_SLB_MAX(r9)
/* load host SLB entries */
-BEGIN_MMU_FTR_SECTION
- b guest_bypass
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
@@ -1540,21 +1184,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
slbmte r6,r5
1: addi r8,r8,16
.endr
- b guest_bypass
-
-0: /*
- * Sanitise radix guest SLB, see guest_exit_short_path comment.
- * We clear vcpu->arch.slb_max to match earlier behaviour.
- */
- li r0,0
- stw r0,VCPU_SLB_MAX(r9)
- slbmte r0,r0
- li r4,1
- slbmte r0,r4
- li r4,2
- slbmte r0,r4
- li r4,3
- slbmte r0,r4
guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
@@ -1564,12 +1193,6 @@ guest_bypass:
ld r3, HSTATE_KVM_VCORE(r13)
mfspr r5,SPRN_DEC
mftb r6
- /* On P9, if the guest has large decr enabled, don't sign extend */
-BEGIN_FTR_SECTION
- ld r4, VCORE_LPCR(r3)
- andis. r4, r4, LPCR_LD@h
- bne 16f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r5,r5
16: add r5,r5,r6
/* r5 is a guest timebase value here, convert to host TB */
@@ -1643,7 +1266,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r6, VCPU_BESCR(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_TCSCR
mfspr r6, SPRN_ACOP
mfspr r7, SPRN_CSIGR
@@ -1652,17 +1274,10 @@ BEGIN_FTR_SECTION
std r6, VCPU_ACOP(r9)
std r7, VCPU_CSIGR(r9)
std r8, VCPU_TACR(r9)
-FTR_SECTION_ELSE
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- std r5, VCPU_TID(r9)
- rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
- rotldi r6, r6, 60
- std r6, VCPU_PSSCR(r9)
- /* Restore host HFSCR value */
- ld r7, STACK_SLOT_HFSCR(r1)
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_FSCR(r1)
+ mtspr SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
@@ -1670,13 +1285,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
li r0, 0
mtspr SPRN_PSPB, r0
mtspr SPRN_WORT, r0
-BEGIN_FTR_SECTION
mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
sldi r0, r0, 31
mtspr SPRN_MMCRS, r0
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
ld r8, STACK_SLOT_IAMR(r1)
@@ -1733,13 +1346,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -1785,80 +1394,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_DAWR0, r6
mtspr SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- ld r6, STACK_SLOT_DAWR1(r1)
- ld r7, STACK_SLOT_DAWRX1(r1)
- mtspr SPRN_DAWR1, r6
- mtspr SPRN_DAWRX1, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
-BEGIN_FTR_SECTION
- ld r5, STACK_SLOT_TID(r1)
- ld r6, STACK_SLOT_PSSCR(r1)
- ld r7, STACK_SLOT_PID(r1)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_PID, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
-#ifdef CONFIG_PPC_RADIX_MMU
- /*
- * Are we running hash or radix ?
- */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
- cmpwi cr2, r0, 0
- beq cr2, 2f
-
- /*
- * Radix: do eieio; tlbsync; ptesync sequence in case we
- * interrupted the guest between a tlbie and a ptesync.
- */
- eieio
- tlbsync
- ptesync
-
-BEGIN_FTR_SECTION
- /* Radix: Handle the case where the guest used an illegal PID */
- LOAD_REG_ADDR(r4, mmu_base_pid)
- lwz r3, VCPU_GUEST_PID(r9)
- lwz r5, 0(r4)
- cmpw cr0,r3,r5
- blt 2f
-
- /*
- * Illegal PID, the HW might have prefetched and cached in the TLB
- * some translations for the LPID 0 / guest PID combination which
- * Linux doesn't know about, so we need to flush that PID out of
- * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
- * the right context.
- */
- li r0,0
- mtspr SPRN_LPID,r0
- isync
-
- /* Then do a congruence class local flush */
- ld r6,VCPU_KVM(r9)
- lwz r0,KVM_TLB_SETS(r6)
- mtctr r0
- li r7,0x400 /* IS field = 0b01 */
- ptesync
- sldi r0,r3,32 /* RS has PID */
-1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
- addi r7,r7,0x1000
- bdnz 1b
- ptesync
-END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
-
-2:
-#endif /* CONFIG_PPC_RADIX_MMU */
-
- /*
- * cp_abort is required if the processor supports local copy-paste
- * to clear the copy buffer that was under control of the guest.
- */
-BEGIN_FTR_SECTION
- PPC_CP_ABORT
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
/*
* POWER7/POWER8 guest -> host partition switch code.
@@ -1895,13 +1430,11 @@ kvmhv_switch_to_host:
/* Primary thread switches back to host partition */
lwz r7,KVM_HOST_LPID(r4)
-BEGIN_FTR_SECTION
ld r6,KVM_HOST_SDR1(r4)
li r8,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r8
ptesync
mtspr SPRN_SDR1,r6 /* switch to host page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -2110,26 +1643,13 @@ kvmppc_tm_emul:
* reflect the HDSI to the guest as a DSI.
*/
kvmppc_hdsi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
-BEGIN_FTR_SECTION
- /* Look for DSISR canary. If we find it, retry instruction */
- cmpdi r6, 0x7fff
- beq 6f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- cmpwi r0, 0
- bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
beq 1f /* if not, send it to the guest */
andi. r0, r11, MSR_DR /* data relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r4, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
@@ -2197,31 +1717,15 @@ fast_interrupt_c_return:
stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont
-.Lradix_hdsi:
- std r4, VCPU_FAULT_DAR(r9)
- stw r6, VCPU_FAULT_DSISR(r9)
-.Lradix_hisi:
- mfspr r5, SPRN_ASDR
- std r5, VCPU_FAULT_GPA(r9)
- b guest_exit_cont
-
/*
* Similarly for an HISI, reflect it to the guest as an ISI unless
* it is an HPTE not found fault for a page that we have paged out.
*/
kvmppc_hisi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
- cmpwi r0, 0
- bne .Lradix_hisi /* for radix, just save ASDR */
andis. r0, r11, SRR1_ISI_NOPT@h
beq 1f
andi. r0, r11, MSR_IR /* instruction relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r10, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_INST_SEGMENT
@@ -2269,10 +1773,6 @@ hcall_try_real_mode:
andi. r0,r11,MSR_PR
/* sc 1 from userspace - reflect to guest syscall */
bne sc_1_fast_return
- /* sc 1 from nested guest - give it to L1 to handle */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
@@ -2537,7 +2037,7 @@ hcall_real_table:
#else
.long 0 /* 0x2fc - H_XIRR_X*/
#endif
- .long DOTSYM(kvmppc_h_random) - hcall_real_table
+ .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
@@ -2668,13 +2168,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2694,15 +2190,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
mfspr r3, SPRN_DEC
mfspr r4, SPRN_HDEC
mftb r5
-BEGIN_FTR_SECTION
- /* On P9 check whether the guest has large decrementer mode enabled */
- ld r6, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_LPCR(r6)
- andis. r6, r6, LPCR_LD@h
- bne 68f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r3, r3
-68: EXTEND_HDEC(r4)
+ extsw r4, r4
cmpd r3, r4
ble 67f
mtspr SPRN_DEC, r4
@@ -2747,28 +2236,11 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvm_nap_sequence: /* desired LPCR value in r5 */
-BEGIN_FTR_SECTION
- /*
- * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
- * enable state loss = 1 (allow SMT mode switch)
- * requested level = 0 (just stop dispatching)
- */
- lis r3, (PSSCR_EC | PSSCR_ESL)@h
- /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
- li r4, LPCR_PECE_HVEE@higher
- sldi r4, r4, 32
- or r5, r5, r4
-FTR_SECTION_ELSE
li r3, PNV_THREAD_NAP
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r5
isync
-BEGIN_FTR_SECTION
- bl isa300_idle_stop_mayloss
-FTR_SECTION_ELSE
bl isa206_idle_insn_mayloss
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mfspr r0, SPRN_CTRLF
ori r0, r0, 1
@@ -2787,10 +2259,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
beq kvm_end_cede
cmpwi r0, NAPPING_NOVCPU
beq kvm_novcpu_wakeup
-BEGIN_FTR_SECTION
cmpwi r0, NAPPING_UNSPLIT
beq kvm_unsplit_wakeup
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
twi 31,0,0 /* Nap state must not be zero */
33: mr r4, r3
@@ -2810,13 +2280,9 @@ kvm_end_cede:
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2906,47 +2372,7 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
ld r9, HSTATE_KVM_VCPU(r13)
-#ifdef CONFIG_KVM_XICS
- /* are we using XIVE with single escalation? */
- ld r10, VCPU_XIVE_ESC_VADDR(r9)
- cmpdi r10, 0
- beq 3f
- li r6, XIVE_ESB_SET_PQ_00
- /*
- * If we still have a pending escalation, abort the cede,
- * and we must set PQ to 10 rather than 00 so that we don't
- * potentially end up with two entries for the escalation
- * interrupt in the XIVE interrupt queue. In that case
- * we also don't want to set xive_esc_on to 1 here in
- * case we race with xive_esc_irq().
- */
- lbz r5, VCPU_XIVE_ESC_ON(r9)
- cmpwi r5, 0
- beq 4f
- li r0, 0
- stb r0, VCPU_CEDED(r9)
- /*
- * The escalation interrupts are special as we don't EOI them.
- * There is no need to use the load-after-store ordering offset
- * to set PQ to 10 as we won't use StoreEOI.
- */
- li r6, XIVE_ESB_SET_PQ_10
- b 5f
-4: li r0, 1
- stb r0, VCPU_XIVE_ESC_ON(r9)
- /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
- sync
-5: /* Enable XIVE escalation */
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 1f
- ldx r0, r10, r6
- b 2f
-1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
- ldcix r0, r10, r6
-2: sync
-#endif /* CONFIG_KVM_XICS */
-3: b guest_exit_cont
+ b guest_exit_cont
/* Try to do machine check recovery in real mode */
machine_check_realmode:
@@ -3023,10 +2449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
@@ -3335,73 +2757,12 @@ kvmppc_bad_host_intr:
std r3, STACK_FRAME_OVERHEAD-16(r1)
/*
- * On POWER9 do a minimal restore of the MMU and call C code,
- * which will print a message and panic.
* XXX On POWER7 and POWER8, we just spin here since we don't
* know what the other threads are doing (and we don't want to
* coordinate with them) - but at least we now have register state
* in memory that we might be able to look at from another CPU.
*/
-BEGIN_FTR_SECTION
b .
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- ld r9, HSTATE_KVM_VCPU(r13)
- ld r10, VCPU_KVM(r9)
-
- li r0, 0
- mtspr SPRN_AMR, r0
- mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX0, r0
-BEGIN_FTR_SECTION
- mtspr SPRN_DAWRX1, r0
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
-
- /* Clear hash and radix guest SLB, see guest_exit_short_path comment. */
- slbmte r0, r0
- PPC_SLBIA(6)
-
-BEGIN_MMU_FTR_SECTION
- b 4f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-
- ptesync
- ld r8, PACA_SLBSHADOWPTR(r13)
- .rept SLB_NUM_BOLTED
- li r3, SLBSHADOW_SAVEAREA
- LDX_BE r5, r8, r3
- addi r3, r3, 8
- LDX_BE r6, r8, r3
- andis. r7, r5, SLB_ESID_V@h
- beq 3f
- slbmte r6, r5
-3: addi r8, r8, 16
- .endr
-
-4: lwz r7, KVM_HOST_LPID(r10)
- mtspr SPRN_LPID, r7
- mtspr SPRN_PID, r0
- ld r8, KVM_HOST_LPCR(r10)
- mtspr SPRN_LPCR, r8
- isync
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- /*
- * Turn on the MMU and jump to C code
- */
- bcl 20, 31, .+4
-5: mflr r3
- addi r3, r3, 9f - 5b
- li r4, -1
- rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
- ld r4, PACAKMSR(r13)
- mtspr SPRN_SRR0, r3
- mtspr SPRN_SRR1, r4
- RFI_TO_KERNEL
-9: addi r3, r1, STACK_FRAME_OVERHEAD
- bl kvmppc_bad_interrupt
- b 9b
/*
* This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 84e5a2dc8be5..a7061ee3b157 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -90,6 +90,7 @@
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
+#include <linux/of.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
@@ -614,7 +615,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
/* Fetch the VMA if addr is not in the latest fetched one */
if (!vma || addr >= vma->vm_end) {
- vma = find_vma_intersection(kvm->mm, addr, addr+1);
+ vma = vma_lookup(kvm->mm, addr);
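+ /*
+ * vma_lookup(mm, addr) returns the VMA containing addr, or NULL;
+ * it is the single-address form of
+ * find_vma_intersection(mm, addr, addr + 1).
+ */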
if (!vma) {
pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
break;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d7733b07f489..6bc9425acb32 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -493,7 +494,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.generic.halt_wakeup++;
/* Unset POW bit after we woke up */
msr &= ~MSR_POW;
@@ -1848,6 +1849,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
/* Make sure we save the guest TAR/EBB/DSCR state */
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+ srr_regs_clobbered();
out:
vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 031c8015864a..ac14239f3424 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -378,7 +378,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.generic.halt_wakeup++;
return EMULATE_DONE;
case H_LOGICAL_CI_LOAD:
return kvmppc_h_pr_logical_ci_load(vcpu);
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1f492aa4c8d6..202046a83fc1 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -164,12 +164,15 @@ kvmppc_interrupt_pr:
/* 64-bit entry. Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | exit handler id
* R13 = PACA
* HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH2 = guest R9
*/
#ifdef CONFIG_PPC64
/* Match 32-bit entry */
+ ld r9,HSTATE_SCRATCH2(r13)
rotldi r12, r12, 32 /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
srdi r12, r12, 32 /* shift trap into low half */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index e7219b6f5f9a..8cfab3547494 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
+#include <linux/irqdomain.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
@@ -128,6 +129,71 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
/*
+ * Pull a vcpu's context from the XIVE on guest exit.
+ * This assumes we are in virtual mode (MMU on).
+ */
+void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
+{
+ void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+
+ if (!vcpu->arch.xive_pushed)
+ return;
+
+ /*
+ * Should not have been pushed if there is no tima
+ */
+ if (WARN_ON(!tima))
+ return;
+
+ eieio();
+ /* First load to pull the context, we ignore the value */
+ __raw_readl(tima + TM_SPC_PULL_OS_CTX);
+ /* Second load to recover the context state (Words 0 and 1) */
+ vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
+
+ /* Fixup some of the state for the next load */
+ vcpu->arch.xive_saved_state.lsmfb = 0;
+ vcpu->arch.xive_saved_state.ack = 0xff;
+ vcpu->arch.xive_pushed = 0;
+ eieio();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
+
+void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+{
+ void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+
+ if (!esc_vaddr)
+ return;
+
+ /* we are using XIVE with single escalation */
+
+ if (vcpu->arch.xive_esc_on) {
+ /*
+ * If we still have a pending escalation, abort the cede,
+ * and we must set PQ to 10 rather than 00 so that we don't
+ * potentially end up with two entries for the escalation
+ * interrupt in the XIVE interrupt queue. In that case
+ * we also don't want to set xive_esc_on to 1 here in
+ * case we race with xive_esc_irq().
+ */
+ vcpu->arch.ceded = 0;
+ /*
+ * The escalation interrupts are special as we don't EOI them.
+ * There is no need to use the load-after-store ordering offset
+ * to set PQ to 10 as we won't use StoreEOI.
+ */
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
+ } else {
+ vcpu->arch.xive_esc_on = true;
+ mb();
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
+ }
+ mb();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
+
+/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page
*/
@@ -2075,6 +2141,36 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
return 0;
}
+int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ /* The VM should have configured XICS mode before doing XICS hcalls. */
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
+
+ switch (req) {
+ case H_XIRR:
+ return xive_vm_h_xirr(vcpu);
+ case H_CPPR:
+ return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_EOI:
+ return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_IPI:
+ return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ case H_IPOLL:
+ return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_XIRR_X:
+ xive_vm_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+ return H_SUCCESS;
+ }
+
+ return H_UNSUPPORTED;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
+
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -2257,21 +2353,3 @@ struct kvm_device_ops kvm_xive_ops = {
.get_attr = xive_get_attr,
.has_attr = xive_has_attr,
};
-
-void kvmppc_xive_init_module(void)
-{
- __xive_vm_h_xirr = xive_vm_h_xirr;
- __xive_vm_h_ipoll = xive_vm_h_ipoll;
- __xive_vm_h_ipi = xive_vm_h_ipi;
- __xive_vm_h_cppr = xive_vm_h_cppr;
- __xive_vm_h_eoi = xive_vm_h_eoi;
-}
-
-void kvmppc_xive_exit_module(void)
-{
- __xive_vm_h_xirr = NULL;
- __xive_vm_h_ipoll = NULL;
- __xive_vm_h_ipi = NULL;
- __xive_vm_h_cppr = NULL;
- __xive_vm_h_eoi = NULL;
-}
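
The PQ manipulation in kvmppc_xive_rearm_escalation() is easier to follow with a toy model of the ESB state bits. This is a deliberately simplified sketch covering only the two transitions the comment relies on — a trigger with P clear queues an event and sets P, a trigger with P already set only marks Q — not a full description of XIVE ESB behaviour:

#include <stdbool.h>
#include <stdio.h>

struct esb { bool p, q; };		/* P = event pending in a queue, Q = coalesced retrigger */

/* Returns true if this trigger produces a new event queue entry. */
static bool esb_trigger(struct esb *e)
{
	if (!e->p) {			/* PQ=00: deliver the event and mark it pending */
		e->p = true;
		return true;
	}
	e->q = true;			/* PQ=1x: coalesce, no second queue entry */
	return false;
}

int main(void)
{
	struct esb armed = { .p = false, .q = false };	/* PQ=00, as set when re-arming the escalation */
	struct esb held  = { .p = true,  .q = false };	/* PQ=10, as set when aborting the cede */

	printf("trigger on PQ=00 queues an event: %d\n", esb_trigger(&armed));	/* 1 */
	printf("trigger on PQ=10 queues an event: %d\n", esb_trigger(&held));	/* 0 */
	return 0;
}
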
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 86c24a4ad809..afe9eeac6d56 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -289,13 +289,6 @@ extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
-extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-
/*
* Common Xive routines for XICS-over-XIVE and XIVE native
*/
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 76800c84f2a3..573ecaab3597 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
+#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -1281,13 +1282,3 @@ struct kvm_device_ops kvm_xive_native_ops = {
.has_attr = kvmppc_xive_native_has_attr,
.mmap = kvmppc_xive_native_mmap,
};
-
-void kvmppc_xive_native_init_module(void)
-{
- ;
-}
-
-void kvmppc_xive_native_exit_module(void)
-{
- ;
-}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7d5fe43f85c4..551b30d84aee 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -36,29 +36,59 @@
unsigned long kvmppc_booke_handlers;
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("mmio", mmio_exits),
- VCPU_STAT("sig", signal_exits),
- VCPU_STAT("itlb_r", itlb_real_miss_exits),
- VCPU_STAT("itlb_v", itlb_virt_miss_exits),
- VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
- VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
- VCPU_STAT("sysc", syscall_exits),
- VCPU_STAT("isi", isi_exits),
- VCPU_STAT("dsi", dsi_exits),
- VCPU_STAT("inst_emu", emulated_inst_exits),
- VCPU_STAT("dec", dec_exits),
- VCPU_STAT("ext_intr", ext_intr_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("doorbell", dbell_exits),
- VCPU_STAT("guest doorbell", gdbell_exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_TIME_NSEC(VCPU, halt_wait_ns),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
/* TODO: use vcpu_printf() */
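
The conversion from the old debugfs_entries[] strings to kvm_vcpu_stats_desc[] plus a static_assert() ties the descriptor table to the layout of the stat structure, so a counter can no longer be added to one without the other. A small self-contained illustration of that pattern; the struct, macro and field names below are stand-ins, not the KVM definitions:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_stat { uint64_t sum_exits, mmio_exits, dec_exits; };

struct stat_desc { const char *name; size_t offset; };

#define STAT_COUNTER(field) { #field, offsetof(struct vcpu_stat, field) }

static const struct stat_desc vcpu_stats_desc[] = {
	STAT_COUNTER(sum_exits),
	STAT_COUNTER(mmio_exits),
	STAT_COUNTER(dec_exits),
};

/* Same check the patch adds: exactly one descriptor per u64 in the struct. */
static_assert(sizeof(vcpu_stats_desc) / sizeof(vcpu_stats_desc[0]) ==
	      sizeof(struct vcpu_stat) / sizeof(uint64_t), "stat descriptors out of sync");

int main(void)
{
	struct vcpu_stat s = { .sum_exits = 3, .mmio_exits = 1, .dec_exits = 2 };

	for (size_t i = 0; i < sizeof(vcpu_stats_desc) / sizeof(vcpu_stats_desc[0]); i++)
		printf("%s = %llu\n", vcpu_stats_desc[i].name,
		       (unsigned long long)*(uint64_t *)((char *)&s + vcpu_stats_desc[i].offset));
	return 0;
}
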
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a2a68a958fa0..be33b5321a76 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -682,6 +682,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
!kvmppc_hv_ops->enable_dawr1(NULL));
break;
+ case KVM_CAP_PPC_RPT_INVALIDATE:
+ r = 1;
+ break;
#endif
default:
r = 0;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index cc1a8a0f311e..99a7c9132422 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -39,7 +39,7 @@ extra-$(CONFIG_PPC64) += crtsavres.o
endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
- memcpy_power7.o
+ memcpy_power7.o restart_table.o
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
memcpy_64.o copy_mc_64.o
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 870b30d9be2f..f9a3019e37b4 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -18,8 +18,7 @@
#include <asm/setup.h>
#include <asm/inst.h>
-static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr,
- struct ppc_inst *patch_addr)
+static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch_addr)
{
if (!ppc_inst_prefixed(instr)) {
u32 val = ppc_inst_val(instr);
@@ -40,7 +39,7 @@ failed:
return -EFAULT;
}
-int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
{
return __patch_instruction(addr, instr, addr);
}
@@ -70,22 +69,16 @@ static int text_area_cpu_down(unsigned int cpu)
}
/*
- * Run as a late init call. This allows all the boot time patching to be done
- * simply by patching the code, and then we're called here prior to
- * mark_rodata_ro(), which happens after all init calls are run. Although
- * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
- * it as being preferable to a kernel that will crash later when someone tries
- * to use patch_instruction().
+ * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
+ * we judge it as being preferable to a kernel that will crash later when
+ * someone tries to use patch_instruction().
*/
-static int __init setup_text_poke_area(void)
+void __init poking_init(void)
{
BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke:online", text_area_cpu_up,
text_area_cpu_down));
-
- return 0;
}
-late_initcall(setup_text_poke_area);
/*
* This can be called for kernel text or a module.
@@ -148,10 +141,10 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
{
int err;
- struct ppc_inst *patch_addr = NULL;
+ u32 *patch_addr = NULL;
unsigned long flags;
unsigned long text_poke_addr;
unsigned long kaddr = (unsigned long)addr;
@@ -172,7 +165,7 @@ static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
goto out;
}
- patch_addr = (struct ppc_inst *)(text_poke_addr + (kaddr & ~PAGE_MASK));
+ patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
__patch_instruction(addr, instr, patch_addr);
@@ -187,14 +180,14 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+int patch_instruction(u32 *addr, struct ppc_inst instr)
{
/* Make sure we aren't patching a freed init section */
if (init_mem_is_free && init_section_contains(addr, 4)) {
@@ -205,7 +198,7 @@ int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
}
NOKPROBE_SYMBOL(patch_instruction);
-int patch_branch(struct ppc_inst *addr, unsigned long target, int flags)
+int patch_branch(u32 *addr, unsigned long target, int flags)
{
struct ppc_inst instr;
@@ -257,8 +250,7 @@ bool is_conditional_branch(struct ppc_inst instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-int create_branch(struct ppc_inst *instr,
- const struct ppc_inst *addr,
+int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -278,7 +270,7 @@ int create_branch(struct ppc_inst *instr,
return 0;
}
-int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -325,39 +317,39 @@ int instr_is_relative_link_branch(struct ppc_inst instr)
return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}
-static unsigned long branch_iform_target(const struct ppc_inst *instr)
+static unsigned long branch_iform_target(const u32 *instr)
{
signed long imm;
- imm = ppc_inst_val(*instr) & 0x3FFFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x2000000)
imm -= 0x4000000;
- if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-static unsigned long branch_bform_target(const struct ppc_inst *instr)
+static unsigned long branch_bform_target(const u32 *instr)
{
signed long imm;
- imm = ppc_inst_val(*instr) & 0xFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x8000)
imm -= 0x10000;
- if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-unsigned long branch_target(const struct ppc_inst *instr)
+unsigned long branch_target(const u32 *instr)
{
if (instr_is_branch_iform(ppc_inst_read(instr)))
return branch_iform_target(instr);
@@ -367,17 +359,7 @@ unsigned long branch_target(const struct ppc_inst *instr)
return 0;
}
-int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr)
-{
- if (instr_is_branch_iform(ppc_inst_read(instr)) ||
- instr_is_branch_bform(ppc_inst_read(instr)))
- return branch_target(instr) == addr;
-
- return 0;
-}
-
-int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
- const struct ppc_inst *src)
+int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src)
{
unsigned long target;
target = branch_target(src);
@@ -404,12 +386,21 @@ void __patch_exception(int exc, unsigned long addr)
* instruction of the exception, not the first one
*/
- patch_branch((struct ppc_inst *)(ibase + (exc / 4) + 1), addr, 0);
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
}
#endif
#ifdef CONFIG_CODE_PATCHING_SELFTEST
+static int instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
+{
+ if (instr_is_branch_iform(ppc_inst_read(instr)) ||
+ instr_is_branch_bform(ppc_inst_read(instr)))
+ return branch_target(instr) == addr;
+
+ return 0;
+}
+
static void __init test_trampoline(void)
{
asm ("nop;\n");
@@ -422,9 +413,9 @@ static void __init test_branch_iform(void)
{
int err;
struct ppc_inst instr;
- unsigned long addr;
-
- addr = (unsigned long)&instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned long addr = (unsigned long)tmp;
/* The simplest case, branch to self, no flags */
check(instr_is_branch_iform(ppc_inst(0x48000000)));
@@ -445,63 +436,68 @@ static void __init test_branch_iform(void)
check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
/* Absolute branch to 0x100 */
- instr = ppc_inst(0x48000103);
- check(instr_is_branch_to_addr(&instr, 0x100));
+ patch_instruction(iptr, ppc_inst(0x48000103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
/* Absolute branch to 0x420fc */
- instr = ppc_inst(0x480420ff);
- check(instr_is_branch_to_addr(&instr, 0x420fc));
+ patch_instruction(iptr, ppc_inst(0x480420ff));
+ check(instr_is_branch_to_addr(iptr, 0x420fc));
/* Maximum positive relative branch, + 20MB - 4B */
- instr = ppc_inst(0x49fffffc);
- check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
+ patch_instruction(iptr, ppc_inst(0x49fffffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
/* Smallest negative relative branch, - 4B */
- instr = ppc_inst(0x4bfffffc);
- check(instr_is_branch_to_addr(&instr, addr - 4));
+ patch_instruction(iptr, ppc_inst(0x4bfffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
/* Largest negative relative branch, - 32 MB */
- instr = ppc_inst(0x4a000000);
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+ patch_instruction(iptr, ppc_inst(0x4a000000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
/* Branch to self, with link */
- err = create_branch(&instr, &instr, addr, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr));
+ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
/* Branch to self - 0x100, with link */
- err = create_branch(&instr, &instr, addr - 0x100, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
+ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
/* Branch to self + 0x100, no link */
- err = create_branch(&instr, &instr, addr + 0x100, 0);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
+ err = create_branch(&instr, iptr, addr + 0x100, 0);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
/* Maximum relative negative offset, - 32 MB */
- err = create_branch(&instr, &instr, addr - 0x2000000, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
/* Out of range relative negative offset, - 32 MB + 4*/
- err = create_branch(&instr, &instr, addr - 0x2000004, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
check(err);
/* Out of range relative positive offset, + 32 MB */
- err = create_branch(&instr, &instr, addr + 0x2000000, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
check(err);
/* Unaligned target */
- err = create_branch(&instr, &instr, addr + 3, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
check(err);
/* Check flags are masked correctly */
- err = create_branch(&instr, &instr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
+ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
}
static void __init test_create_function_call(void)
{
- struct ppc_inst *iptr;
+ u32 *iptr;
unsigned long dest;
struct ppc_inst instr;
/* Check we can create a function call */
- iptr = (struct ppc_inst *)ppc_function_entry(test_trampoline);
+ iptr = (u32 *)ppc_function_entry(test_trampoline);
dest = ppc_function_entry(test_create_function_call);
create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
patch_instruction(iptr, instr);
@@ -512,10 +508,11 @@ static void __init test_branch_bform(void)
{
int err;
unsigned long addr;
- struct ppc_inst *iptr, instr;
+ struct ppc_inst instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
unsigned int flags;
- iptr = &instr;
addr = (unsigned long)iptr;
/* The simplest case, branch to self, no flags */
@@ -528,39 +525,43 @@ static void __init test_branch_bform(void)
check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
/* Absolute conditional branch to 0x100 */
- instr = ppc_inst(0x43ff0103);
- check(instr_is_branch_to_addr(&instr, 0x100));
+ patch_instruction(iptr, ppc_inst(0x43ff0103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
/* Absolute conditional branch to 0x20fc */
- instr = ppc_inst(0x43ff20ff);
- check(instr_is_branch_to_addr(&instr, 0x20fc));
+ patch_instruction(iptr, ppc_inst(0x43ff20ff));
+ check(instr_is_branch_to_addr(iptr, 0x20fc));
/* Maximum positive relative conditional branch, + 32 KB - 4B */
- instr = ppc_inst(0x43ff7ffc);
- check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
+ patch_instruction(iptr, ppc_inst(0x43ff7ffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
/* Smallest negative relative conditional branch, - 4B */
- instr = ppc_inst(0x43fffffc);
- check(instr_is_branch_to_addr(&instr, addr - 4));
+ patch_instruction(iptr, ppc_inst(0x43fffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
/* Largest negative relative conditional branch, - 32 KB */
- instr = ppc_inst(0x43ff8000);
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+ patch_instruction(iptr, ppc_inst(0x43ff8000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
/* All condition code bits set & link */
flags = 0x3ff000 | BRANCH_SET_LINK;
/* Branch to self */
err = create_cond_branch(&instr, iptr, addr, flags);
- check(instr_is_branch_to_addr(&instr, addr));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
/* Branch to self - 0x100 */
err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
/* Branch to self + 0x100 */
err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
/* Maximum relative negative offset, - 32 KB */
err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
/* Out of range relative negative offset, - 32 KB + 4*/
err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
@@ -576,7 +577,8 @@ static void __init test_branch_bform(void)
/* Check flags are masked correctly */
err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
}
@@ -715,9 +717,9 @@ static void __init test_prefixed_patching(void)
extern unsigned int code_patching_test1_expected[];
extern unsigned int end_code_patching_test1[];
- __patch_instruction((struct ppc_inst *)code_patching_test1,
+ __patch_instruction(code_patching_test1,
ppc_inst_prefix(OP_PREFIX << 26, 0x00000000),
- (struct ppc_inst *)code_patching_test1);
+ code_patching_test1);
check(!memcmp(code_patching_test1,
code_patching_test1_expected,
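
branch_iform_target() now reads the instruction word through ppc_inst_read() instead of dereferencing a struct ppc_inst, but the decode itself is unchanged: mask out the 26-bit LI||0b00 field, sign-extend it, and add the instruction address unless the AA (absolute) bit is set. A stand-alone version of that decode, checked against two of the selftest encodings above:

#include <stdint.h>
#include <stdio.h>

#define BRANCH_ABSOLUTE 0x2	/* AA bit of the I-form branch */

static unsigned long branch_iform_target(uint32_t insn, unsigned long addr)
{
	long imm = insn & 0x3FFFFFC;		/* LI || 0b00 */

	if (imm & 0x2000000)			/* sign bit of the 26-bit offset */
		imm -= 0x4000000;

	if (!(insn & BRANCH_ABSOLUTE))
		imm += addr;			/* relative branch */

	return (unsigned long)imm;
}

int main(void)
{
	/* 0x48000103: absolute branch to 0x100 (first selftest case) */
	printf("0x%lx\n", branch_iform_target(0x48000103, 0x1000));	/* 0x100 */
	/* 0x4bfffffc: smallest negative relative branch, self - 4 */
	printf("0x%lx\n", branch_iform_target(0x4bfffffc, 0x1000));	/* 0xffc */
	return 0;
}
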
diff --git a/arch/powerpc/lib/error-inject.c b/arch/powerpc/lib/error-inject.c
index 407b992fb02f..e834079d2b5c 100644
--- a/arch/powerpc/lib/error-inject.c
+++ b/arch/powerpc/lib/error-inject.c
@@ -11,6 +11,6 @@ void override_function_with_return(struct pt_regs *regs)
* function in the kernel/module, captured on a kprobe. We don't need
* to worry about 32-bit userspace on a 64-bit kernel.
*/
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
}
NOKPROBE_SYMBOL(override_function_with_return);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 1fd31b4b0e13..cda17bee5afe 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -14,8 +14,10 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
+#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -32,26 +34,25 @@ struct fixup_entry {
long alt_end_off;
};
-static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset)
+static u32 *calc_addr(struct fixup_entry *fcur, long offset)
{
/*
* We store the offset to the code as a negative offset from
* the start of the alt_entry, to support the VDSO. This
* routine converts that back into an actual address.
*/
- return (struct ppc_inst *)((unsigned long)fcur + offset);
+ return (u32 *)((unsigned long)fcur + offset);
}
-static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
- struct ppc_inst *alt_start, struct ppc_inst *alt_end)
+static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
int err;
struct ppc_inst instr;
instr = ppc_inst_read(src);
- if (instr_is_relative_branch(*src)) {
- struct ppc_inst *target = (struct ppc_inst *)branch_target(src);
+ if (instr_is_relative_branch(ppc_inst_read(src))) {
+ u32 *target = (u32 *)branch_target(src);
/* Branch within the section doesn't need translating */
if (target < alt_start || target > alt_end) {
@@ -68,7 +69,7 @@ static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
- struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop;
+ u32 *start, *end, *alt_start, *alt_end, *src, *dest;
start = calc_addr(fcur, fcur->start_off);
end = calc_addr(fcur, fcur->end_off);
@@ -90,9 +91,8 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
return 1;
}
- nop = ppc_inst(PPC_INST_NOP);
- for (; dest < end; dest = ppc_inst_next(dest, &nop))
- raw_patch_instruction(dest, nop);
+ for (; dest < end; dest++)
+ raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP()));
return 0;
}
@@ -127,21 +127,21 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
start = PTRRELOC(&__start___stf_entry_barrier_fixup);
end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK) {
- instrs[i++] = 0x7d4802a6; /* mflr r10 */
- instrs[i++] = 0x60000000; /* branch patched below */
- instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
} else if (types & STF_BARRIER_SYNC_ORI) {
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R10, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
@@ -149,17 +149,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types & STF_BARRIER_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1),
- (unsigned long)&stf_barrier_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction((struct ppc_inst *)(dest + 1),
- ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ // See comment in do_entry_flush_fixups() RE order of patching
+ if (types & STF_BARRIER_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
}
printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
@@ -179,32 +179,31 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
start = PTRRELOC(&__start___stf_exit_barrier_fixup);
end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
- instrs[3] = 0x60000000; /* nop */
- instrs[4] = 0x60000000; /* nop */
- instrs[5] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_NOP();
+ instrs[4] = PPC_RAW_NOP();
+ instrs[5] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
- instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0);
} else {
- instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
- instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1);
}
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
- } else {
- instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
- }
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R13, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1);
+ else
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
}
for (i = 0; start < end; start++, i++) {
@@ -212,12 +211,12 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
- patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4]));
- patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
+ patch_instruction(dest + 4, ppc_inst(instrs[4]));
+ patch_instruction(dest + 5, ppc_inst(instrs[5]));
}
printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
(types == STF_BARRIER_NONE) ? "no" :
@@ -227,11 +226,44 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
+static bool stf_exit_reentrant = false;
+static bool rfi_exit_reentrant = false;
+
+static int __do_stf_barrier_fixups(void *data)
+{
+ enum stf_barrier_type *types = data;
+
+ do_stf_entry_barrier_fixups(*types);
+ do_stf_exit_barrier_fixups(*types);
+
+ return 0;
+}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
- do_stf_entry_barrier_fixups(types);
- do_stf_exit_barrier_fixups(types);
+ /*
+ * The call to the fallback entry flush, and the fallback/sync-ori exit
+ * flush can not be safely patched in/out while other CPUs are
+ * executing them. So call __do_stf_barrier_fixups() on one CPU while
+ * all other CPUs spin in the stop machine core with interrupts hard
+ * disabled.
+ *
+ * The branch to mark interrupt exits non-reentrant is enabled first,
+ * then stop_machine runs which will ensure all CPUs are out of the
+ * low level interrupt exit code before patching. After the patching,
+ * if allowed, then flip the branch to allow fast exits.
+ */
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
+ stop_machine(__do_stf_barrier_fixups, &types, NULL);
+
+ if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI))
+ stf_exit_reentrant = false;
+ else
+ stf_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
@@ -243,35 +275,35 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
start = PTRRELOC(&__start___uaccess_flush_fixup);
end = PTRRELOC(&__stop___uaccess_flush_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
- instrs[3] = 0x4e800020; /* blr */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_BLR();
i = 0;
if (types == L1D_FLUSH_FALLBACK) {
- instrs[3] = 0x60000000; /* nop */
+ instrs[3] = PPC_RAW_NOP();
/* fallthrough to fallback flush */
}
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
}
printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
@@ -284,30 +316,56 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
: "unknown");
}
-void do_entry_flush_fixups(enum l1d_flush_type types)
+static int __do_entry_flush_fixups(void *data)
{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
i = 0;
if (types == L1D_FLUSH_FALLBACK) {
- instrs[i++] = 0x7d4802a6; /* mflr r10 */
- instrs[i++] = 0x60000000; /* branch patched below */
- instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
}
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
+
+ /*
+ * If we're patching in or out the fallback flush we need to be careful about the
+ * order in which we patch instructions. That's because it's possible we could
+ * take a page fault after patching one instruction, so the sequence of
+ * instructions must be safe even in a half patched state.
+ *
+ * To make that work, when patching in the fallback flush we patch in this order:
+ * - the mflr (dest)
+ * - the mtlr (dest + 2)
+ * - the branch (dest + 1)
+ *
+ * That ensures the sequence is safe to execute at any point. In contrast if we
+ * patch the mtlr last, it's possible we could return from the branch and not
+ * restore LR, leading to a crash later.
+ *
+ * When patching out the fallback flush (either with nops or another flush type),
+ * we patch in this order:
+ * - the branch (dest + 1)
+ * - the mtlr (dest + 2)
+ * - the mflr (dest)
+ *
+ * Note we are protected by stop_machine() from other CPUs executing the code in a
+ * semi-patched state.
+ */
start = PTRRELOC(&__start___entry_flush_fixup);
end = PTRRELOC(&__stop___entry_flush_fixup);
@@ -316,15 +374,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types == L1D_FLUSH_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
}
start = PTRRELOC(&__start___scv_entry_flush_fixup);
@@ -334,15 +393,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types == L1D_FLUSH_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
}
@@ -354,10 +414,24 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
+
+ return 0;
}
-void do_rfi_flush_fixups(enum l1d_flush_type types)
+void do_entry_flush_fixups(enum l1d_flush_type types)
{
+ /*
+ * The call to the fallback flush can not be safely patched in/out while
+ * other CPUs are executing it. So call __do_entry_flush_fixups() on one
+ * CPU while all other CPUs spin in the stop machine core with interrupts
+ * hard disabled.
+ */
+ stop_machine(__do_entry_flush_fixups, &types, NULL);
+}
+
+static int __do_rfi_flush_fixups(void *data)
+{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
@@ -365,31 +439,31 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
start = PTRRELOC(&__start___rfi_flush_fixup);
end = PTRRELOC(&__stop___rfi_flush_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
- instrs[0] = 0x48000010;
+ instrs[0] = PPC_INST_BRANCH | 16;
i = 0;
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
}
printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
@@ -400,6 +474,29 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
+
+ return 0;
+}
+
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+ /*
+ * stop_machine gets all CPUs out of the interrupt exit handler same
+ * as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
+ * without stop_machine, so this could be achieved with a broadcast
+ * IPI instead, but this matches the stf sequence.
+ */
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
+ stop_machine(__do_rfi_flush_fixups, &types, NULL);
+
+ if (types & L1D_FLUSH_FALLBACK)
+ rfi_exit_reentrant = false;
+ else
+ rfi_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
@@ -411,18 +508,18 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr = 0x60000000; /* nop */
+ instr = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using ORI speculation barrier\n");
- instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instr));
+ patch_instruction(dest, ppc_inst(instr));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -452,21 +549,21 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr[0] = PPC_INST_NOP;
- instr[1] = PPC_INST_NOP;
+ instr[0] = PPC_RAW_NOP();
+ instr[1] = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
- instr[0] = PPC_INST_ISYNC;
- instr[1] = PPC_INST_SYNC;
+ instr[0] = PPC_RAW_ISYNC();
+ instr[1] = PPC_RAW_SYNC();
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1]));
+ patch_instruction(dest, ppc_inst(instr[0]));
+ patch_instruction(dest + 1, ppc_inst(instr[1]));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -480,7 +577,7 @@ static void patch_btb_flush_section(long *curr)
end = (void *)curr + *(curr + 1);
for (; start < end; start++) {
pr_devel("patching dest %lx\n", (unsigned long)start);
- patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP));
+ patch_instruction(start, ppc_inst(PPC_RAW_NOP()));
}
}
@@ -499,7 +596,7 @@ void do_btb_flush_fixups(void)
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
- struct ppc_inst *dest;
+ u32 *dest;
if (!(value & CPU_FTR_LWSYNC))
return ;
@@ -516,13 +613,14 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
- struct ppc_inst inst, *src, *dest, *end;
+ struct ppc_inst inst;
+ u32 *src, *dest, *end;
if (PHYSICAL_START == 0)
return;
- src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START);
- dest = (struct ppc_inst *)KERNELBASE;
+ src = (u32 *)(KERNELBASE + PHYSICAL_START);
+ dest = (u32 *)KERNELBASE;
end = (void *)src + (__end_interrupts - _stext);
while (src < end) {
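
All of the fixup loops in this file share one addressing trick: each table entry stores a self-relative offset, and the patch site is recovered with dest = (void *)start + *start, which keeps the tables position-independent. A small stand-alone illustration of that resolution; the table here is built by hand rather than emitted into linker sections:

#include <stdint.h>
#include <stdio.h>

static uint32_t site_a, site_b;		/* stand-ins for patchable instructions */

/* Each entry stores the target address relative to the entry itself. */
static long fixup_table[2];

int main(void)
{
	fixup_table[0] = (char *)&site_a - (char *)&fixup_table[0];
	fixup_table[1] = (char *)&site_b - (char *)&fixup_table[1];

	for (long *start = fixup_table; start < fixup_table + 2; start++) {
		/* Same resolution as the kernel's dest = (void *)start + *start. */
		void *dest = (char *)start + *start;

		printf("entry %ld resolves to %p (site_a=%p, site_b=%p)\n",
		       (long)(start - fixup_table), dest, (void *)&site_a, (void *)&site_b);
	}
	return 0;
}
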
diff --git a/arch/powerpc/lib/restart_table.c b/arch/powerpc/lib/restart_table.c
new file mode 100644
index 000000000000..bccb662c1b7b
--- /dev/null
+++ b/arch/powerpc/lib/restart_table.c
@@ -0,0 +1,56 @@
+#include <asm/interrupt.h>
+#include <asm/kprobes.h>
+
+struct soft_mask_table_entry {
+ unsigned long start;
+ unsigned long end;
+};
+
+struct restart_table_entry {
+ unsigned long start;
+ unsigned long end;
+ unsigned long fixup;
+};
+
+extern struct soft_mask_table_entry __start___soft_mask_table[];
+extern struct soft_mask_table_entry __stop___soft_mask_table[];
+
+extern struct restart_table_entry __start___restart_table[];
+extern struct restart_table_entry __stop___restart_table[];
+
+/* Given an address, look for it in the soft mask table */
+bool search_kernel_soft_mask_table(unsigned long addr)
+{
+ struct soft_mask_table_entry *smte = __start___soft_mask_table;
+
+ while (smte < __stop___soft_mask_table) {
+ unsigned long start = smte->start;
+ unsigned long end = smte->end;
+
+ if (addr >= start && addr < end)
+ return true;
+
+ smte++;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(search_kernel_soft_mask_table);
+
+/* Given an address, look for it in the kernel exception table */
+unsigned long search_kernel_restart_table(unsigned long addr)
+{
+ struct restart_table_entry *rte = __start___restart_table;
+
+ while (rte < __stop___restart_table) {
+ unsigned long start = rte->start;
+ unsigned long end = rte->end;
+ unsigned long fixup = rte->fixup;
+
+ if (addr >= start && addr < end)
+ return fixup;
+
+ rte++;
+ }
+ return 0;
+}
+NOKPROBE_SYMBOL(search_kernel_restart_table);
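
A restart table entry means: if an interrupt lands while NIP is inside [start, end), the interrupt return path should resume at fixup instead of the interrupted address. A tiny harness showing how search_kernel_restart_table() would be consulted; the addresses are made-up constants, not real kernel symbols:

#include <stdio.h>

struct restart_table_entry {
	unsigned long start, end, fixup;
};

static const struct restart_table_entry restart_table[] = {
	{ 0x1000, 0x1040, 0x1100 },	/* hypothetical critical sequence and its restart point */
};

static unsigned long search_restart_table(unsigned long addr)
{
	for (unsigned long i = 0; i < sizeof(restart_table) / sizeof(restart_table[0]); i++)
		if (addr >= restart_table[i].start && addr < restart_table[i].end)
			return restart_table[i].fixup;
	return 0;
}

int main(void)
{
	unsigned long nip = 0x1020;		/* interrupted inside the protected window */
	unsigned long fixup = search_restart_table(nip);

	if (fixup)
		nip = fixup;			/* interrupt exit resumes at the fixup */
	printf("resume at 0x%lx\n", nip);	/* 0x1100 */
	return 0;
}
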
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 45bda2520755..d8d5f901cee1 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1700,6 +1700,28 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->val = regs->ccr & imm;
goto compute_done;
+ case 128: /* setb */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ /*
+ * 'ra' encodes the CR field number (bfa) in the top 3 bits.
+ * Since each CR field is 4 bits,
+ * we can simply mask off the bottom two bits (bfa * 4)
+ * to yield the first bit in the CR field.
+ */
+ ra = ra & ~0x3;
+ /* 'val' stores bits of the CR field (bfa) */
+ val = regs->ccr >> (CR0_SHIFT - ra);
+ /* checks if the LT bit of CR field (bfa) is set */
+ if (val & 8)
+ op->val = -1;
+ /* checks if the GT bit of CR field (bfa) is set */
+ else if (val & 4)
+ op->val = 1;
+ else
+ op->val = 0;
+ goto compute_done;
+
case 144: /* mtcrf */
op->type = COMPUTE + SETCC;
imm = 0xf0000000UL;
@@ -3203,7 +3225,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
default:
WARN_ON_ONCE(1);
}
- regs->nip = next_pc;
+ regs_set_return_ip(regs, next_pc);
}
NOKPROBE_SYMBOL(emulate_update_regs);
@@ -3541,7 +3563,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
/* can't step mtmsr[d] that would clear MSR_RI */
return -1;
/* here op.val is the mask of bits to change */
- regs->msr = (regs->msr & ~op.val) | (val & op.val);
+ regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
goto instr_done;
#ifdef CONFIG_PPC64
@@ -3554,7 +3576,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
cpu_has_feature(CPU_FTR_REAL_LE) &&
regs->gpr[0] == 0x1ebe) {
- regs->msr ^= MSR_LE;
+ regs_set_return_msr(regs, regs->msr ^ MSR_LE);
goto instr_done;
}
regs->gpr[9] = regs->gpr[13];
@@ -3562,8 +3584,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
regs->gpr[11] = regs->nip + 4;
regs->gpr[12] = regs->msr & MSR_MASK;
regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_common;
- regs->msr = MSR_KERNEL;
+ regs_set_return_ip(regs, (unsigned long) &system_call_common);
+ regs_set_return_msr(regs, MSR_KERNEL);
return 1;
#ifdef CONFIG_PPC_BOOK3S_64
@@ -3573,8 +3595,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
regs->gpr[11] = regs->nip + 4;
regs->gpr[12] = regs->msr & MSR_MASK;
regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_vectored_emulate;
- regs->msr = MSR_KERNEL;
+ regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
+ regs_set_return_msr(regs, MSR_KERNEL);
return 1;
#endif
@@ -3585,7 +3607,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
return 0;
instr_done:
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
+ regs_set_return_ip(regs,
+ truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
return 1;
}
NOKPROBE_SYMBOL(emulate_step);
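
The setb emulation added to analyse_instr() selects one 4-bit CR field (BFA, carried in the top three bits of the RA field) and yields -1, 1 or 0 according to that field's LT and GT bits. The same computation in plain C, exercised with the CCR values from the new test_emulate_step.c subtests below:

#include <stdint.h>
#include <stdio.h>

/* Result of setb for CR field 'bfa' (0..7) of a 32-bit CR image. */
static long setb(uint32_t ccr, unsigned int bfa)
{
	unsigned int field = (ccr >> (28 - 4 * bfa)) & 0xf;	/* LT GT EQ SO */

	if (field & 8)		/* LT */
		return -1;
	if (field & 4)		/* GT */
		return 1;
	return 0;
}

int main(void)
{
	/* The three cases added to test_emulate_step.c */
	printf("%ld\n", setb(0x4000000, 1));	/*  1: GT set in CR1 */
	printf("%ld\n", setb(0x8000, 4));	/* -1: LT set in CR4 */
	printf("%ld\n", setb(0x200, 5));	/*  0: only EQ set in CR5 */
	return 0;
}
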
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 783d1b85ecfe..8b4f6b3e96c4 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -53,6 +53,8 @@
ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
PPC_RAW_ADDI(t, a, i))
+#define TEST_SETB(t, bfa) ppc_inst(PPC_INST_SETB | ___PPC_RT(t) | ___PPC_RA((bfa & 0x7) << 2))
+
static void __init init_pt_regs(struct pt_regs *regs)
{
@@ -824,8 +826,7 @@ static void __init test_plxvp_pstxvp(void)
* XTp = 32xTX + 2xTp
* let RA=3 R=0 D=d0||d1=0 R=0 Tp=1 TX=1
*/
- instr = ppc_inst_prefix(PPC_RAW_PLXVP(34, 0, 3, 0) >> 32,
- PPC_RAW_PLXVP(34, 0, 3, 0) & 0xffffffff);
+ instr = ppc_inst_prefix(PPC_RAW_PLXVP_P(34, 0, 3, 0), PPC_RAW_PLXVP_S(34, 0, 3, 0));
stepped = emulate_step(&regs, instr);
if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) {
@@ -853,8 +854,7 @@ static void __init test_plxvp_pstxvp(void)
* XSp = 32xSX + 2xSp
* let RA=3 D=d0||d1=0 R=0 Sp=1 SX=1
*/
- instr = ppc_inst_prefix(PPC_RAW_PSTXVP(34, 0, 3, 0) >> 32,
- PPC_RAW_PSTXVP(34, 0, 3, 0) & 0xffffffff);
+ instr = ppc_inst_prefix(PPC_RAW_PSTXVP_P(34, 0, 3, 0), PPC_RAW_PSTXVP_S(34, 0, 3, 0));
stepped = emulate_step(&regs, instr);
@@ -922,7 +922,7 @@ static struct compute_test compute_tests[] = {
.subtests = {
{
.descr = "R0 = LONG_MAX",
- .instr = ppc_inst(PPC_INST_NOP),
+ .instr = ppc_inst(PPC_RAW_NOP()),
.regs = {
.gpr[0] = LONG_MAX,
}
@@ -930,6 +930,33 @@ static struct compute_test compute_tests[] = {
}
},
{
+ .mnemonic = "setb",
+ .cpu_feature = CPU_FTR_ARCH_300,
+ .subtests = {
+ {
+ .descr = "BFA = 1, CR = GT",
+ .instr = TEST_SETB(20, 1),
+ .regs = {
+ .ccr = 0x4000000,
+ }
+ },
+ {
+ .descr = "BFA = 4, CR = LT",
+ .instr = TEST_SETB(20, 4),
+ .regs = {
+ .ccr = 0x8000,
+ }
+ },
+ {
+ .descr = "BFA = 5, CR = EQ",
+ .instr = TEST_SETB(20, 5),
+ .regs = {
+ .ccr = 0x200,
+ }
+ }
+ }
+ },
+ {
.mnemonic = "add",
.subtests = {
{
@@ -1582,6 +1609,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
if (!regs || !ppc_inst_val(instr))
return -EINVAL;
+ /* This is not a return frame regs */
regs->nip = patch_site_addr(&patch__exec_instr);
analysed = analyse_instr(&op, regs, instr);
diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
index 327165f26ca6..36761bd00f38 100644
--- a/arch/powerpc/math-emu/math.c
+++ b/arch/powerpc/math-emu/math.c
@@ -453,7 +453,7 @@ do_mathemu(struct pt_regs *regs)
break;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 0;
illegal:
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index 0a05e51964c1..39b84e7452e1 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -710,7 +710,7 @@ update_regs:
illegal:
if (have_e500_cpu_a005_erratum) {
/* according to e500 cpu a005 erratum, reissue efp inst */
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
pr_debug("re-issue efp inst: %08lx\n", speinsn);
return 0;
}
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index c3df3a8501d4..eae4ec2988fc 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,7 +5,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
+obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
init-common.o mmu_context.o drmem.o \
@@ -13,7 +13,7 @@ obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/
obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/
obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/
-obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
index 7f0c8a78ba0c..15f4773643d2 100644
--- a/arch/powerpc/mm/book3s32/Makefile
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -10,3 +10,4 @@ obj-y += mmu.o mmu_context.o
obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
obj-$(CONFIG_PPC_KUEP) += kuep.o
+obj-$(CONFIG_PPC_KUAP) += kuap.o
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index fb4233a5bdf7..6925ce998557 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -27,8 +27,10 @@
#include <asm/code-patching-asm.h>
#ifdef CONFIG_PTE_64BIT
+#define PTE_T_SIZE 8
#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */
#else
+#define PTE_T_SIZE 4
#define PTE_FLAGS_OFFSET 0
#endif
@@ -488,7 +490,7 @@ _GLOBAL(flush_hash_pages)
bne 2f
ble cr1,19f
addi r4,r4,0x1000
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r6,r6,-1
b 1b
@@ -573,7 +575,7 @@ _GLOBAL(flush_hash_pages)
8: ble cr1,9f /* if all ptes checked */
81: addi r6,r6,-1
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r4,r4,0x1000
lwz r0,0(r5) /* check next pte */
cmpwi cr1,r6,1
diff --git a/arch/powerpc/mm/book3s32/kuap.c b/arch/powerpc/mm/book3s32/kuap.c
new file mode 100644
index 000000000000..0f920f09af57
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/kuap.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
+void kuap_lock_all_ool(void)
+{
+ kuap_lock_all();
+}
+EXPORT_SYMBOL(kuap_lock_all_ool);
+
+void kuap_unlock_all_ool(void)
+{
+ kuap_unlock_all();
+}
+EXPORT_SYMBOL(kuap_unlock_all_ool);
+
+void setup_kuap(bool disabled)
+{
+ if (!disabled)
+ kuap_lock_all_ool();
+
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ static_branch_enable(&disable_kuap_key);
+ else
+ pr_info("Activating Kernel Userspace Access Protection\n");
+}
diff --git a/arch/powerpc/mm/book3s32/kuep.c b/arch/powerpc/mm/book3s32/kuep.c
index 8ed1b8634839..c20733d6e02c 100644
--- a/arch/powerpc/mm/book3s32/kuep.c
+++ b/arch/powerpc/mm/book3s32/kuep.c
@@ -1,40 +1,20 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <asm/kup.h>
-#include <asm/reg.h>
-#include <asm/task_size_32.h>
-#include <asm/mmu.h>
+#include <asm/smp.h>
-#define KUEP_UPDATE_TWO_USER_SEGMENTS(n) do { \
- if (TASK_SIZE > ((n) << 28)) \
- mtsr(val1, (n) << 28); \
- if (TASK_SIZE > (((n) + 1) << 28)) \
- mtsr(val2, ((n) + 1) << 28); \
- val1 = (val1 + 0x222) & 0xf0ffffff; \
- val2 = (val2 + 0x222) & 0xf0ffffff; \
-} while (0)
+struct static_key_false disable_kuep_key;
-static __always_inline void kuep_update(u32 val)
+void setup_kuep(bool disabled)
{
- int val1 = val;
- int val2 = (val + 0x111) & 0xf0ffffff;
+ if (!disabled)
+ kuep_lock();
- KUEP_UPDATE_TWO_USER_SEGMENTS(0);
- KUEP_UPDATE_TWO_USER_SEGMENTS(2);
- KUEP_UPDATE_TWO_USER_SEGMENTS(4);
- KUEP_UPDATE_TWO_USER_SEGMENTS(6);
- KUEP_UPDATE_TWO_USER_SEGMENTS(8);
- KUEP_UPDATE_TWO_USER_SEGMENTS(10);
- KUEP_UPDATE_TWO_USER_SEGMENTS(12);
- KUEP_UPDATE_TWO_USER_SEGMENTS(14);
-}
+ if (smp_processor_id() != boot_cpuid)
+ return;
-void kuep_lock(void)
-{
- kuep_update(mfsr(0) | SR_NX);
-}
-
-void kuep_unlock(void)
-{
- kuep_update(mfsr(0) & ~SR_NX);
+ if (disabled)
+ static_branch_enable(&disable_kuep_key);
+ else
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
}
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 159930351d9f..27061583a010 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -445,26 +445,6 @@ void __init print_system_hash_info(void)
pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
-{
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- if (disabled)
- pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
-}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-void __init setup_kuap(bool disabled)
-{
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
-}
-#endif
-
void __init early_init_mmu(void)
{
}
diff --git a/arch/powerpc/mm/book3s32/mmu_context.c b/arch/powerpc/mm/book3s32/mmu_context.c
index 218996e40a8e..e2708e387dc3 100644
--- a/arch/powerpc/mm/book3s32/mmu_context.c
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
@@ -24,6 +24,12 @@
#include <asm/mmu_context.h>
/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+void *abatron_pteptrs[2];
+
+/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
* (virtual segment identifiers) for each context. Although the
* hardware supports 24-bit VSIDs, and thus >1 million contexts,
@@ -39,19 +45,6 @@
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- *
- *
- * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- * & 0xffffff)
- */
-
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
@@ -111,3 +104,32 @@ void __init mmu_context_init(void)
context_map[0] = (1 << FIRST_CONTEXT) - 1;
next_mmu_context = FIRST_CONTEXT;
}
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+ long id = next->context.id;
+ unsigned long val;
+
+ if (id < 0)
+ panic("mm_struct %p has no context ID", next);
+
+ isync();
+
+ val = CTX_TO_VSID(id, 0);
+ if (!kuep_is_disabled())
+ val |= SR_NX;
+ if (!kuap_is_disabled())
+ val |= SR_KS;
+
+ update_user_segments(val);
+
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
+
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ mtspr(SPRN_SDR1, rol32(__pa(next->pgd), 4) & 0xffff01ff);
+
+ mb(); /* sync */
+ isync();
+}
+EXPORT_SYMBOL(switch_mmu_context);
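
The CTX_TO_VSID() skew used by switch_mmu_context() above is the one spelled
out in the comment removed earlier in this file. As an illustration only (not
the header's exact definition), it maps a context and effective segment to a
VSID as follows:

	/* Illustration of the context-to-VSID skew from the old comment:
	 * CTX_TO_VSID(ctx, va) = (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) & 0xffffff)
	 */
	static inline unsigned long ctx_to_vsid(unsigned long ctx, unsigned long va)
	{
		return (ctx * (897 * 16) + (va >> 28) * 0x111) & 0xffffff;
	}
	/* e.g. context 1: segment 0 -> 0x3810, segment 1 -> 0x3921, ... */
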
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 96d9aa164007..ac5720371c0d 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1522,8 +1522,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
}
EXPORT_SYMBOL_GPL(hash_page);
-DECLARE_INTERRUPT_HANDLER_RET(__do_hash_fault);
-DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
+DECLARE_INTERRUPT_HANDLER(__do_hash_fault);
+DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
{
unsigned long ea = regs->dar;
unsigned long dsisr = regs->dsisr;
@@ -1533,6 +1533,11 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
unsigned int region_id;
long err;
+ if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
+ hash__do_page_fault(regs);
+ return;
+ }
+
region_id = get_region_id(ea);
if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
mm = &init_mm;
@@ -1571,9 +1576,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
bad_page_fault(regs, SIGBUS);
}
err = 0;
- }
- return err;
+ } else if (err) {
+ hash__do_page_fault(regs);
+ }
}
/*
@@ -1582,13 +1588,6 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
*/
DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
{
- unsigned long dsisr = regs->dsisr;
-
- if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
- hash__do_page_fault(regs);
- return 0;
- }
-
/*
* If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
* don't call hash_page, just fail the fault. This is required to
@@ -1607,8 +1606,7 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
return 0;
}
- if (__do_hash_fault(regs))
- hash__do_page_fault(regs);
+ __do_hash_fault(regs);
return 0;
}
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 5fef8db3b463..2176a5f70746 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
@@ -357,30 +358,19 @@ static void __init radix_init_pgtable(void)
}
/* Find out how many PID bits are supported */
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
- mmu_base_pid = 1;
- } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ if (!cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
- * When KVM is possible, we only use the top half of the
- * PID space to avoid collisions between host and guest PIDs
- * which can cause problems due to prefetch when exiting the
- * guest with AIL=3
+ * Older versions of KVM on these machines prefer that the
+ * guest only use the low 19 PID bits.
*/
- mmu_base_pid = 1 << (mmu_pid_bits - 1);
-#else
- mmu_base_pid = 1;
-#endif
- } else {
- /* The guest uses the bottom half of the PID space */
if (!mmu_pid_bits)
mmu_pid_bits = 19;
- mmu_base_pid = 1;
+ } else {
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 20;
}
+ mmu_base_pid = 1;
/*
* Allocate Partition table and process table for the
@@ -486,6 +476,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
def = &mmu_psize_defs[idx];
def->shift = shift;
def->ap = ap;
+ def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
}
/* needed ? */
@@ -560,9 +551,13 @@ void __init radix__early_init_devtree(void)
*/
mmu_psize_defs[MMU_PAGE_4K].shift = 12;
mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
+ mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_4K);
mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
+ mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_64K);
}
/*
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 409e61210789..ae12fb5559cb 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -20,10 +20,6 @@
#include "internal.h"
-#define RIC_FLUSH_TLB 0
-#define RIC_FLUSH_PWC 1
-#define RIC_FLUSH_ALL 2
-
/*
* tlbiel instruction for radix, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
@@ -130,6 +126,21 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_pid_lpid(unsigned long pid,
+ unsigned long lpid,
+ unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = PPC_BIT(53); /* IS = 1 */
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -190,6 +201,23 @@ static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap, unsigned long ric)
{
@@ -235,6 +263,22 @@ static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
}
}
+static inline void fixup_tlbie_va_range_lpid(unsigned long va,
+ unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap)
+{
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
+ }
+}
+
static inline void fixup_tlbie_pid(unsigned long pid)
{
/*
@@ -254,6 +298,25 @@ static inline void fixup_tlbie_pid(unsigned long pid)
}
}
+static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
+{
+ /*
+ * We can use any address for the invalidation, pick one which is
+ * probably unused as an optimisation.
+ */
+ unsigned long va = ((1UL << 52) - 1);
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
+ RIC_FLUSH_TLB);
+ }
+}
static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap)
@@ -291,22 +354,30 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
asm volatile("ptesync": : :"memory");
- /*
- * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
- * also flush the entire Page Walk Cache.
- */
- __tlbiel_pid(pid, 0, ric);
+ switch (ric) {
+ case RIC_FLUSH_PWC:
- /* For PWC, only one flush is needed */
- if (ric == RIC_FLUSH_PWC) {
+ /* For PWC, only one flush is needed */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
ppc_after_tlbiel_barrier();
return;
+ case RIC_FLUSH_TLB:
+ __tlbiel_pid(pid, 0, RIC_FLUSH_TLB);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ /*
+ * Flush the first set of the TLB, and if
+ * we're doing a RIC_FLUSH_ALL, also flush
+ * the entire Page Walk Cache.
+ */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_ALL);
}
if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
@@ -344,6 +415,31 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
+ unsigned long ric)
+{
+ asm volatile("ptesync" : : : "memory");
+
+ /*
+ * Workaround the fact that the "ric" argument to __tlbie_pid_lpid
+ * must be a compile-time constant to match the "i" constraint
+ * in the asm statement.
+ */
+ switch (ric) {
+ case RIC_FLUSH_TLB:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ break;
+ case RIC_FLUSH_PWC:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ }
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
struct tlbiel_pid {
unsigned long pid;
unsigned long ric;
@@ -469,6 +565,20 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
fixup_tlbie_va_range(addr - page_size, pid, ap);
}
+static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize)
+{
+ unsigned long addr;
+ unsigned long ap = mmu_get_ap(psize);
+
+ for (addr = start; addr < end; addr += page_size)
+ __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
+
+ fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
+}
+
static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
unsigned long psize, unsigned long ric)
{
@@ -549,6 +659,18 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize, bool also_pwc)
+{
+ asm volatile("ptesync" : : : "memory");
+ if (also_pwc)
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+
static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
@@ -1176,7 +1298,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
}
}
-static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
unsigned long start, unsigned long end,
int psize, bool also_pwc)
{
@@ -1338,47 +1460,57 @@ void radix__flush_tlb_all(void)
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
+/*
+ * Performs process-scoped invalidations for a given LPID
+ * as part of H_RPT_INVALIDATE hcall.
+ */
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
{
- unsigned long pid = mm->context.id;
+ unsigned long psize, nr_pages;
+ struct mmu_psize_def *def;
+ bool flush_pid;
- if (unlikely(pid == MMU_NO_CONTEXT))
+ /*
+ * An H_RPTI_TYPE_ALL request implies RIC=3, hence
+ * do a single IS=1 based flush.
+ */
+ if ((type & H_RPTI_TYPE_ALL) == H_RPTI_TYPE_ALL) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
return;
+ }
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
- return;
+ if (type & H_RPTI_TYPE_PWC)
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
- /*
- * If this context hasn't run on that CPU before and KVM is
- * around, there's a slim chance that the guest on another
- * CPU just brought in obsolete translation into the TLB of
- * this CPU due to a bad prefetch using the guest PID on
- * the way into the hypervisor.
- *
- * We work around this here. If KVM is possible, we check if
- * any sibling thread is in KVM. If it is, the window may exist
- * and thus we flush that PID from the core.
- *
- * A potential future improvement would be to mark which PIDs
- * have never been used on the system and avoid it if the PID
- * is new and the process has no other cpumask bit set.
- */
- if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
- int cpu = smp_processor_id();
- int sib = cpu_first_thread_sibling(cpu);
- bool flush = false;
-
- for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
- if (sib == cpu)
- continue;
- if (!cpu_possible(sib))
- continue;
- if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
- flush = true;
+ /* Full PID flush */
+ if (start == 0 && end == -1)
+ return _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+
+ /* Do range invalidation for all the valid page sizes */
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+
+ /*
+ * If the number of pages spanning the range is above
+ * the ceiling, convert the request into a full PID flush.
+ * Since a PID flush takes out all the page sizes, there
+ * is no need to consider the remaining page sizes.
+ */
+ if (flush_pid) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ return;
}
- if (flush)
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ _tlbie_va_range_lpid(start, end, pid, lpid,
+ (1UL << def->shift), psize, false);
}
}
-EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
+EXPORT_SYMBOL_GPL(do_h_rpt_invalidate_prt);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
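
The switch statements in _tlbiel_pid() and _tlbie_pid_lpid() above exist
because RIC reaches the tlbie/tlbiel encoding through an "i" (immediate) asm
operand, so each call into the __tlbie*() helpers must pass a compile-time
constant. A generic sketch of that constant-dispatch pattern, with MY_OP
standing in for the real PPC_TLBIE_5 asm (names here are illustrative):

	/* "i" operands only accept compile-time constants, hence the switch. */
	#define MY_OP(ric)	asm volatile("# ric=%0" : : "i"(ric))

	static void dispatch(int ric)
	{
		switch (ric) {
		case 0:
			MY_OP(0);
			break;
		case 1:
			MY_OP(1);
			break;
		default:
			MY_OP(2);
			break;
		}
	}
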
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
index 743e11384dea..9d13143b8be4 100644
--- a/arch/powerpc/mm/ioremap_32.c
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -70,10 +70,10 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
*/
pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
- err = early_ioremap_range(ioremap_bot - size, p, size, prot);
+ err = early_ioremap_range(ioremap_bot - size - PAGE_SIZE, p, size, prot);
if (err)
return NULL;
- ioremap_bot -= size;
+ ioremap_bot -= size + PAGE_SIZE;
return (void __iomem *)ioremap_bot + offset;
}
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index ba5cbb0d66bd..3acece00b33e 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -38,7 +38,7 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
return NULL;
ret = (void __iomem *)ioremap_bot + offset;
- ioremap_bot += size;
+ ioremap_bot += size + PAGE_SIZE;
return ret;
}
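
Both ioremap paths now step ioremap_bot by size + PAGE_SIZE while mapping only
size bytes, leaving one unmapped guard page between successive ioremap() areas
so that overruns fault instead of hitting a neighbouring mapping. A worked
example with made-up addresses, assuming PAGE_SIZE is 0x1000 (32-bit case):

	/* ioremap_bot = 0xf0000000 initially:
	 *   1st ioremap(0x3000): maps 0xefffc000..0xefffefff, bot -> 0xefffc000,
	 *                        0xeffff000..0xefffffff stays unmapped (guard)
	 *   2nd ioremap(0x2000): maps 0xefff9000..0xefffafff, bot -> 0xefff9000,
	 *                        0xefffb000..0xefffbfff stays unmapped (guard)
	 */
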
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
index a3c30a884076..aad7c47e0030 100644
--- a/arch/powerpc/mm/maccess.c
+++ b/arch/powerpc/mm/maccess.c
@@ -12,7 +12,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
return is_kernel_addr((unsigned long)unsafe_src);
}
-int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src)
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src)
{
unsigned int val, suffix;
int err;
@@ -21,7 +21,7 @@ int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src)
if (err)
return err;
if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
- err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4);
+ err = copy_from_kernel_nofault(&suffix, src + 1, sizeof(suffix));
*inst = ppc_inst_prefix(val, suffix);
} else {
*inst = ppc_inst(val);
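
With src now typed as u32 *, the suffix word of a prefixed instruction is
simply src + 1 (the next 32-bit word), so the old (void *)src + 4 arithmetic
is no longer needed. A minimal sketch of the two reads (variable names are
illustrative):

	u32 prefix, suffix;

	/* read the prefix word, then the 4-byte suffix word right after it */
	err = copy_from_kernel_nofault(&prefix, src, sizeof(prefix));
	if (!err && get_op(prefix) == OP_PREFIX)
		err = copy_from_kernel_nofault(&suffix, src + 1, sizeof(suffix));
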
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 043bbeaf407c..ad198b439222 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -20,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
+#include <asm/sparsemem.h>
#include <asm/svm.h>
#include <mm/mmu_decl.h>
@@ -27,6 +28,9 @@
unsigned long long memory_limit;
bool init_mem_is_free;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -126,7 +130,7 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
}
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -161,7 +165,7 @@ static int __init mark_nonram_nosave(void)
return 0;
}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
return 0;
@@ -298,6 +302,10 @@ void __init mem_init(void)
ioremap_bot, IOREMAP_TOP);
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
VMALLOC_START, VMALLOC_END);
+#ifdef MODULES_VADDR
+ pr_info(" * 0x%08lx..0x%08lx : modules\n",
+ MODULES_VADDR, MODULES_END);
+#endif
#endif /* CONFIG_PPC32 */
}
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index a857af401738..74246536b832 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -83,9 +83,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
- if (new_on_cpu)
- radix_kvm_prefetch_workaround(next);
- else
+ if (!new_on_cpu)
membarrier_arch_switch_mm(prev, next, tsk);
/*
diff --git a/arch/powerpc/mm/nohash/44x.c b/arch/powerpc/mm/nohash/44x.c
index 3d6ae7c72412..e079f26b267e 100644
--- a/arch/powerpc/mm/nohash/44x.c
+++ b/arch/powerpc/mm/nohash/44x.c
@@ -25,6 +25,7 @@
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
+#include <asm/smp.h>
#include <mm/mmu_decl.h>
@@ -239,3 +240,19 @@ void __init mmu_init_secondary(int cpu)
}
}
#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC_KUEP
+void setup_kuep(bool disabled)
+{
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ patch_instruction_site(&patch__tlb_44x_kuep, ppc_inst(PPC_RAW_NOP()));
+ else
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+
+ if (IS_ENABLED(CONFIG_PPC_47x) && disabled)
+ patch_instruction_site(&patch__tlb_47x_kuep, ppc_inst(PPC_RAW_NOP()));
+}
+#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 71bfdbedacee..60780e089118 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -212,37 +212,6 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
-/*
- * Set up to use a given MMU context.
- * id is context number, pgd is PGD pointer.
- *
- * We place the physical address of the new task page directory loaded
- * into the MMU base register, and set the ASID compare register with
- * the new "context."
- */
-void set_context(unsigned long id, pgd_t *pgd)
-{
- s16 offset = (s16)(__pa(swapper_pg_dir));
-
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- if (IS_ENABLED(CONFIG_BDI_SWITCH))
- abatron_pteptrs[1] = pgd;
-
- /* Register M_TWB will contain base address of level 1 table minus the
- * lower part of the kernel PGDIR base address, so that all accesses to
- * level 1 table are done relative to lower part of kernel PGDIR base
- * address.
- */
- mtspr(SPRN_M_TWB, __pa(pgd) - offset);
-
- /* Update context */
- mtspr(SPRN_M_CASID, id - 1);
- /* sync */
- mb();
-}
-
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
@@ -256,12 +225,17 @@ void __init setup_kuep(bool disabled)
#endif
#ifdef CONFIG_PPC_KUAP
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
void __init setup_kuap(bool disabled)
{
- pr_info("Activating Kernel Userspace Access Protection\n");
+ if (disabled) {
+ static_branch_enable(&disable_kuap_key);
+ return;
+ }
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
+ pr_info("Activating Kernel Userspace Access Protection\n");
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index aac81c9f84a5..44b2b5e7cabe 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -21,21 +21,6 @@
* also clear mm->cpu_vm_mask bits when processes are migrated
*/
-//#define DEBUG_MAP_CONSISTENCY
-//#define DEBUG_CLAMP_LAST_CONTEXT 31
-//#define DEBUG_HARDER
-
-/* We don't use DEBUG because it tends to be compiled in always nowadays
- * and this would generate way too much output
- */
-#ifdef DEBUG_HARDER
-#define pr_hard(args...) printk(KERN_DEBUG args)
-#define pr_hardcont(args...) printk(KERN_CONT args)
-#else
-#define pr_hard(args...) do { } while(0)
-#define pr_hardcont(args...) do { } while(0)
-#endif
-
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -47,10 +32,17 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include <asm/smp.h>
#include <mm/mmu_decl.h>
/*
+ * Room for two PTE table pointers, usually the kernel and current user
+ * pointers to their respective root page table (pgdir).
+ */
+void *abatron_pteptrs[2];
+
+/*
* The MPC8xx has only 16 contexts. We rotate through them on each task switch.
* A better way would be to keep track of tasks that own contexts, and implement
* an LRU usage. That way very active tasks don't always have to pay the TLB
@@ -68,9 +60,7 @@
* -- BenH
*/
#define FIRST_CONTEXT 1
-#ifdef DEBUG_CLAMP_LAST_CONTEXT
-#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
-#elif defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
@@ -80,9 +70,7 @@
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
-#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS];
-#endif
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
@@ -105,7 +93,6 @@ static DEFINE_RAW_SPINLOCK(context_lock);
* the stale map as we can just flush the local CPU
* -- benh
*/
-#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
struct mm_struct *mm;
@@ -127,7 +114,6 @@ static unsigned int steal_context_smp(unsigned int id)
id = FIRST_CONTEXT;
continue;
}
- pr_hardcont(" | steal %d from 0x%p", id, mm);
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
@@ -158,34 +144,25 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will cause the caller to try again */
return MMU_NO_CONTEXT;
}
-#endif /* CONFIG_SMP */
static unsigned int steal_all_contexts(void)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
unsigned int id;
for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
if (id != FIRST_CONTEXT) {
context_mm[id] = NULL;
__clear_bit(id, context_map);
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
}
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
}
/* Flush the TLB for all contexts (not to be used on SMP) */
@@ -204,15 +181,11 @@ static unsigned int steal_all_contexts(void)
static unsigned int steal_context_up(unsigned int id)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
@@ -220,81 +193,64 @@ static unsigned int steal_context_up(unsigned int id)
mm->context.id = MMU_NO_CONTEXT;
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
return id;
}
-#ifdef DEBUG_MAP_CONSISTENCY
-static void context_check_map(void)
+static void set_context(unsigned long id, pgd_t *pgd)
{
- unsigned int id, nrf, nact;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ s16 offset = (s16)(__pa(swapper_pg_dir));
+
+ /*
+ * Register M_TWB will contain base address of level 1 table minus the
+ * lower part of the kernel PGDIR base address, so that all accesses to
+ * level 1 table are done relative to lower part of kernel PGDIR base
+ * address.
+ */
+ mtspr(SPRN_M_TWB, __pa(pgd) - offset);
- nrf = nact = 0;
- for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
- int used = test_bit(id, context_map);
- if (!used)
- nrf++;
- if (used != (context_mm[id] != NULL))
- pr_err("MMU: Context %d is %s and MM is %p !\n",
- id, used ? "used" : "free", context_mm[id]);
- if (context_mm[id] != NULL)
- nact += context_mm[id]->context.active;
- }
- if (nrf != nr_free_contexts) {
- pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
- nr_free_contexts, nrf);
- nr_free_contexts = nrf;
+ /* Update context */
+ mtspr(SPRN_M_CASID, id - 1);
+
+ /* sync */
+ mb();
+ } else {
+ if (IS_ENABLED(CONFIG_40x))
+ mb(); /* sync */
+
+ mtspr(SPRN_PID, id);
+ isync();
}
- if (nact > num_online_cpus())
- pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
- nact, num_online_cpus());
- if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
- pr_err("MMU: Context 0 has been freed !!!\n");
}
-#else
-static void context_check_map(void) { }
-#endif
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int id;
-#ifdef CONFIG_SMP
unsigned int i, cpu = smp_processor_id();
-#endif
unsigned long *map;
/* No lockless fast path .. yet */
raw_spin_lock(&context_lock);
- pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
- cpu, next, next->context.active, next->context.id);
-
-#ifdef CONFIG_SMP
- /* Mark us active and the previous one not anymore */
- next->context.active++;
- if (prev) {
- pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
- WARN_ON(prev->context.active < 1);
- prev->context.active--;
+ if (IS_ENABLED(CONFIG_SMP)) {
+ /* Mark us active and the previous one not anymore */
+ next->context.active++;
+ if (prev) {
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
}
again:
-#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */
id = next->context.id;
- if (likely(id != MMU_NO_CONTEXT)) {
-#ifdef DEBUG_MAP_CONSISTENCY
- if (context_mm[id] != next)
- pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
- next, id, id, context_mm[id]);
-#endif
+ if (likely(id != MMU_NO_CONTEXT))
goto ctxt_ok;
- }
/* We really don't have a context, let's try to acquire one */
id = next_context;
@@ -304,14 +260,12 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
/* No more free contexts, let's try to steal one */
if (nr_free_contexts == 0) {
-#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
if (id == MMU_NO_CONTEXT)
goto again;
goto stolen;
}
-#endif /* CONFIG_SMP */
if (IS_ENABLED(CONFIG_PPC_8xx))
id = steal_all_contexts();
else
@@ -330,20 +284,13 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
next_context = id + 1;
context_mm[id] = next;
next->context.id = id;
- pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
- context_check_map();
ctxt_ok:
/* If that context got marked stale on this CPU, then flush the
* local TLB for it and unmark it before we use it
*/
-#ifdef CONFIG_SMP
- if (test_bit(id, stale_map[cpu])) {
- pr_hardcont(" | stale flush %d [%d..%d]",
- id, cpu_first_thread_sibling(cpu),
- cpu_last_thread_sibling(cpu));
-
+ if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
local_flush_tlb_mm(next);
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
@@ -353,10 +300,10 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
__clear_bit(id, stale_map[i]);
}
}
-#endif
/* Flick the MMU and release lock */
- pr_hardcont(" -> %d\n", id);
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
set_context(id, next->pgd);
raw_spin_unlock(&context_lock);
}
@@ -366,8 +313,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
*/
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
- pr_hard("initing context for mm @%p\n", mm);
-
/*
* We have MMU_NO_CONTEXT set to be ~0. Hence check
* explicitly against context.id == 0. This ensures that we properly
@@ -401,16 +346,12 @@ void destroy_context(struct mm_struct *mm)
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
context_mm[id] = NULL;
nr_free_contexts++;
}
raw_spin_unlock_irqrestore(&context_lock, flags);
}
-#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
/* We don't touch CPU 0 map, it's allocated at boot and kept
@@ -419,7 +360,6 @@ static int mmu_ctx_cpu_prepare(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
return 0;
}
@@ -430,7 +370,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
@@ -440,8 +379,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
return 0;
}
-#endif /* CONFIG_SMP */
-
/*
* Initialize the context management stuff.
*/
@@ -465,16 +402,16 @@ void __init mmu_context_init(void)
if (!context_mm)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(void *) * (LAST_CONTEXT + 1));
-#ifdef CONFIG_SMP
- stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
- if (!stale_map[boot_cpuid])
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- CTX_MAP_SIZE);
-
- cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
- "powerpc/mmu/ctx:prepare",
- mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
-#endif
+ if (IS_ENABLED(CONFIG_SMP)) {
+ stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!stale_map[boot_cpuid])
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
+
+ cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
+ "powerpc/mmu/ctx:prepare",
+ mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
+ }
printk(KERN_INFO
"MMU: Allocated %zu bytes of context maps for %d contexts\n",
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 68797e072f55..4613bf8e9aae 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -360,19 +360,6 @@ _GLOBAL(_tlbivax_bcast)
sync
wrtee r10
blr
-
-_GLOBAL(set_context)
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
#else
#error Unsupported processor type !
#endif
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
new file mode 100644
index 000000000000..0876216ceee6
--- /dev/null
+++ b/arch/powerpc/mm/pageattr.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * MMU-generic set_memory implementation for powerpc
+ *
+ * Copyright 2019-2021, IBM Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/set_memory.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+/*
+ * Updates the attributes of a page in three steps:
+ *
+ * 1. invalidate the page table entry
+ * 2. flush the TLB
+ * 3. install the new entry with the updated attributes
+ *
+ * Invalidating the pte means there are situations where this will not work
+ * when in theory it should.
+ * For example:
+ * - removing write from page whilst it is being executed
+ * - setting a page read-only whilst it is being read by another CPU
+ *
+ */
+static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+ long action = (long)data;
+ pte_t pte;
+
+ spin_lock(&init_mm.page_table_lock);
+
+ /* invalidate the PTE so it's safe to modify */
+ pte = ptep_get_and_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ /* modify the PTE bits as desired, then apply */
+ switch (action) {
+ case SET_MEMORY_RO:
+ pte = pte_wrprotect(pte);
+ break;
+ case SET_MEMORY_RW:
+ pte = pte_mkwrite(pte_mkdirty(pte));
+ break;
+ case SET_MEMORY_NX:
+ pte = pte_exprotect(pte);
+ break;
+ case SET_MEMORY_X:
+ pte = pte_mkexec(pte);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ set_pte_at(&init_mm, addr, ptep, pte);
+
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync": : :"memory");
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+int change_memory_attr(unsigned long addr, int numpages, long action)
+{
+ unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+ unsigned long size = numpages * PAGE_SIZE;
+
+ if (!numpages)
+ return 0;
+
+ if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
+ is_vm_area_hugepages((void *)addr)))
+ return -EINVAL;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * On hash, the linear mapping is not in the Linux page table so
+ * apply_to_existing_page_range() will have no effect. If in the future
+ * the set_memory_* functions are used on the linear map this will need
+ * to be updated.
+ */
+ if (!radix_enabled()) {
+ int region = get_region_id(addr);
+
+ if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
+ return -EINVAL;
+ }
+#endif
+
+ return apply_to_existing_page_range(&init_mm, start, size,
+ change_page_attr, (void *)action);
+}
+
+/*
+ * Set the attributes of a page:
+ *
+ * This function is used by PPC32 at the end of init to set final kernel memory
+ * protection. It includes changing the mapping of the page it is executing from
+ * and data pages it is using.
+ */
+static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+ pgprot_t prot = __pgprot((unsigned long)data);
+
+ spin_lock(&init_mm.page_table_lock);
+
+ set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
+{
+ unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+ unsigned long sz = numpages * PAGE_SIZE;
+
+ if (numpages <= 0)
+ return 0;
+
+ return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
+ (void *)pgprot_val(prot));
+}
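
A minimal usage sketch for change_memory_attr(), assuming set_memory_ro()-style
wrappers in asm/set_memory.h forward the SET_MEMORY_* actions handled above
(the wrapper shown is an assumption, not necessarily the header's exact
contents):

	/* Sketch: a set_memory_ro()-style wrapper over change_memory_attr(). */
	static inline int set_memory_ro(unsigned long addr, int numpages)
	{
		return change_memory_attr(addr, numpages, SET_MEMORY_RO);
	}

	/* e.g. write-protect one page-aligned vmalloc'ed page after init */
	err = set_memory_ro((unsigned long)buf, 1);
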
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 354611940118..cd16b407f47e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -28,6 +28,14 @@
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
+#ifdef CONFIG_PPC64
+#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
+#else
+#define PGD_ALIGN PAGE_SIZE
+#endif
+
+pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);
+
static inline int is_exec_fault(void)
{
return current->thread.regs && TRAP(current->thread.regs) == 0x400;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e0ec67a16887..dcf5ecca19d9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,6 +23,7 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
@@ -132,64 +133,20 @@ void __init mapin_ram(void)
}
}
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
-{
- pte_t *kpte;
- unsigned long address;
-
- BUG_ON(PageHighMem(page));
- address = (unsigned long)page_address(page);
-
- if (v_block_mapped(address))
- return 0;
- kpte = virt_to_kpte(address);
- if (!kpte)
- return -EINVAL;
- __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-
- return 0;
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int i, err = 0;
- unsigned long flags;
- struct page *start = page;
-
- local_irq_save(flags);
- for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr_noflush(page, prot);
- if (err)
- break;
- }
- wmb();
- local_irq_restore(flags);
- flush_tlb_kernel_range((unsigned long)page_address(start),
- (unsigned long)page_address(page));
- return err;
-}
-
void mark_initmem_nx(void)
{
- struct page *page = virt_to_page(_sinittext);
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
if (v_block_mapped((unsigned long)_sinittext))
mmu_mark_initmem_nx();
else
- change_page_attr(page, numpages, PAGE_KERNEL);
+ set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
- struct page *page;
unsigned long numpages;
if (v_block_mapped((unsigned long)_stext + 1)) {
@@ -198,20 +155,18 @@ void mark_rodata_ro(void)
return;
}
- page = virt_to_page(_stext);
numpages = PFN_UP((unsigned long)_etext) -
PFN_DOWN((unsigned long)_stext);
- change_page_attr(page, numpages, PAGE_KERNEL_ROX);
+ set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
/*
* mark .rodata as read only. Use __init_begin rather than __end_rodata
* to cover NOTES and EXCEPTION_TABLE.
*/
- page = virt_to_page(__start_rodata);
numpages = PFN_UP((unsigned long)__init_begin) -
PFN_DOWN((unsigned long)__start_rodata);
- change_page_attr(page, numpages, PAGE_KERNEL_RO);
+ set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);
// mark_initmem_nx() should have already run by now
ptdump_check_wx();
@@ -221,9 +176,14 @@ void mark_rodata_ro(void)
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
+ unsigned long addr = (unsigned long)page_address(page);
+
if (PageHighMem(page))
return;
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ if (enable)
+ set_memory_attr(addr, numpages, PAGE_KERNEL);
+ else
+ set_memory_attr(addr, numpages, __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index aca354fb670b..5062c58b1e5b 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -58,8 +58,6 @@ struct pg_state {
const struct addr_marker *marker;
unsigned long start_address;
unsigned long start_pa;
- unsigned long last_pa;
- unsigned long page_size;
unsigned int level;
u64 current_flags;
bool check_wx;
@@ -163,8 +161,6 @@ static void dump_flag_info(struct pg_state *st, const struct flag_info
static void dump_addr(struct pg_state *st, unsigned long addr)
{
- unsigned long delta;
-
#ifdef CONFIG_PPC64
#define REG "0x%016lx"
#else
@@ -172,14 +168,8 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
#endif
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
- if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
- pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
- delta = st->page_size >> 10;
- } else {
- pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
- delta = (addr - st->start_address) >> 10;
- }
- pt_dump_size(st->seq, delta);
+ pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
+ pt_dump_size(st->seq, (addr - st->start_address) >> 10);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
@@ -208,7 +198,6 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr,
st->current_flags = flag;
st->start_address = addr;
st->start_pa = pa;
- st->page_size = page_size;
while (addr >= st->marker[1].start_address) {
st->marker++;
@@ -220,7 +209,6 @@ static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val, unsigned long page_size)
{
u64 flag = val & pg_level[level].mask;
- u64 pa = val & PTE_RPN_MASK;
/* At first no level is set */
if (!st->level) {
@@ -232,12 +220,9 @@ static void note_page(struct pg_state *st, unsigned long addr,
* - we change levels in the tree.
* - the address is in a different section of memory and is thus
* used for a different purpose, regardless of the flags.
- * - the pa of this page is not adjacent to the last inspected page
*/
} else if (flag != st->current_flags || level != st->level ||
- addr >= st->marker[1].start_address ||
- (pa != st->last_pa + st->page_size &&
- (pa != st->start_pa || st->start_pa != st->last_pa))) {
+ addr >= st->marker[1].start_address) {
/* Check the PTE flags */
if (st->current_flags) {
@@ -259,7 +244,6 @@ static void note_page(struct pg_state *st, unsigned long addr,
*/
note_page_update_state(st, addr, level, val, page_size);
}
- st->last_pa = pa;
}
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 798ac4350a82..53aefee3fe70 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -237,6 +237,7 @@ skip_codegen_passes:
fp->jited_len = alloclen;
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
+ bpf_jit_binary_lock_ro(bpf_hdr);
if (!fp->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
@@ -257,15 +258,3 @@ out:
return fp;
}
-
-/* Overriding bpf_jit_free() as we don't set images read-only. */
-void bpf_jit_free(struct bpf_prog *fp)
-{
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *bpf_hdr = (void *)addr;
-
- if (fp->jited)
- bpf_jit_binary_free(bpf_hdr);
-
- bpf_prog_unlock_free(fp);
-}
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index bbb16099e8c7..cbe5b399ed86 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -108,20 +108,20 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
int i;
/* First arg comes in as a 32 bits pointer. */
- EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), __REG_R3));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), _R3));
EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));
- EMIT(PPC_RAW_STWU(__REG_R1, __REG_R1, -BPF_PPC_STACKFRAME(ctx)));
+ EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
/*
* Initialize tail_call_cnt in stack frame if we do tail calls.
* Otherwise, put in NOPs so that it can be skipped when we are
* invoked through a tail call.
*/
- if (ctx->seen & SEEN_TAILCALL) {
- EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
- } else {
+ if (ctx->seen & SEEN_TAILCALL)
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, _R1,
+ bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ else
EMIT(PPC_RAW_NOP());
- }
#define BPF_TAILCALL_PROLOGUE_SIZE 16
@@ -130,30 +130,30 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_MFLR(__REG_R0));
+ EMIT(PPC_RAW_MFLR(_R0));
/*
* Back up non-volatile regs -- registers r18-r31
*/
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
if (bpf_is_seen_register(ctx, i))
- EMIT(PPC_RAW_STW(i, __REG_R1, bpf_jit_stack_offsetof(ctx, i)));
+ EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
/* If needed retrieve arguments 9 and 10, ie 5th 64 bits arg.*/
if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
- EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, __REG_R1, BPF_PPC_STACKFRAME(ctx)) + 8);
- EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), __REG_R1, BPF_PPC_STACKFRAME(ctx)) + 12);
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx)) + 8);
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx)) + 12);
}
/* Setup frame pointer to point to the bpf stack area */
if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) {
EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0));
- EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), __REG_R1,
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), _R1,
STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_STW(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
@@ -163,24 +163,24 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
/* Restore NVRs */
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
if (bpf_is_seen_register(ctx, i))
- EMIT(PPC_RAW_LWZ(i, __REG_R1, bpf_jit_stack_offsetof(ctx, i)));
+ EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
- EMIT(PPC_RAW_MR(__REG_R3, bpf_to_ppc(ctx, BPF_REG_0)));
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_0)));
bpf_jit_emit_common_epilogue(image, ctx);
/* Tear down our stack frame */
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
- EMIT(PPC_RAW_ADDI(__REG_R1, __REG_R1, BPF_PPC_STACKFRAME(ctx)));
+ EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_MTLR(__REG_R0));
+ EMIT(PPC_RAW_MTLR(_R0));
EMIT(PPC_RAW_BLR());
}
@@ -193,10 +193,10 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
PPC_BL_ABS(func);
} else {
/* Load function address into r0 */
- EMIT(PPC_RAW_LIS(__REG_R0, IMM_H(func)));
- EMIT(PPC_RAW_ORI(__REG_R0, __REG_R0, IMM_L(func)));
- EMIT(PPC_RAW_MTLR(__REG_R0));
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
+ EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
+ EMIT(PPC_RAW_MTCTR(_R0));
+ EMIT(PPC_RAW_BCTRL());
}
}
@@ -215,47 +215,47 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* if (index >= array->map.max_entries)
* goto out;
*/
- EMIT(PPC_RAW_LWZ(__REG_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
- EMIT(PPC_RAW_CMPLW(b2p_index, __REG_R0));
- EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
+ EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
PPC_BCC(COND_GE, out);
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT(PPC_RAW_CMPLWI(__REG_R0, MAX_TAIL_CALL_CNT));
+ EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
/* tail_call_cnt++; */
- EMIT(PPC_RAW_ADDIC(__REG_R0, __REG_R0, 1));
+ EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
PPC_BCC(COND_GT, out);
/* prog = array->ptrs[index]; */
- EMIT(PPC_RAW_RLWINM(__REG_R3, b2p_index, 2, 0, 29));
- EMIT(PPC_RAW_ADD(__REG_R3, __REG_R3, b2p_bpf_array));
- EMIT(PPC_RAW_LWZ(__REG_R3, __REG_R3, offsetof(struct bpf_array, ptrs)));
- EMIT(PPC_RAW_STW(__REG_R0, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
+ EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
+ EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
/*
* if (prog == NULL)
* goto out;
*/
- EMIT(PPC_RAW_CMPLWI(__REG_R3, 0));
+ EMIT(PPC_RAW_CMPLWI(_R3, 0));
PPC_BCC(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
- EMIT(PPC_RAW_LWZ(__REG_R3, __REG_R3, offsetof(struct bpf_prog, bpf_func)));
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
- EMIT(PPC_RAW_ADDIC(__REG_R3, __REG_R3, BPF_TAILCALL_PROLOGUE_SIZE));
+ EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_MTLR(__REG_R0));
+ EMIT(PPC_RAW_MTLR(_R0));
- EMIT(PPC_RAW_MTCTR(__REG_R3));
+ EMIT(PPC_RAW_MTCTR(_R3));
- EMIT(PPC_RAW_MR(__REG_R3, bpf_to_ppc(ctx, BPF_REG_1)));
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_1)));
/* tear down stack frame, restore NVRs, ... */
bpf_jit_emit_common_epilogue(image, ctx);
@@ -352,8 +352,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
}
if (imm >= 0)
EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
@@ -362,11 +362,11 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_MULW(__REG_R0, dst_reg, src_reg_h));
+ EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
- EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
break;
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
@@ -376,8 +376,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_MULW(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
}
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
@@ -398,17 +398,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
if (imm < 0)
EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
- EMIT(PPC_RAW_MULHWU(__REG_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
- EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
break;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
- EMIT(PPC_RAW_DIVWU(__REG_R0, dst_reg, src_reg));
- EMIT(PPC_RAW_MULW(__REG_R0, src_reg, __REG_R0));
- EMIT(PPC_RAW_SUB(dst_reg, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
break;
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
return -EOPNOTSUPP;
@@ -420,8 +420,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (imm == 1)
break;
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
if (!imm)
@@ -430,9 +430,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (!is_power_of_2((u32)imm)) {
bpf_set_seen_register(ctx, tmp_reg);
PPC_LI32(tmp_reg, imm);
- EMIT(PPC_RAW_DIVWU(__REG_R0, dst_reg, tmp_reg));
- EMIT(PPC_RAW_MULW(__REG_R0, tmp_reg, __REG_R0));
- EMIT(PPC_RAW_SUB(dst_reg, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
break;
}
if (imm == 1)
@@ -503,8 +503,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
32 - fls(imm), 32 - ffs(imm)));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_AND(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
}
break;
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
@@ -555,12 +555,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32));
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
- EMIT(PPC_RAW_SRW(__REG_R0, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
- EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
break;
@@ -591,12 +591,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32));
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
- EMIT(PPC_RAW_SLW(__REG_R0, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
- EMIT(PPC_RAW_OR(dst_reg, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
break;
@@ -627,15 +627,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32));
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
- EMIT(PPC_RAW_SLW(__REG_R0, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
- EMIT(PPC_RAW_OR(dst_reg, dst_reg, __REG_R0));
- EMIT(PPC_RAW_RLWINM(__REG_R0, tmp_reg, 0, 26, 26));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
+ EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
- EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, __REG_R0));
+ EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
@@ -702,24 +702,24 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* 2 bytes are already in their final position
* -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
*/
- EMIT(PPC_RAW_RLWINM(__REG_R0, dst_reg, 8, 0, 31));
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg, 24, 0, 7));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg, 24, 16, 23));
- EMIT(PPC_RAW_MR(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
break;
case 64:
bpf_set_seen_register(ctx, tmp_reg);
EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
- EMIT(PPC_RAW_RLWINM(__REG_R0, dst_reg_h, 8, 0, 31));
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg_h, 24, 0, 7));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg_h, 24, 16, 23));
- EMIT(PPC_RAW_MR(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
break;
}
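
The rotate-and-insert sequence above performs the BPF_END byte swap without a dedicated byte-reverse instruction: one full rotate by 8 puts two of the four bytes in place, and two inserts of the value rotated by 24 fix up the remaining two. A plain-C sketch of the 32-bit case, using the illustrative helper name swap32:

static inline unsigned int swap32(unsigned int x)
{
	unsigned int r = (x << 8) | (x >> 24);		/* rotate left by 8 */
	unsigned int rot24 = (x << 24) | (x >> 8);	/* rotate left by 24 */

	r = (r & 0x00ffffffu) | (rot24 & 0xff000000u);	/* fix up the top byte */
	r = (r & 0xffff00ffu) | (rot24 & 0x0000ff00u);	/* fix up bits 8-15 */
	return r;					/* same result as __builtin_bswap32(x) */
}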
@@ -744,32 +744,32 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STB(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STB(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STH(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STH(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
break;
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off + 4));
- PPC_EX32(__REG_R0, imm);
- EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
break;
/*
@@ -780,11 +780,11 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
/* load value from memory into r0 */
- EMIT(PPC_RAW_LWARX(__REG_R0, tmp_reg, dst_reg, 0));
+ EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
/* add value from src_reg into this */
- EMIT(PPC_RAW_ADD(__REG_R0, __REG_R0, src_reg));
+ EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
/* store result back */
- EMIT(PPC_RAW_STWCX(__REG_R0, tmp_reg, dst_reg));
+ EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
break;
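
The lwarx/stwcx. pair emitted above is the standard PowerPC load-reserve/store-conditional retry loop, here implementing BPF's atomic add on a 32-bit word at dst + off. A minimal C sketch of the same loop (the helper name atomic_add32 is illustrative; this is not the JIT code itself):

static inline void atomic_add32(unsigned int *addr, unsigned int val)
{
	unsigned int old;

	asm volatile(
"1:	lwarx	%0,0,%2\n"	/* load the word and take a reservation */
"	add	%0,%0,%3\n"	/* add the increment */
"	stwcx.	%0,0,%2\n"	/* store only if the reservation still holds */
"	bne-	1b\n"		/* otherwise retry from the load */
	: "=&r" (old), "+m" (*addr)
	: "r" (addr), "r" (val)
	: "cc", "memory");
}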
@@ -852,14 +852,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
return ret;
if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
- EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, __REG_R1, 8));
- EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), __REG_R1, 12));
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, 8));
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
}
bpf_jit_emit_func_call_rel(image, ctx, func_addr);
- EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, __REG_R3));
- EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), __REG_R4));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
break;
/*
@@ -967,12 +967,12 @@ cond_branch:
EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSET | BPF_X:
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg_h, src_reg_h));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
break;
case BPF_JMP32 | BPF_JSET | BPF_X: {
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
break;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -990,11 +990,11 @@ cond_branch:
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
/* sign-extending load ... but unsigned comparison */
- PPC_EX32(__REG_R0, imm);
- EMIT(PPC_RAW_CMPLW(dst_reg_h, __REG_R0));
- PPC_LI32(__REG_R0, imm);
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
+ PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
- EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JNE | BPF_K:
@@ -1006,8 +1006,8 @@ cond_branch:
if (imm >= 0 && imm < 65536) {
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
}
@@ -1022,9 +1022,9 @@ cond_branch:
} else {
/* sign-extending load */
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
- PPC_LI32(__REG_R0, imm);
+ PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
- EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JSGT | BPF_K:
@@ -1039,32 +1039,32 @@ cond_branch:
EMIT(PPC_RAW_CMPWI(dst_reg, imm));
} else {
/* sign-extending load */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_CMPW(dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPW(dst_reg, _R0));
}
break;
case BPF_JMP | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
- EMIT(PPC_RAW_ANDI(__REG_R0, dst_reg, imm));
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
+ PPC_LI32(_R0, imm);
if (imm < 0) {
EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
}
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= -32768 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
- EMIT(PPC_RAW_ANDI(__REG_R0, dst_reg, imm));
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
}
break;
}
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 57a8c1153851..5cad5b5a7e97 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -94,7 +94,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC) {
- EMIT(PPC_INST_MFLR | __PPC_RT(R0));
+ EMIT(PPC_RAW_MFLR(_R0));
PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
}
@@ -153,8 +153,8 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
PPC_LI64(b2p[TMP_REG_2], func);
/* Load actual entry point from function descriptor */
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
- /* ... and move it to LR */
- EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+ /* ... and move it to CTR */
+ EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
/*
* Load TOC from function descriptor at offset 8.
* We can clobber r2 since we get called through a
@@ -165,9 +165,9 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
#else
/* We can clobber r12 */
PPC_FUNC_ADDR(12, func);
- EMIT(PPC_RAW_MTLR(12));
+ EMIT(PPC_RAW_MTCTR(12));
#endif
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_BCTRL());
}
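
On big-endian ppc64 (ELFv1), the function address passed in is that of an OPD function descriptor rather than of the code itself, which is why the helper-call path above loads the actual entry point from offset 0 and the TOC pointer from offset 8, then branches via CTR. A sketch of that descriptor layout, with illustrative struct and field names:

struct ppc64_func_desc {
	unsigned long entry;	/* code entry point; loaded and moved to CTR */
	unsigned long toc;	/* TOC pointer for r2, read from offset 8 */
	unsigned long env;	/* environment pointer, unused from C */
};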
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
@@ -202,8 +202,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
PPC_BPF_LL(12, 12, 0);
#endif
- EMIT(PPC_RAW_MTLR(12));
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_MTCTR(12));
+ EMIT(PPC_RAW_BCTRL());
}
static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index c02854dea2b2..2f46e31c7612 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -1,9 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PERF_EVENTS) += callchain.o callchain_$(BITS).o perf_regs.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_PERF_EVENTS) += callchain_32.o
-endif
+obj-y += callchain.o callchain_$(BITS).o perf_regs.o
+obj-$(CONFIG_COMPAT) += callchain_32.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 6c028ee513c0..082f6d0308a4 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -40,7 +40,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0;
}
-void
+void __no_sanitize_address
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 16d4d1b6a1ff..bb0ee716de91 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -460,7 +460,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
sizeof(instr)))
return 0;
- return branch_target((struct ppc_inst *)&instr);
+ return branch_target(&instr);
}
/* Userspace: need copy instruction here then translate it */
@@ -468,7 +468,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
sizeof(instr)))
return 0;
- target = branch_target((struct ppc_inst *)&instr);
+ target = branch_target(&instr);
if ((!target) || (instr & BRANCH_ABSOLUTE))
return target;
@@ -2254,7 +2254,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
bool use_siar = regs_use_siar(regs);
unsigned long siar = mfspr(SPRN_SIAR);
- if (ppmu->flags & PPMU_P10_DD1) {
+ if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
if (siar)
return siar;
else
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
index eb8a6aaf4cc1..695975227e60 100644
--- a/arch/powerpc/perf/generic-compat-pmu.c
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -14,45 +14,119 @@
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ pmc ] [unit ] [ ] m [ pmcxsel ]
- * | |
- * | *- mark
- * |
- * |
- * *- combine
- *
- * Below uses IBM bit numbering.
- *
- * MMCR1[x:y] = unit (PMCxUNIT)
- * MMCR1[24] = pmc1combine[0]
- * MMCR1[25] = pmc1combine[1]
- * MMCR1[26] = pmc2combine[0]
- * MMCR1[27] = pmc2combine[1]
- * MMCR1[28] = pmc3combine[0]
- * MMCR1[29] = pmc3combine[1]
- * MMCR1[30] = pmc4combine[0]
- * MMCR1[31] = pmc4combine[1]
- *
+ * [ pmc ] [ pmcxsel ]
*/
/*
- * Some power9 event codes.
+ * Event codes defined in ISA v3.0B
*/
#define EVENT(_name, _code) _name = _code,
enum {
-EVENT(PM_CYC, 0x0001e)
-EVENT(PM_INST_CMPL, 0x00002)
+ /* Cycles, alternate code */
+ EVENT(PM_CYC_ALT, 0x100f0)
+ /* One or more instructions completed in a cycle */
+ EVENT(PM_CYC_INST_CMPL, 0x100f2)
+ /* Floating-point instruction completed */
+ EVENT(PM_FLOP_CMPL, 0x100f4)
+ /* Instruction ERAT/L1-TLB miss */
+ EVENT(PM_L1_ITLB_MISS, 0x100f6)
+ /* All instructions completed and none available */
+ EVENT(PM_NO_INST_AVAIL, 0x100f8)
+ /* A load-type instruction completed (ISA v3.0+) */
+ EVENT(PM_LD_CMPL, 0x100fc)
+ /* Instruction completed, alternate code (ISA v3.0+) */
+ EVENT(PM_INST_CMPL_ALT, 0x100fe)
+ /* A store-type instruction completed */
+ EVENT(PM_ST_CMPL, 0x200f0)
+ /* Instruction Dispatched */
+ EVENT(PM_INST_DISP, 0x200f2)
+ /* Run_cycles */
+ EVENT(PM_RUN_CYC, 0x200f4)
+ /* Data ERAT/L1-TLB miss/reload */
+ EVENT(PM_L1_DTLB_RELOAD, 0x200f6)
+ /* Taken branch completed */
+ EVENT(PM_BR_TAKEN_CMPL, 0x200fa)
+ /* Demand iCache Miss */
+ EVENT(PM_L1_ICACHE_MISS, 0x200fc)
+ /* L1 Dcache reload from memory */
+ EVENT(PM_L1_RELOAD_FROM_MEM, 0x200fe)
+ /* L1 Dcache store miss */
+ EVENT(PM_ST_MISS_L1, 0x300f0)
+ /* Alternate code for PM_INST_DISP */
+ EVENT(PM_INST_DISP_ALT, 0x300f2)
+ /* Branch direction or target mispredicted */
+ EVENT(PM_BR_MISPREDICT, 0x300f6)
+ /* Data TLB miss/reload */
+ EVENT(PM_DTLB_MISS, 0x300fc)
+ /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+ EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
+ /* L1 Dcache load miss */
+ EVENT(PM_LD_MISS_L1, 0x400f0)
+ /* Cycle when instruction(s) dispatched */
+ EVENT(PM_CYC_INST_DISP, 0x400f2)
+ /* Branch or branch target mispredicted */
+ EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+ /* Instructions completed with run latch set */
+ EVENT(PM_RUN_INST_CMPL, 0x400fa)
+ /* Instruction TLB miss/reload */
+ EVENT(PM_ITLB_MISS, 0x400fc)
+ /* Load data not cached */
+ EVENT(PM_LD_NOT_CACHED, 0x400fe)
+ /* Instructions */
+ EVENT(PM_INST_CMPL, 0x500fa)
+ /* Cycles */
+ EVENT(PM_CYC, 0x600f4)
};
#undef EVENT
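
Because these ISA v3.0B codes are exposed through the raw event format (config:0-19, see the format attributes later in this file), they can also be requested directly from user space. A hypothetical sketch using only the standard perf_event_open() syscall, with PM_LD_MISS_L1 (0x400f0) as the example code:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_raw_event(unsigned long long code)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = code;		/* e.g. 0x400f0 for PM_LD_MISS_L1 */
	attr.disabled = 1;

	/* count for the calling thread on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}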
+/* Table of alternatives, sorted in increasing order of column 0 */
+/* Note that in each row, column 0 must be the smallest */
+static const unsigned int generic_event_alternatives[][MAX_ALT] = {
+ { PM_CYC_ALT, PM_CYC },
+ { PM_INST_CMPL_ALT, PM_INST_CMPL },
+ { PM_INST_DISP, PM_INST_DISP_ALT },
+};
+
+static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int num_alt = 0;
+
+ num_alt = isa207_get_alternatives(event, alt,
+ ARRAY_SIZE(generic_event_alternatives), flags,
+ generic_event_alternatives);
+
+ return num_alt;
+}
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_NO_INST_AVAIL);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *generic_compat_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
@@ -63,17 +137,11 @@ static struct attribute_group generic_compat_pmu_events_group = {
PMU_FORMAT_ATTR(event, "config:0-19");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
-PMU_FORMAT_ATTR(mark, "config:8");
-PMU_FORMAT_ATTR(combine, "config:10-11");
-PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
static struct attribute *generic_compat_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
- &format_attr_mark.attr,
- &format_attr_combine.attr,
- &format_attr_unit.attr,
&format_attr_pmc.attr,
NULL,
};
@@ -92,6 +160,9 @@ static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
static int compat_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_NO_INST_AVAIL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -105,11 +176,11 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -119,7 +190,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -133,7 +204,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -147,7 +218,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -161,7 +232,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -175,7 +246,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -204,13 +275,30 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
+/*
+ * We set MMCR0[CC56RUN] so we can use counters 5 and 6 for
+ * PM_INST_CMPL and PM_CYC.
+ */
+static int generic_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags)
+{
+ int ret;
+
+ ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
+ if (!ret)
+ mmcr->mmcr0 |= MMCR0_C56RUN;
+ return ret;
+}
+
static struct power_pmu generic_compat_pmu = {
.name = "GENERIC_COMPAT",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
- .compute_mmcr = isa207_compute_mmcr,
+ .compute_mmcr = generic_compute_mmcr,
.get_constraint = isa207_get_constraint,
+ .get_alternatives = generic_get_alternatives,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(compat_generic_events),
@@ -223,6 +311,16 @@ int init_generic_compat_pmu(void)
{
int rc = 0;
+ /*
+ * From ISA v2.07 on, PMU features are architected;
+ * we require >= v3.0 because (a) that has PM_LD_CMPL and
+ * PM_INST_CMPL_ALT, which v2.07 doesn't have, and
+ * (b) we don't expect any non-IBM Power ISA
+ * implementations that conform to v2.07 but not v3.0.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+
rc = register_power_pmu(&generic_compat_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 8c0d324f657e..3823df235f25 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -582,6 +582,7 @@ static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
if (ret)
break;
/* fall through and return the timeout */
+ fallthrough;
case WDIOC_GETTIMEOUT:
/* we need to round here as to avoid e.g. the following
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 87f524e4b09c..8a7e55acf090 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -73,7 +73,7 @@ smp_86xx_kick_cpu(int nr)
/* Setup fake reset vector to call __secondary_start_mpc86xx. */
target = (unsigned long) __secondary_start_mpc86xx;
- patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
+ patch_branch(vector, target, BRANCH_SET_LINK);
/* Kick that CPU */
smp_86xx_release_core(nr);
@@ -83,7 +83,7 @@ smp_86xx_kick_cpu(int nr)
mdelay(1);
/* Restore the exception vector */
- patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 7a5e8f4541e3..e02d29a9d12f 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,6 +20,8 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
source "arch/powerpc/platforms/44x/Kconfig"
source "arch/powerpc/platforms/40x/Kconfig"
source "arch/powerpc/platforms/amigaone/Kconfig"
+source "arch/powerpc/platforms/book3s/Kconfig"
+source "arch/powerpc/platforms/microwatt/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
@@ -49,6 +51,7 @@ config PPC_NATIVE
config PPC_OF_BOOT_TRAMPOLINE
bool "Support booting from Open Firmware or yaboot"
depends on PPC_BOOK3S_32 || PPC64
+ select RELOCATABLE if PPC64
default y
help
Support for booting from Open Firmware or yaboot using an
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index f998e655b570..0e1bb1cedd13 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -61,6 +61,7 @@ config 44x
select 4xx_SOC
select HAVE_PCI
select PHYS_64BIT
+ select PPC_HAVE_KUEP
endchoice
@@ -111,6 +112,7 @@ config PPC_BOOK3E_64
select PPC_FPU # Make it a choice ?
select PPC_SMP_MUXED_IPI
select PPC_DOORBELL
+ select ZONE_DMA
endchoice
@@ -389,7 +391,7 @@ config PPC_HAVE_KUEP
config PPC_KUEP
bool "Kernel Userspace Execution Prevention"
depends on PPC_HAVE_KUEP
- default y if !PPC_BOOK3S_32
+ default y
help
Enable support for Kernel Userspace Execution Prevention (KUEP)
@@ -401,7 +403,7 @@ config PPC_HAVE_KUAP
config PPC_KUAP
bool "Kernel Userspace Access Protection"
depends on PPC_HAVE_KUAP
- default y if !PPC_BOOK3S_32
+ default y
help
Enable support for Kernel Userspace Access Protection (KUAP)
@@ -424,10 +426,6 @@ config PPC_MMU_NOHASH
def_bool y
depends on !PPC_BOOK3S
-config PPC_MMU_NOHASH_32
- def_bool y
- depends on PPC_MMU_NOHASH && PPC32
-
config PPC_BOOK3E_MMU
def_bool y
depends on FSL_BOOKE || PPC_BOOK3E
@@ -476,9 +474,9 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
- int "Maximum number of CPUs (2-8192)"
- range 2 8192
- depends on SMP
+ int "Maximum number of CPUs (2-8192)" if SMP
+ range 2 8192 if SMP
+ default "1" if !SMP
default "32" if PPC64
default "4"
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 143d4417f6cc..94470fb27c99 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
obj-$(CONFIG_AMIGAONE) += amigaone/
+obj-$(CONFIG_PPC_BOOK3S) += book3s/
+obj-$(CONFIG_PPC_MICROWATT) += microwatt/
diff --git a/arch/powerpc/platforms/book3s/Kconfig b/arch/powerpc/platforms/book3s/Kconfig
new file mode 100644
index 000000000000..34c931592ef0
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_VAS
+ bool "IBM Virtual Accelerator Switchboard (VAS)"
+ depends on (PPC_POWERNV || PPC_PSERIES) && PPC_64K_PAGES
+ default y
+ help
+ This enables support for IBM Virtual Accelerator Switchboard (VAS).
+
+ VAS devices are found in POWER9-based and later systems; they
+ provide access to accelerator coprocessors such as NX-GZIP and
+ NX-842. This config allows the kernel to use NX-842 accelerators,
+ and user-mode APIs for the NX-GZIP accelerator on POWER9 PowerNV
+ and POWER10 PowerVM platforms.
+
+ If unsure, say "N".
diff --git a/arch/powerpc/platforms/book3s/Makefile b/arch/powerpc/platforms/book3s/Makefile
new file mode 100644
index 000000000000..e790f1910f61
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PPC_VAS) += vas-api.o
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
new file mode 100644
index 000000000000..30172e52e16b
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * VAS user space API for its accelerators (Only NX-GZIP is supported now)
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/mmu_context.h>
+#include <linux/io.h>
+#include <asm/vas.h>
+#include <uapi/asm/vas-api.h>
+
+/*
+ * The driver creates the device node that can be used as follows:
+ * For NX-GZIP
+ *
+ * fd = open("/dev/crypto/nx-gzip", O_RDWR);
+ * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
+ * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
+ * vas_copy(&crb, 0, 1);
+ * vas_paste(paste_addr, 0, 1);
+ * close(fd) or exit process to close window.
+ *
+ * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
+ * Copy/paste returns to user space directly, so refer to the NX hardware
+ * documentation for exact copy/paste usage and completion / error
+ * conditions.
+ */
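
A hypothetical user-space sketch of that sequence (error handling trimmed; the vas_id of -1 and the mmap protection flags are illustrative choices, while struct vas_tx_win_open_attr and VAS_TX_WIN_OPEN come from the uapi header included above):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/vas-api.h>

static void *open_gzip_window(int *fdp)
{
	struct vas_tx_win_open_attr attr = { .version = 1, .vas_id = -1 };
	void *paste_addr;
	int fd;

	fd = open("/dev/crypto/nx-gzip", O_RDWR);
	if (fd < 0)
		return NULL;
	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
		return NULL;
	/* map the window's paste address; CRBs are then submitted via copy/paste */
	paste_addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	*fdp = fd;
	return paste_addr == MAP_FAILED ? NULL : paste_addr;
}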
+
+/*
+ * Wrapper object for the nx-gzip device - there is just one instance of
+ * this node for the whole system.
+ */
+static struct coproc_dev {
+ struct cdev cdev;
+ struct device *device;
+ char *name;
+ dev_t devt;
+ struct class *class;
+ enum vas_cop_type cop_type;
+ const struct vas_user_win_ops *vops;
+} coproc_device;
+
+struct coproc_instance {
+ struct coproc_dev *coproc;
+ struct vas_window *txwin;
+};
+
+static char *coproc_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
+}
+
+/*
+ * Take reference to pid and mm
+ */
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+{
+ /*
+ * A window opened by a child thread may not be closed when
+ * that thread exits. So take a reference to its pid and release it
+ * when the window is freed by the parent thread.
+ * Acquire a reference to the task's pid to make sure the
+ * pid will not be re-used - needed only for multithreaded
+ * applications.
+ */
+ task_ref->pid = get_task_pid(current, PIDTYPE_PID);
+ /*
+ * Acquire a reference to the task's mm.
+ */
+ task_ref->mm = get_task_mm(current);
+ if (!task_ref->mm) {
+ put_pid(task_ref->pid);
+ pr_err("VAS: pid(%d): mm_struct is not found\n",
+ current->pid);
+ return -EPERM;
+ }
+
+ mmgrab(task_ref->mm);
+ mmput(task_ref->mm);
+ /*
+ * A process closes its windows during exit. In a
+ * multithreaded application, a child thread can open a
+ * window and exit without closing it. So take a tgid
+ * reference until the window is closed to make sure the tgid
+ * is not reused.
+ */
+ task_ref->tgid = find_get_pid(task_tgid_vnr(current));
+
+ return 0;
+}
+
+/*
+ * Successful return must release the task reference with
+ * put_task_struct
+ */
+static bool ref_get_pid_and_task(struct vas_user_win_ref *task_ref,
+ struct task_struct **tskp, struct pid **pidp)
+{
+ struct task_struct *tsk;
+ struct pid *pid;
+
+ pid = task_ref->pid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (!tsk) {
+ pid = task_ref->tgid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ /*
+ * The parent thread (tgid) will close the window when it
+ * exits, so we should not get here.
+ */
+ if (WARN_ON_ONCE(!tsk))
+ return false;
+ }
+
+ /* Return if the task is exiting. */
+ if (tsk->flags & PF_EXITING) {
+ put_task_struct(tsk);
+ return false;
+ }
+
+ *tskp = tsk;
+ *pidp = pid;
+
+ return true;
+}
+
+/*
+ * Update the CSB to indicate a translation error.
+ *
+ * User space will be polling on CSB after the request is issued.
+ * If NX can handle the request without any issues, it updates CSB.
+ * Whereas if NX encounters a page fault, the kernel handles the
+ * fault and updates the CSB with a translation error.
+ *
+ * If we are unable to update the CSB (i.e. copy_to_user failed due to
+ * an invalid csb_addr), send a signal to the process.
+ */
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref)
+{
+ struct coprocessor_status_block csb;
+ struct kernel_siginfo info;
+ struct task_struct *tsk;
+ void __user *csb_addr;
+ struct pid *pid;
+ int rc;
+
+ /*
+ * NX user space windows can not be opened for task->mm=NULL
+ * and faults will not be generated for kernel requests.
+ */
+ if (WARN_ON_ONCE(!task_ref->mm))
+ return;
+
+ csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
+
+ memset(&csb, 0, sizeof(csb));
+ csb.cc = CSB_CC_FAULT_ADDRESS;
+ csb.ce = CSB_CE_TERMINATION;
+ csb.cs = 0;
+ csb.count = 0;
+
+ /*
+ * NX operates on and returns data in BE format, as defined by the CRB
+ * struct. So save fault_storage_addr in BE, as NX pasted it into the
+ * FIFO, and expect user space to convert it to CPU format.
+ */
+ csb.address = crb->stamp.nx.fault_storage_addr;
+ csb.flags = 0;
+
+ /*
+ * A process closes its send window after all pending NX requests are
+ * completed. In multi-threaded applications, a child thread can
+ * open a window and exit without closing it. Some requests may
+ * still be pending, or the window may be used by other
+ * threads later. We should handle faults if NX encounters
+ * page faults on these requests. Update the CSB with the translation
+ * error and fault address. If the csb_addr passed by user space is
+ * invalid, send a SEGV signal to the pid saved in the window. If the
+ * child thread is not running, send the signal to the tgid.
+ * The parent thread (tgid) will close this window upon its exit.
+ *
+ * pid and mm references are taken when the window is opened by the
+ * process (pid). So tgid is used only when a child thread opens
+ * a window and exits without closing it.
+ */
+
+ if (!ref_get_pid_and_task(task_ref, &tsk, &pid))
+ return;
+
+ kthread_use_mm(task_ref->mm);
+ rc = copy_to_user(csb_addr, &csb, sizeof(csb));
+ /*
+ * User space polls on csb.flags (first byte). So add barrier
+ * then copy first byte with csb flags update.
+ */
+ if (!rc) {
+ csb.flags = CSB_V;
+ /* Make sure update to csb.flags is visible now */
+ smp_mb();
+ rc = copy_to_user(csb_addr, &csb, sizeof(u8));
+ }
+ kthread_unuse_mm(task_ref->mm);
+ put_task_struct(tsk);
+
+ /* Success */
+ if (!rc)
+ return;
+
+
+ pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
+ csb_addr, pid_vnr(pid));
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = EFAULT;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = csb_addr;
+ /*
+ * The process will be polling on csb.flags after the request is sent to
+ * NX, so generally the CSB update should not fail except when an
+ * application passes an invalid csb_addr. An error message is
+ * displayed and it is left to user space whether to ignore or
+ * handle this signal.
+ */
+ rcu_read_lock();
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+ pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+ pid_vnr(pid), rc);
+}
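
For context, a hypothetical sketch of the user-space side that the comments above describe: after submitting a request, poll the first byte of the CSB until CSB_V appears, then inspect the completion code. The helper name wait_for_csb is illustrative; the CSB layout and CSB_V flag are the same ones vas_update_csb() writes:

static int wait_for_csb(volatile struct coprocessor_status_block *csb)
{
	while (!(csb->flags & CSB_V))
		;			/* a real caller would bound or pause this loop */
	__sync_synchronize();		/* order the flag read before reading csb->cc */
	return csb->cc;			/* 0 on success, CSB_CC_* otherwise */
}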
+
+void vas_dump_crb(struct coprocessor_request_block *crb)
+{
+ struct data_descriptor_entry *dde;
+ struct nx_fault_stamp *nx;
+
+ dde = &crb->source;
+ pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ dde = &crb->target;
+ pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ nx = &crb->stamp.nx;
+ pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
+ be32_to_cpu(nx->pswid),
+ be64_to_cpu(crb->stamp.nx.fault_storage_addr),
+ nx->flags, nx->fault_status);
+}
+
+static int coproc_open(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst;
+
+ cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
+ if (!cp_inst)
+ return -ENOMEM;
+
+ cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
+ cdev);
+ fp->private_data = cp_inst;
+
+ return 0;
+}
+
+static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+{
+ void __user *uptr = (void __user *)arg;
+ struct vas_tx_win_open_attr uattr;
+ struct coproc_instance *cp_inst;
+ struct vas_window *txwin;
+ int rc;
+
+ cp_inst = fp->private_data;
+
+ /*
+ * One window per file descriptor
+ */
+ if (cp_inst->txwin)
+ return -EEXIST;
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+ pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ if (uattr.version != 1) {
+ pr_err("Invalid window open API version\n");
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
+ pr_err("VAS API is not registered\n");
+ return -EACCES;
+ }
+
+ txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ cp_inst->coproc->cop_type);
+ if (IS_ERR(txwin)) {
+ pr_err("%s() VAS window open failed, %ld\n", __func__,
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+
+ cp_inst->txwin = txwin;
+
+ return 0;
+}
+
+static int coproc_release(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ int rc;
+
+ if (cp_inst->txwin) {
+ if (cp_inst->coproc->vops &&
+ cp_inst->coproc->vops->close_win) {
+ rc = cp_inst->coproc->vops->close_win(cp_inst->txwin);
+ if (rc)
+ return rc;
+ }
+ cp_inst->txwin = NULL;
+ }
+
+ kfree(cp_inst);
+ fp->private_data = NULL;
+
+ /*
+ * We don't know here if user has other receive windows
+ * open, so we can't really call clear_thread_tidr().
+ * So, once the process calls set_thread_tidr(), the
+ * TIDR value sticks around until process exits, resulting
+ * in an extra copy in restore_sprs().
+ */
+
+ return 0;
+}
+
+static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ struct vas_window *txwin;
+ unsigned long pfn;
+ u64 paste_addr;
+ pgprot_t prot;
+ int rc;
+
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+ pr_err("%s(): No send window open?\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+ pr_err("%s(): VAS API is not registered\n", __func__);
+ return -EACCES;
+ }
+
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (!paste_addr) {
+ pr_err("%s(): Window paste address failed\n", __func__);
+ return -EINVAL;
+ }
+
+ pfn = paste_addr >> PAGE_SHIFT;
+
+ /* flags, page_prot from cxl_mmap(), except we want cacheable */
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+
+ prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
+
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+ pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+ paste_addr, vma->vm_start, rc);
+
+ return rc;
+}
+
+static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case VAS_TX_WIN_OPEN:
+ return coproc_ioc_tx_win_open(fp, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct file_operations coproc_fops = {
+ .open = coproc_open,
+ .release = coproc_release,
+ .mmap = coproc_mmap,
+ .unlocked_ioctl = coproc_ioctl,
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API code
+ * can be extended to other coprocessor types later.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name,
+ const struct vas_user_win_ops *vops)
+{
+ int rc = -EINVAL;
+ dev_t devno;
+
+ rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
+ if (rc) {
+ pr_err("Unable to allocate coproc major number: %i\n", rc);
+ return rc;
+ }
+
+ pr_devel("%s device allocated, dev [%i,%i]\n", name,
+ MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
+
+ coproc_device.class = class_create(mod, name);
+ if (IS_ERR(coproc_device.class)) {
+ rc = PTR_ERR(coproc_device.class);
+ pr_err("Unable to create %s class %d\n", name, rc);
+ goto err_class;
+ }
+ coproc_device.class->devnode = coproc_devnode;
+ coproc_device.cop_type = cop_type;
+ coproc_device.vops = vops;
+
+ coproc_fops.owner = mod;
+ cdev_init(&coproc_device.cdev, &coproc_fops);
+
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ rc = cdev_add(&coproc_device.cdev, devno, 1);
+ if (rc) {
+ pr_err("cdev_add() failed %d\n", rc);
+ goto err_cdev;
+ }
+
+ coproc_device.device = device_create(coproc_device.class, NULL,
+ devno, NULL, name, MINOR(devno));
+ if (IS_ERR(coproc_device.device)) {
+ rc = PTR_ERR(coproc_device.device);
+ pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
+ goto err;
+ }
+
+ pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+ MINOR(devno));
+
+ return 0;
+
+err:
+ cdev_del(&coproc_device.cdev);
+err_cdev:
+ class_destroy(coproc_device.class);
+err_class:
+ unregister_chrdev_region(coproc_device.devt, 1);
+ return rc;
+}
+
+void vas_unregister_coproc_api(void)
+{
+ dev_t devno;
+
+ cdev_del(&coproc_device.cdev);
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ device_destroy(coproc_device.class, devno);
+
+ class_destroy(coproc_device.class);
+ unregister_chrdev_region(coproc_device.devt, 1);
+}
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index e7c976bcadff..cb70c5f25bc6 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -35,6 +35,7 @@ config PPC_IBM_CELL_BLADE
config AXON_MSI
bool
depends on PPC_IBM_CELL_BLADE && PCI_MSI
+ select IRQ_DOMAIN_NOMAP
default y
menu "Cell Broadband Engine options"
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 35bbd15582af..b207a7f99be5 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -10,6 +10,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 93ea41680f54..a1c293f42a1f 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -25,10 +25,9 @@ struct spiderpci_iowa_private {
static void spiderpci_io_flush(struct iowa_bus *bus)
{
struct spiderpci_iowa_private *priv;
- u32 val;
priv = bus->private;
- val = in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
+ in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
iosync();
}
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index d56b4e3241cd..b41e81b22fdc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1657,14 +1657,13 @@ static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
- u32 dummy = 0;
/* Restore, Step 66:
* If CSA.MB_Stat[P]=0 (mailbox empty) then
* read from the PPU_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF) == 0) {
- dummy = in_be32(&prob->pu_mb_R);
+ in_be32(&prob->pu_mb_R);
eieio();
}
}
@@ -1672,14 +1671,13 @@ static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 dummy = 0UL;
/* Restore, Step 66:
* If CSA.MB_Stat[I]=0 (mailbox empty) then
* read from the PPUINT_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
- dummy = in_be64(&priv2->puint_mb_R);
+ in_be64(&priv2->puint_mb_R);
eieio();
spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
eieio();
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index d39a9213a3e6..609bda2ad5dd 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 53065d564161..85521b3e7098 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -251,8 +251,8 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 5565647dc879..d8da6a483e59 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -173,8 +173,8 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig
new file mode 100644
index 000000000000..8f6a81978461
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_MICROWATT
+ depends on PPC_BOOK3S_64 && !SMP
+ bool "Microwatt SoC platform"
+ select PPC_XICS
+ select PPC_ICS_NATIVE
+ select PPC_ICP_NATIVE
+ select PPC_NATIVE
+ select PPC_UDBG_16550
+ select ARCH_RANDOM
+ help
+ This option enables support for FPGA-based Microwatt implementations.
+
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile
new file mode 100644
index 000000000000..116d6d3ad3f0
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Makefile
@@ -0,0 +1 @@
+obj-y += setup.o rng.o
diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c
new file mode 100644
index 000000000000..3d8ee6eb7dad
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/rng.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/powerpc/platforms/powernv/rng.c, which is:
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "microwatt-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <asm/archrandom.h>
+#include <asm/cputable.h>
+#include <asm/machdep.h>
+
+#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
+
+int microwatt_get_random_darn(unsigned long *v)
+{
+ unsigned long val;
+
+ /* Using DARN with L=1 - 64-bit conditioned random number */
+ asm volatile(PPC_DARN(%0, 1) : "=r"(val));
+
+ if (val == DARN_ERR)
+ return 0;
+
+ *v = val;
+
+ return 1;
+}
+
+static __init int rng_init(void)
+{
+ unsigned long val;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (microwatt_get_random_darn(&val)) {
+ ppc_md.get_random_seed = microwatt_get_random_darn;
+ return 0;
+ }
+ }
+
+ pr_warn("Unable to use DARN for get_random_seed()\n");
+
+ return -EIO;
+}
+machine_subsys_initcall(microwatt, rng_init);
diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c
new file mode 100644
index 000000000000..0b02603bdb74
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/setup.c
@@ -0,0 +1,41 @@
+/*
+ * Microwatt FPGA-based SoC platform setup code.
+ *
+ * Copyright 2020 Paul Mackerras (paulus@ozlabs.org), IBM Corp.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/xics.h>
+#include <asm/udbg.h>
+
+static void __init microwatt_init_IRQ(void)
+{
+ xics_init();
+}
+
+static int __init microwatt_probe(void)
+{
+ return of_machine_is_compatible("microwatt-soc");
+}
+
+static int __init microwatt_populate(void)
+{
+ return of_platform_default_populate(NULL, NULL, NULL);
+}
+machine_arch_initcall(microwatt, microwatt_populate);
+
+define_machine(microwatt) {
+ .name = "microwatt",
+ .probe = microwatt_probe,
+ .init_IRQ = microwatt_init_IRQ,
+ .progress = udbg_progress,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 1c954c90b0f4..9b88e3cded7d 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -37,7 +37,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
*/
if (regs->msr & SRR1_WAKEMASK)
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
@@ -58,7 +58,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
restore_astate(hard_smp_processor_id());
/* everything handled */
- regs->msr |= MSR_RI;
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
return 1;
}
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index c02d8c503b29..b97bf12801eb 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -24,6 +24,7 @@ config PPC_PMAC32_PSURGE
bool "Support for powersurge upgrade cards" if EXPERT
depends on SMP && PPC32 && PPC_PMAC
select PPC_SMP_MUXED_IPI
+ select IRQ_DOMAIN_NOMAP
default y
help
The powersurge cpu boards can be used in the generation
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 9d4ecd292255..d20ef35e6d9d 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -433,7 +433,7 @@ static void __init btext_welcome(boot_infos_t *bi)
bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
bootx_printf(" (log)");
- bootx_printf("\nklimit : 0x%x",(unsigned long)klimit);
+ bootx_printf("\nklimit : 0x%x",(unsigned long)_end);
bootx_printf("\nboot_info at : 0x%x", bi);
__asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
bootx_printf("\nMSR : 0x%x", flags);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index adae2a6712e1..bdfea6d6ab69 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -810,7 +810,7 @@ static int smp_core99_kick_cpu(int nr)
* b __secondary_start_pmac_0 + nr*8
*/
target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
- patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
+ patch_branch(vector, target, BRANCH_SET_LINK);
/* Put some life in our friend */
pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
@@ -823,7 +823,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);
/* Restore our exception vector */
- patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 619b093a0657..043eefbbdd28 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -33,20 +33,6 @@ config PPC_MEMTRACE
Enabling this option allows for runtime allocation of memory (RAM)
for hardware tracing.
-config PPC_VAS
- bool "IBM Virtual Accelerator Switchboard (VAS)"
- depends on PPC_POWERNV && PPC_64K_PAGES
- default y
- help
- This enables support for IBM Virtual Accelerator Switchboard (VAS).
-
- VAS allows accelerators in co-processors like NX-GZIP and NX-842
- to be accessible to kernel subsystems and user processes.
-
- VAS adapters are found in POWER9 based systems.
-
- If unsure, say N.
-
config SCOM_DEBUGFS
bool "Expose SCOM controllers via debugfs"
depends on DEBUG_FS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index be2546b96816..dc7b37c23b60 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
obj-$(CONFIG_PERF_EVENTS) += opal-imc.o
obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o
-obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o vas-api.o
+obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o
obj-$(CONFIG_OCXL_BASE) += ocxl.o
obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o
obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 999997d9e9a9..528a7e0cf83a 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -604,7 +604,7 @@ struct p9_sprs {
u64 uamor;
};
-static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power9_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -620,8 +620,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */
- BUG_ON(!mmu_on);
-
/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -803,8 +801,7 @@ core_woken:
__slb_restore_bolted_realmode();
out:
- if (mmu_on)
- mtmsr(MSR_KERNEL);
+ mtmsr(MSR_KERNEL);
return srr1;
}
@@ -895,7 +892,7 @@ struct p10_sprs {
*/
};
-static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power10_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -909,8 +906,6 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */
- BUG_ON(!mmu_on);
-
/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -991,8 +986,7 @@ core_woken:
__slb_restore_bolted_realmode();
out:
- if (mmu_on)
- mtmsr(MSR_KERNEL);
+ mtmsr(MSR_KERNEL);
return srr1;
}
@@ -1002,40 +996,10 @@ static unsigned long arch300_offline_stop(unsigned long psscr)
{
unsigned long srr1;
-#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- __ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, true);
+ srr1 = power10_idle_stop(psscr);
else
- srr1 = power9_idle_stop(psscr, true);
- __ppc64_runlatch_on();
-#else
- /*
- * Tell KVM we're entering idle.
- * This does not have to be done in real mode because the P9 MMU
- * is independent per-thread. Some steppings share radix/hash mode
- * between threads, but in that case KVM has a barrier sync in real
- * mode before and after switching between radix and hash.
- *
- * kvm_start_guest must still be called in real mode though, hence
- * the false argument.
- */
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
-
- __ppc64_runlatch_off();
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, false);
- else
- srr1 = power9_idle_stop(psscr, false);
- __ppc64_runlatch_on();
-
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
- /* Order setting hwthread_state vs. testing hwthread_req */
- smp_mb();
- if (local_paca->kvm_hstate.hwthread_req)
- srr1 = idle_kvm_start_guest(srr1);
- mtmsr(MSR_KERNEL);
-#endif
+ srr1 = power9_idle_stop(psscr);
return srr1;
}
@@ -1055,9 +1019,9 @@ void arch300_idle_type(unsigned long stop_psscr_val,
__ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, true);
+ srr1 = power10_idle_stop(psscr);
else
- srr1 = power9_idle_stop(psscr, true);
+ srr1 = power9_idle_stop(psscr);
__ppc64_runlatch_on();
fini_irq_for_idle_irqsoff();
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index 01401e3da7ca..f812c74c61e5 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
+#include <asm/interrupt.h>
#include <asm/opal-api.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>
@@ -100,6 +101,9 @@ static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
bool mmu = (msr & (MSR_IR|MSR_DR));
int64_t ret;
+ /* OPAL call / firmware may use SRR and/or HSRR */
+ srr_regs_clobbered();
+
msr &= ~MSR_EE;
if (unlikely(!mmu))
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 303d7c775740..2835376e61a4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -773,7 +773,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
* Setup regs->nip to rfi into fixup address.
*/
if (recover_addr)
- regs->nip = recover_addr;
+ regs_set_return_ip(regs, recover_addr);
out:
return !!recover_addr;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b18468dc31ff..6bb3c52633fb 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -711,7 +711,7 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
return PCIBIOS_SUCCESSFUL;
}
-#if CONFIG_EEH
+#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
struct eeh_dev *edev = NULL;
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 73207b53dc2b..7e98b00ea2e8 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -169,6 +169,16 @@ static void update_hid_in_slw(u64 hid0)
}
}
+static inline void update_power8_hid0(unsigned long hid0)
+{
+ /*
+ * The HID0 update on Power8 should at the very least be
+ * preceded by a SYNC instruction followed by an ISYNC
+ * instruction
+ */
+ asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
+
static void unsplit_core(void)
{
u64 hid0, mask;
diff --git a/arch/powerpc/platforms/powernv/vas-api.c b/arch/powerpc/platforms/powernv/vas-api.c
deleted file mode 100644
index 98ed5d8c5441..000000000000
--- a/arch/powerpc/platforms/powernv/vas-api.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * VAS user space API for its accelerators (Only NX-GZIP is supported now)
- * Copyright (C) 2019 Haren Myneni, IBM Corp
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <asm/vas.h>
-#include <uapi/asm/vas-api.h>
-#include "vas.h"
-
-/*
- * The driver creates the device node that can be used as follows:
- * For NX-GZIP
- *
- * fd = open("/dev/crypto/nx-gzip", O_RDWR);
- * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
- * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
- * vas_copy(&crb, 0, 1);
- * vas_paste(paste_addr, 0, 1);
- * close(fd) or exit process to close window.
- *
- * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
- * copy/paste returns to the user space directly. So refer NX hardware
- * documententation for exact copy/paste usage and completion / error
- * conditions.
- */
-
-/*
- * Wrapper object for the nx-gzip device - there is just one instance of
- * this node for the whole system.
- */
-static struct coproc_dev {
- struct cdev cdev;
- struct device *device;
- char *name;
- dev_t devt;
- struct class *class;
- enum vas_cop_type cop_type;
-} coproc_device;
-
-struct coproc_instance {
- struct coproc_dev *coproc;
- struct vas_window *txwin;
-};
-
-static char *coproc_devnode(struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
-}
-
-static int coproc_open(struct inode *inode, struct file *fp)
-{
- struct coproc_instance *cp_inst;
-
- cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
- if (!cp_inst)
- return -ENOMEM;
-
- cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
- cdev);
- fp->private_data = cp_inst;
-
- return 0;
-}
-
-static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
-{
- void __user *uptr = (void __user *)arg;
- struct vas_tx_win_attr txattr = {};
- struct vas_tx_win_open_attr uattr;
- struct coproc_instance *cp_inst;
- struct vas_window *txwin;
- int rc, vasid;
-
- cp_inst = fp->private_data;
-
- /*
- * One window for file descriptor
- */
- if (cp_inst->txwin)
- return -EEXIST;
-
- rc = copy_from_user(&uattr, uptr, sizeof(uattr));
- if (rc) {
- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
- return -EFAULT;
- }
-
- if (uattr.version != 1) {
- pr_err("Invalid version\n");
- return -EINVAL;
- }
-
- vasid = uattr.vas_id;
-
- vas_init_tx_win_attr(&txattr, cp_inst->coproc->cop_type);
-
- txattr.lpid = mfspr(SPRN_LPID);
- txattr.pidr = mfspr(SPRN_PID);
- txattr.user_win = true;
- txattr.rsvd_txbuf_count = false;
- txattr.pswid = false;
-
- pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
- mfspr(SPRN_PID));
-
- txwin = vas_tx_win_open(vasid, cp_inst->coproc->cop_type, &txattr);
- if (IS_ERR(txwin)) {
- pr_err("%s() vas_tx_win_open() failed, %ld\n", __func__,
- PTR_ERR(txwin));
- return PTR_ERR(txwin);
- }
-
- cp_inst->txwin = txwin;
-
- return 0;
-}
-
-static int coproc_release(struct inode *inode, struct file *fp)
-{
- struct coproc_instance *cp_inst = fp->private_data;
-
- if (cp_inst->txwin) {
- vas_win_close(cp_inst->txwin);
- cp_inst->txwin = NULL;
- }
-
- kfree(cp_inst);
- fp->private_data = NULL;
-
- /*
- * We don't know here if user has other receive windows
- * open, so we can't really call clear_thread_tidr().
- * So, once the process calls set_thread_tidr(), the
- * TIDR value sticks around until process exits, resulting
- * in an extra copy in restore_sprs().
- */
-
- return 0;
-}
-
-static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
-{
- struct coproc_instance *cp_inst = fp->private_data;
- struct vas_window *txwin;
- unsigned long pfn;
- u64 paste_addr;
- pgprot_t prot;
- int rc;
-
- txwin = cp_inst->txwin;
-
- if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
- (vma->vm_end - vma->vm_start), PAGE_SIZE);
- return -EINVAL;
- }
-
- /* Ensure instance has an open send window */
- if (!txwin) {
- pr_err("%s(): No send window open?\n", __func__);
- return -EINVAL;
- }
-
- vas_win_paste_addr(txwin, &paste_addr, NULL);
- pfn = paste_addr >> PAGE_SHIFT;
-
- /* flags, page_prot from cxl_mmap(), except we want cachable */
- vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
-
- prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
-
- rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start, prot);
-
- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
- paste_addr, vma->vm_start, rc);
-
- return rc;
-}
-
-static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case VAS_TX_WIN_OPEN:
- return coproc_ioc_tx_win_open(fp, arg);
- default:
- return -EINVAL;
- }
-}
-
-static struct file_operations coproc_fops = {
- .open = coproc_open,
- .release = coproc_release,
- .mmap = coproc_mmap,
- .unlocked_ioctl = coproc_ioctl,
-};
-
-/*
- * Supporting only nx-gzip coprocessor type now, but this API code
- * extended to other coprocessor types later.
- */
-int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- const char *name)
-{
- int rc = -EINVAL;
- dev_t devno;
-
- rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
- if (rc) {
- pr_err("Unable to allocate coproc major number: %i\n", rc);
- return rc;
- }
-
- pr_devel("%s device allocated, dev [%i,%i]\n", name,
- MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
-
- coproc_device.class = class_create(mod, name);
- if (IS_ERR(coproc_device.class)) {
- rc = PTR_ERR(coproc_device.class);
- pr_err("Unable to create %s class %d\n", name, rc);
- goto err_class;
- }
- coproc_device.class->devnode = coproc_devnode;
- coproc_device.cop_type = cop_type;
-
- coproc_fops.owner = mod;
- cdev_init(&coproc_device.cdev, &coproc_fops);
-
- devno = MKDEV(MAJOR(coproc_device.devt), 0);
- rc = cdev_add(&coproc_device.cdev, devno, 1);
- if (rc) {
- pr_err("cdev_add() failed %d\n", rc);
- goto err_cdev;
- }
-
- coproc_device.device = device_create(coproc_device.class, NULL,
- devno, NULL, name, MINOR(devno));
- if (IS_ERR(coproc_device.device)) {
- rc = PTR_ERR(coproc_device.device);
- pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
- goto err;
- }
-
- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
- MINOR(devno));
-
- return 0;
-
-err:
- cdev_del(&coproc_device.cdev);
-err_cdev:
- class_destroy(coproc_device.class);
-err_class:
- unregister_chrdev_region(coproc_device.devt, 1);
- return rc;
-}
-EXPORT_SYMBOL_GPL(vas_register_coproc_api);
-
-void vas_unregister_coproc_api(void)
-{
- dev_t devno;
-
- cdev_del(&coproc_device.cdev);
- devno = MKDEV(MAJOR(coproc_device.devt), 0);
- device_destroy(coproc_device.class, devno);
-
- class_destroy(coproc_device.class);
- unregister_chrdev_region(coproc_device.devt, 1);
-}
-EXPORT_SYMBOL_GPL(vas_unregister_coproc_api);
diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
index 41fa90d2f4ab..3ce89a4b54be 100644
--- a/arch/powerpc/platforms/powernv/vas-debug.c
+++ b/arch/powerpc/platforms/powernv/vas-debug.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <asm/vas.h>
#include "vas.h"
static struct dentry *vas_debugfs;
@@ -28,7 +29,7 @@ static char *cop_to_str(int cop)
static int info_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -36,9 +37,9 @@ static int info_show(struct seq_file *s, void *private)
if (!window->hvwc_map)
goto unlock;
- seq_printf(s, "Type: %s, %s\n", cop_to_str(window->cop),
+ seq_printf(s, "Type: %s, %s\n", cop_to_str(window->vas_win.cop),
window->tx_win ? "Send" : "Receive");
- seq_printf(s, "Pid : %d\n", vas_window_pid(window));
+ seq_printf(s, "Pid : %d\n", vas_window_pid(&window->vas_win));
unlock:
mutex_unlock(&vas_mutex);
@@ -47,7 +48,7 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(info);
-static inline void print_reg(struct seq_file *s, struct vas_window *win,
+static inline void print_reg(struct seq_file *s, struct pnv_vas_window *win,
char *name, u32 reg)
{
seq_printf(s, "0x%016llx %s\n", read_hvwc_reg(win, name, reg), name);
@@ -55,7 +56,7 @@ static inline void print_reg(struct seq_file *s, struct vas_window *win,
static int hvwc_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -103,8 +104,10 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(hvwc);
-void vas_window_free_dbgdir(struct vas_window *window)
+void vas_window_free_dbgdir(struct pnv_vas_window *pnv_win)
{
+ struct vas_window *window = &pnv_win->vas_win;
+
if (window->dbgdir) {
debugfs_remove_recursive(window->dbgdir);
kfree(window->dbgname);
@@ -113,21 +116,21 @@ void vas_window_free_dbgdir(struct vas_window *window)
}
}
-void vas_window_init_dbgdir(struct vas_window *window)
+void vas_window_init_dbgdir(struct pnv_vas_window *window)
{
struct dentry *d;
if (!window->vinst->dbgdir)
return;
- window->dbgname = kzalloc(16, GFP_KERNEL);
- if (!window->dbgname)
+ window->vas_win.dbgname = kzalloc(16, GFP_KERNEL);
+ if (!window->vas_win.dbgname)
return;
- snprintf(window->dbgname, 16, "w%d", window->winid);
+ snprintf(window->vas_win.dbgname, 16, "w%d", window->vas_win.winid);
- d = debugfs_create_dir(window->dbgname, window->vinst->dbgdir);
- window->dbgdir = d;
+ d = debugfs_create_dir(window->vas_win.dbgname, window->vinst->dbgdir);
+ window->vas_win.dbgdir = d;
debugfs_create_file("info", 0444, d, window, &info_fops);
debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops);
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 3d21fce254b7..a7aabc18039e 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -26,150 +26,6 @@
*/
#define VAS_FAULT_WIN_FIFO_SIZE (4 << 20)
-static void dump_crb(struct coprocessor_request_block *crb)
-{
- struct data_descriptor_entry *dde;
- struct nx_fault_stamp *nx;
-
- dde = &crb->source;
- pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
- be64_to_cpu(dde->address), be32_to_cpu(dde->length),
- dde->count, dde->index, dde->flags);
-
- dde = &crb->target;
- pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
- be64_to_cpu(dde->address), be32_to_cpu(dde->length),
- dde->count, dde->index, dde->flags);
-
- nx = &crb->stamp.nx;
- pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
- be32_to_cpu(nx->pswid),
- be64_to_cpu(crb->stamp.nx.fault_storage_addr),
- nx->flags, nx->fault_status);
-}
-
-/*
- * Update the CSB to indicate a translation error.
- *
- * User space will be polling on CSB after the request is issued.
- * If NX can handle the request without any issues, it updates CSB.
- * Whereas if NX encounters page fault, the kernel will handle the
- * fault and update CSB with translation error.
- *
- * If we are unable to update the CSB means copy_to_user failed due to
- * invalid csb_addr, send a signal to the process.
- */
-static void update_csb(struct vas_window *window,
- struct coprocessor_request_block *crb)
-{
- struct coprocessor_status_block csb;
- struct kernel_siginfo info;
- struct task_struct *tsk;
- void __user *csb_addr;
- struct pid *pid;
- int rc;
-
- /*
- * NX user space windows can not be opened for task->mm=NULL
- * and faults will not be generated for kernel requests.
- */
- if (WARN_ON_ONCE(!window->mm || !window->user_win))
- return;
-
- csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
-
- memset(&csb, 0, sizeof(csb));
- csb.cc = CSB_CC_FAULT_ADDRESS;
- csb.ce = CSB_CE_TERMINATION;
- csb.cs = 0;
- csb.count = 0;
-
- /*
- * NX operates and returns in BE format as defined CRB struct.
- * So saves fault_storage_addr in BE as NX pastes in FIFO and
- * expects user space to convert to CPU format.
- */
- csb.address = crb->stamp.nx.fault_storage_addr;
- csb.flags = 0;
-
- pid = window->pid;
- tsk = get_pid_task(pid, PIDTYPE_PID);
- /*
- * Process closes send window after all pending NX requests are
- * completed. In multi-thread applications, a child thread can
- * open a window and can exit without closing it. May be some
- * requests are pending or this window can be used by other
- * threads later. We should handle faults if NX encounters
- * pages faults on these requests. Update CSB with translation
- * error and fault address. If csb_addr passed by user space is
- * invalid, send SEGV signal to pid saved in window. If the
- * child thread is not running, send the signal to tgid.
- * Parent thread (tgid) will close this window upon its exit.
- *
- * pid and mm references are taken when window is opened by
- * process (pid). So tgid is used only when child thread opens
- * a window and exits without closing it.
- */
- if (!tsk) {
- pid = window->tgid;
- tsk = get_pid_task(pid, PIDTYPE_PID);
- /*
- * Parent thread (tgid) will be closing window when it
- * exits. So should not get here.
- */
- if (WARN_ON_ONCE(!tsk))
- return;
- }
-
- /* Return if the task is exiting. */
- if (tsk->flags & PF_EXITING) {
- put_task_struct(tsk);
- return;
- }
-
- kthread_use_mm(window->mm);
- rc = copy_to_user(csb_addr, &csb, sizeof(csb));
- /*
- * User space polls on csb.flags (first byte). So add barrier
- * then copy first byte with csb flags update.
- */
- if (!rc) {
- csb.flags = CSB_V;
- /* Make sure update to csb.flags is visible now */
- smp_mb();
- rc = copy_to_user(csb_addr, &csb, sizeof(u8));
- }
- kthread_unuse_mm(window->mm);
- put_task_struct(tsk);
-
- /* Success */
- if (!rc)
- return;
-
- pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
- csb_addr, pid_vnr(pid));
-
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_errno = EFAULT;
- info.si_code = SEGV_MAPERR;
- info.si_addr = csb_addr;
-
- /*
- * process will be polling on csb.flags after request is sent to
- * NX. So generally CSB update should not fail except when an
- * application passes invalid csb_addr. So an error message will
- * be displayed and leave it to user space whether to ignore or
- * handle this signal.
- */
- rcu_read_lock();
- rc = kill_pid_info(SIGSEGV, &info, pid);
- rcu_read_unlock();
-
- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
- pid_vnr(pid), rc);
-}
-
static void dump_fifo(struct vas_instance *vinst, void *entry)
{
unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size;
@@ -212,7 +68,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
struct vas_instance *vinst = data;
struct coprocessor_request_block *crb, *entry;
struct coprocessor_request_block buf;
- struct vas_window *window;
+ struct pnv_vas_window *window;
unsigned long flags;
void *fifo;
@@ -272,7 +128,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
vinst->vas_id, vinst->fault_fifo, fifo,
vinst->fault_crbs);
- dump_crb(crb);
+ vas_dump_crb(crb);
window = vas_pswid_to_window(vinst,
be32_to_cpu(crb->stamp.nx.pswid));
@@ -293,7 +149,14 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
WARN_ON_ONCE(1);
} else {
- update_csb(window, crb);
+ /*
+ * NX sees faults only with user space windows.
+ */
+ if (window->user_win)
+ vas_update_csb(crb, &window->vas_win.task_ref);
+ else
+ WARN_ON_ONCE(!window->user_win);
+
/*
* Return credit for send window after processing
* fault CRB.
@@ -336,6 +199,7 @@ irqreturn_t vas_fault_handler(int irq, void *dev_id)
int vas_setup_fault_window(struct vas_instance *vinst)
{
struct vas_rx_win_attr attr;
+ struct vas_window *win;
vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE;
vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL);
@@ -364,18 +228,17 @@ int vas_setup_fault_window(struct vas_instance *vinst)
attr.lnotify_pid = mfspr(SPRN_PID);
attr.lnotify_tid = mfspr(SPRN_PID);
- vinst->fault_win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT,
- &attr);
-
- if (IS_ERR(vinst->fault_win)) {
- pr_err("VAS: Error %ld opening FaultWin\n",
- PTR_ERR(vinst->fault_win));
+ win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT, &attr);
+ if (IS_ERR(win)) {
+ pr_err("VAS: Error %ld opening FaultWin\n", PTR_ERR(win));
kfree(vinst->fault_fifo);
- return PTR_ERR(vinst->fault_win);
+ return PTR_ERR(win);
}
+ vinst->fault_win = container_of(win, struct pnv_vas_window, vas_win);
+
pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n",
- vinst->fault_win->winid, attr.lnotify_lpid,
+ vinst->fault_win->vas_win.winid, attr.lnotify_lpid,
attr.lnotify_pid, attr.lnotify_tid);
return 0;
diff --git a/arch/powerpc/platforms/powernv/vas-trace.h b/arch/powerpc/platforms/powernv/vas-trace.h
index a449b9f0c12e..ca2e08f2ddc0 100644
--- a/arch/powerpc/platforms/powernv/vas-trace.h
+++ b/arch/powerpc/platforms/powernv/vas-trace.h
@@ -80,7 +80,7 @@ TRACE_EVENT( vas_tx_win_open,
TRACE_EVENT( vas_paste_crb,
TP_PROTO(struct task_struct *tsk,
- struct vas_window *win),
+ struct pnv_vas_window *win),
TP_ARGS(tsk, win),
@@ -96,7 +96,7 @@ TRACE_EVENT( vas_paste_crb,
TP_fast_assign(
__entry->pid = tsk->pid;
__entry->vasid = win->vinst->vas_id;
- __entry->winid = win->winid;
+ __entry->winid = win->vas_win.winid;
__entry->paste_kaddr = (unsigned long)win->paste_kaddr
),
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 5f5fe63a3d1c..0f8d39fbf2b2 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -16,6 +16,7 @@
#include <linux/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/ppc-opcode.h>
+#include <asm/vas.h>
#include "vas.h"
#include "copy-paste.h"
@@ -26,14 +27,14 @@
* Compute the paste address region for the window @window using the
* ->paste_base_addr and ->paste_win_id_shift we got from device tree.
*/
-void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
+void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr, int *len)
{
int winid;
u64 base, shift;
base = window->vinst->paste_base_addr;
shift = window->vinst->paste_win_id_shift;
- winid = window->winid;
+ winid = window->vas_win.winid;
*addr = base + (winid << shift);
if (len)
@@ -42,23 +43,23 @@ void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
}
-static inline void get_hvwc_mmio_bar(struct vas_window *window,
+static inline void get_hvwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->hvwc_bar_start;
- *start = pbaddr + window->winid * VAS_HVWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_HVWC_SIZE;
*len = VAS_HVWC_SIZE;
}
-static inline void get_uwc_mmio_bar(struct vas_window *window,
+static inline void get_uwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->uwc_bar_start;
- *start = pbaddr + window->winid * VAS_UWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_UWC_SIZE;
*len = VAS_UWC_SIZE;
}
@@ -67,7 +68,7 @@ static inline void get_uwc_mmio_bar(struct vas_window *window,
* space. Unlike MMIO regions (map_mmio_region() below), paste region must
* be mapped cache-able and is only applicable to send windows.
*/
-static void *map_paste_region(struct vas_window *txwin)
+static void *map_paste_region(struct pnv_vas_window *txwin)
{
int len;
void *map;
@@ -75,7 +76,7 @@ static void *map_paste_region(struct vas_window *txwin)
u64 start;
name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
- txwin->winid);
+ txwin->vas_win.winid);
if (!name)
goto free_name;
@@ -132,7 +133,7 @@ static void unmap_region(void *addr, u64 start, int len)
/*
* Unmap the paste address region for a window.
*/
-static void unmap_paste_region(struct vas_window *window)
+static void unmap_paste_region(struct pnv_vas_window *window)
{
int len;
u64 busaddr_start;
@@ -153,7 +154,7 @@ static void unmap_paste_region(struct vas_window *window)
* path, just minimize the time we hold the mutex for now. We can add
* a per-instance mutex later if necessary.
*/
-static void unmap_winctx_mmio_bars(struct vas_window *window)
+static void unmap_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
void *uwc_map;
@@ -186,7 +187,7 @@ static void unmap_winctx_mmio_bars(struct vas_window *window)
* OS/User Window Context (UWC) MMIO Base Address Region for the given window.
* Map these bus addresses and save the mapped kernel addresses in @window.
*/
-static int map_winctx_mmio_bars(struct vas_window *window)
+static int map_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
u64 start;
@@ -214,7 +215,7 @@ static int map_winctx_mmio_bars(struct vas_window *window)
* registers are not sequential. And, we can only write to offsets
* with valid registers.
*/
-static void reset_window_regs(struct vas_window *window)
+static void reset_window_regs(struct pnv_vas_window *window)
{
write_hvwc_reg(window, VREG(LPID), 0ULL);
write_hvwc_reg(window, VREG(PID), 0ULL);
@@ -270,7 +271,7 @@ static void reset_window_regs(struct vas_window *window)
* want to add fields to vas_winctx and move the initialization to
* init_vas_winctx_regs().
*/
-static void init_xlate_regs(struct vas_window *window, bool user_win)
+static void init_xlate_regs(struct pnv_vas_window *window, bool user_win)
{
u64 lpcr, val;
@@ -335,7 +336,7 @@ static void init_xlate_regs(struct vas_window *window, bool user_win)
*
* TODO: Reserved (aka dedicated) send buffers are not supported yet.
*/
-static void init_rsvd_tx_buf_count(struct vas_window *txwin,
+static void init_rsvd_tx_buf_count(struct pnv_vas_window *txwin,
struct vas_winctx *winctx)
{
write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
@@ -357,7 +358,7 @@ static void init_rsvd_tx_buf_count(struct vas_window *txwin,
* as a one-time task? That could work for NX but what about other
* receivers? Let the receivers tell us the rx-fifo buffers for now.
*/
-static void init_winctx_regs(struct vas_window *window,
+static void init_winctx_regs(struct pnv_vas_window *window,
struct vas_winctx *winctx)
{
u64 val;
@@ -519,10 +520,10 @@ static int vas_assign_window_id(struct ida *ida)
return winid;
}
-static void vas_window_free(struct vas_window *window)
+static void vas_window_free(struct pnv_vas_window *window)
{
- int winid = window->winid;
struct vas_instance *vinst = window->vinst;
+ int winid = window->vas_win.winid;
unmap_winctx_mmio_bars(window);
@@ -533,10 +534,10 @@ static void vas_window_free(struct vas_window *window)
vas_release_window_id(&vinst->ida, winid);
}
-static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
+static struct pnv_vas_window *vas_window_alloc(struct vas_instance *vinst)
{
int winid;
- struct vas_window *window;
+ struct pnv_vas_window *window;
winid = vas_assign_window_id(&vinst->ida);
if (winid < 0)
@@ -547,7 +548,7 @@ static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
goto out_free;
window->vinst = vinst;
- window->winid = winid;
+ window->vas_win.winid = winid;
if (map_winctx_mmio_bars(window))
goto out_free;
@@ -562,7 +563,7 @@ out_free:
return ERR_PTR(-ENOMEM);
}
-static void put_rx_win(struct vas_window *rxwin)
+static void put_rx_win(struct pnv_vas_window *rxwin)
{
/* Better not be a send window! */
WARN_ON_ONCE(rxwin->tx_win);
@@ -578,10 +579,11 @@ static void put_rx_win(struct vas_window *rxwin)
*
* NOTE: We access ->windows[] table and assume that vinst->mutex is held.
*/
-static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
+static struct pnv_vas_window *get_user_rxwin(struct vas_instance *vinst,
+ u32 pswid)
{
int vasid, winid;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
decode_pswid(pswid, &vasid, &winid);
@@ -590,7 +592,7 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
rxwin = vinst->windows[winid];
- if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
+ if (!rxwin || rxwin->tx_win || rxwin->vas_win.cop != VAS_COP_TYPE_FTW)
return ERR_PTR(-EINVAL);
return rxwin;
@@ -602,10 +604,10 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
*
* See also function header of set_vinst_win().
*/
-static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
+static struct pnv_vas_window *get_vinst_rxwin(struct vas_instance *vinst,
enum vas_cop_type cop, u32 pswid)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
mutex_lock(&vinst->mutex);
@@ -638,9 +640,9 @@ static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
* window, we also save the window in the ->rxwin[] table.
*/
static void set_vinst_win(struct vas_instance *vinst,
- struct vas_window *window)
+ struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
mutex_lock(&vinst->mutex);
@@ -649,8 +651,8 @@ static void set_vinst_win(struct vas_instance *vinst,
* unless its a user (FTW) window.
*/
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = window;
+ WARN_ON_ONCE(vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = window;
}
WARN_ON_ONCE(vinst->windows[id] != NULL);
@@ -663,16 +665,16 @@ static void set_vinst_win(struct vas_instance *vinst,
* Clear this window from the table(s) of windows for this VAS instance.
* See also function header of set_vinst_win().
*/
-static void clear_vinst_win(struct vas_window *window)
+static void clear_vinst_win(struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
struct vas_instance *vinst = window->vinst;
mutex_lock(&vinst->mutex);
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(!vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = NULL;
+ WARN_ON_ONCE(!vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = NULL;
}
WARN_ON_ONCE(vinst->windows[id] != window);
@@ -681,7 +683,7 @@ static void clear_vinst_win(struct vas_window *window)
mutex_unlock(&vinst->mutex);
}
-static void init_winctx_for_rxwin(struct vas_window *rxwin,
+static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin,
struct vas_rx_win_attr *rxattr,
struct vas_winctx *winctx)
{
@@ -702,7 +704,7 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
winctx->rx_fifo = rxattr->rx_fifo;
winctx->rx_fifo_size = rxattr->rx_fifo_size;
- winctx->wcreds_max = rxwin->wcreds_max;
+ winctx->wcreds_max = rxwin->vas_win.wcreds_max;
winctx->pin_win = rxattr->pin_win;
winctx->nx_win = rxattr->nx_win;
@@ -851,7 +853,7 @@ EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);
struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
struct vas_rx_win_attr *rxattr)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -870,21 +872,21 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
rxwin = vas_window_alloc(vinst);
if (IS_ERR(rxwin)) {
pr_devel("Unable to allocate memory for Rx window\n");
- return rxwin;
+ return (struct vas_window *)rxwin;
}
rxwin->tx_win = false;
rxwin->nx_win = rxattr->nx_win;
rxwin->user_win = rxattr->user_win;
- rxwin->cop = cop;
- rxwin->wcreds_max = rxattr->wcreds_max;
+ rxwin->vas_win.cop = cop;
+ rxwin->vas_win.wcreds_max = rxattr->wcreds_max;
init_winctx_for_rxwin(rxwin, rxattr, &winctx);
init_winctx_regs(rxwin, &winctx);
set_vinst_win(vinst, rxwin);
- return rxwin;
+ return &rxwin->vas_win;
}
EXPORT_SYMBOL_GPL(vas_rx_win_open);
@@ -905,7 +907,7 @@ void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
}
EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);
-static void init_winctx_for_txwin(struct vas_window *txwin,
+static void init_winctx_for_txwin(struct pnv_vas_window *txwin,
struct vas_tx_win_attr *txattr,
struct vas_winctx *winctx)
{
@@ -926,7 +928,7 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
*/
memset(winctx, 0, sizeof(struct vas_winctx));
- winctx->wcreds_max = txwin->wcreds_max;
+ winctx->wcreds_max = txwin->vas_win.wcreds_max;
winctx->user_win = txattr->user_win;
winctx->nx_win = txwin->rxwin->nx_win;
@@ -946,13 +948,13 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->lpid = txattr->lpid;
winctx->pidr = txattr->pidr;
- winctx->rx_win_id = txwin->rxwin->winid;
+ winctx->rx_win_id = txwin->rxwin->vas_win.winid;
/*
* IRQ and fault window setup is successful. Set fault window
 * for the send window so that it is ready to handle faults.
*/
if (txwin->vinst->virq)
- winctx->fault_win_id = txwin->vinst->fault_win->winid;
+ winctx->fault_win_id = txwin->vinst->fault_win->vas_win.winid;
winctx->dma_type = VAS_DMA_TYPE_INJECT;
winctx->tc_mode = txattr->tc_mode;
@@ -962,7 +964,8 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->irq_port = txwin->vinst->irq_port;
winctx->pswid = txattr->pswid ? txattr->pswid :
- encode_pswid(txwin->vinst->vas_id, txwin->winid);
+ encode_pswid(txwin->vinst->vas_id,
+ txwin->vas_win.winid);
}
static bool tx_win_args_valid(enum vas_cop_type cop,
@@ -993,8 +996,8 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
struct vas_tx_win_attr *attr)
{
int rc;
- struct vas_window *txwin;
- struct vas_window *rxwin;
+ struct pnv_vas_window *txwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -1020,7 +1023,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
if (IS_ERR(rxwin)) {
pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
- return rxwin;
+ return (struct vas_window *)rxwin;
}
txwin = vas_window_alloc(vinst);
@@ -1029,12 +1032,12 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
goto put_rxwin;
}
- txwin->cop = cop;
+ txwin->vas_win.cop = cop;
txwin->tx_win = 1;
txwin->rxwin = rxwin;
txwin->nx_win = txwin->rxwin->nx_win;
txwin->user_win = attr->user_win;
- txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
+ txwin->vas_win.wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
init_winctx_for_txwin(txwin, attr, &winctx);
@@ -1064,56 +1067,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rc = -ENODEV;
goto free_window;
}
-
- /*
- * Window opened by a child thread may not be closed when
- * it exits. So take reference to its pid and release it
- * when the window is free by parent thread.
- * Acquire a reference to the task's pid to make sure
- * pid will not be re-used - needed only for multithread
- * applications.
- */
- txwin->pid = get_task_pid(current, PIDTYPE_PID);
- /*
- * Acquire a reference to the task's mm.
- */
- txwin->mm = get_task_mm(current);
-
- if (!txwin->mm) {
- put_pid(txwin->pid);
- pr_err("VAS: pid(%d): mm_struct is not found\n",
- current->pid);
- rc = -EPERM;
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
+ if (rc)
goto free_window;
- }
- mmgrab(txwin->mm);
- mmput(txwin->mm);
- mm_context_add_vas_window(txwin->mm);
- /*
- * Process closes window during exit. In the case of
- * multithread application, the child thread can open
- * window and can exit without closing it. Expects parent
- * thread to use and close the window. So do not need
- * to take pid reference for parent thread.
- */
- txwin->tgid = find_get_pid(task_tgid_vnr(current));
- /*
- * Even a process that has no foreign real address mapping can
- * use an unpaired COPY instruction (to no real effect). Issue
- * CP_ABORT to clear any pending COPY and prevent a covert
- * channel.
- *
- * __switch_to() will issue CP_ABORT on future context switches
- * if process / thread has any open VAS window (Use
- * current->mm->context.vas_windows).
- */
- asm volatile(PPC_CP_ABORT);
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
}
set_vinst_win(vinst, txwin);
- return txwin;
+ return &txwin->vas_win;
free_window:
vas_window_free(txwin);
@@ -1132,12 +1095,14 @@ int vas_copy_crb(void *crb, int offset)
EXPORT_SYMBOL_GPL(vas_copy_crb);
#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
-int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
+int vas_paste_crb(struct vas_window *vwin, int offset, bool re)
{
+ struct pnv_vas_window *txwin;
int rc;
void *addr;
uint64_t val;
+ txwin = container_of(vwin, struct pnv_vas_window, vas_win);
trace_vas_paste_crb(current, txwin);
/*
@@ -1167,7 +1132,7 @@ int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
else
rc = -EINVAL;
- pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
+ pr_debug("Txwin #%d: Msg count %llu\n", txwin->vas_win.winid,
read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));
return rc;
@@ -1187,7 +1152,7 @@ EXPORT_SYMBOL_GPL(vas_paste_crb);
* user space. (NX-842 driver waits for CSB and Fast thread-wakeup
* doesn't use credit checking).
*/
-static void poll_window_credits(struct vas_window *window)
+static void poll_window_credits(struct pnv_vas_window *window)
{
u64 val;
int creds, mode;
@@ -1217,7 +1182,7 @@ retry:
* and issue CRB Kill to stop all pending requests. Need only
* if there is a bug in NX or fault handling in kernel.
*/
- if (creds < window->wcreds_max) {
+ if (creds < window->vas_win.wcreds_max) {
val = 0;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(10));
@@ -1228,7 +1193,8 @@ retry:
*/
if (!(count % 1000))
pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). creds %d, Retries %d\n",
- vas_window_pid(window), window->winid,
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid,
creds, count);
goto retry;
@@ -1240,7 +1206,7 @@ retry:
* short time to queue a CRB, so window should not be busy for too long.
* Trying 5ms intervals.
*/
-static void poll_window_busy_state(struct vas_window *window)
+static void poll_window_busy_state(struct pnv_vas_window *window)
{
int busy;
u64 val;
@@ -1260,7 +1226,8 @@ retry:
*/
if (!(count % 1000))
pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
- vas_window_pid(window), window->winid, count);
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid, count);
goto retry;
}
@@ -1282,7 +1249,7 @@ retry:
* casting out becomes necessary we should consider offloading the
* job to a worker thread, so the window close can proceed quickly.
*/
-static void poll_window_castout(struct vas_window *window)
+static void poll_window_castout(struct pnv_vas_window *window)
{
/* stub for now */
}
@@ -1291,7 +1258,7 @@ static void poll_window_castout(struct vas_window *window)
* Unpin and close a window so no new requests are accepted and the
* hardware can evict this window from cache if necessary.
*/
-static void unpin_close_window(struct vas_window *window)
+static void unpin_close_window(struct pnv_vas_window *window)
{
u64 val;
@@ -1313,11 +1280,15 @@ static void unpin_close_window(struct vas_window *window)
*
* Besides the hardware, kernel has some bookkeeping of course.
*/
-int vas_win_close(struct vas_window *window)
+int vas_win_close(struct vas_window *vwin)
{
- if (!window)
+ struct pnv_vas_window *window;
+
+ if (!vwin)
return 0;
+ window = container_of(vwin, struct pnv_vas_window, vas_win);
+
if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
pr_devel("Attempting to close an active Rx window!\n");
WARN_ON_ONCE(1);
@@ -1339,12 +1310,8 @@ int vas_win_close(struct vas_window *window)
/* if send window, drop reference to matching receive window */
if (window->tx_win) {
if (window->user_win) {
- /* Drop references to pid and mm */
- put_pid(window->pid);
- if (window->mm) {
- mm_context_remove_vas_window(window->mm);
- mmdrop(window->mm);
- }
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
}
put_rx_win(window->rxwin);
}
@@ -1377,7 +1344,7 @@ EXPORT_SYMBOL_GPL(vas_win_close);
* - The kernel with return credit on fault window after reading entry
* from fault FIFO.
*/
-void vas_return_credit(struct vas_window *window, bool tx)
+void vas_return_credit(struct pnv_vas_window *window, bool tx)
{
uint64_t val;
@@ -1391,10 +1358,10 @@ void vas_return_credit(struct vas_window *window, bool tx)
}
}
-struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
uint32_t pswid)
{
- struct vas_window *window;
+ struct pnv_vas_window *window;
int winid;
if (!pswid) {
@@ -1431,13 +1398,74 @@ struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
* by NX).
*/
if (!window->tx_win || !window->user_win || !window->nx_win ||
- window->cop == VAS_COP_TYPE_FAULT ||
- window->cop == VAS_COP_TYPE_FTW) {
+ window->vas_win.cop == VAS_COP_TYPE_FAULT ||
+ window->vas_win.cop == VAS_COP_TYPE_FTW) {
pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
winid, window->tx_win, window->user_win,
- window->nx_win, window->cop);
+ window->nx_win, window->vas_win.cop);
WARN_ON(1);
}
return window;
}
+
+static struct vas_window *vas_user_win_open(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ struct vas_tx_win_attr txattr = {};
+
+ vas_init_tx_win_attr(&txattr, cop_type);
+
+ txattr.lpid = mfspr(SPRN_LPID);
+ txattr.pidr = mfspr(SPRN_PID);
+ txattr.user_win = true;
+ txattr.rsvd_txbuf_count = false;
+ txattr.pswid = false;
+
+ pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
+ mfspr(SPRN_PID));
+
+ return vas_tx_win_open(vas_id, cop_type, &txattr);
+}
+
+static u64 vas_user_win_paste_addr(struct vas_window *txwin)
+{
+ struct pnv_vas_window *win;
+ u64 paste_addr;
+
+ win = container_of(txwin, struct pnv_vas_window, vas_win);
+ vas_win_paste_addr(win, &paste_addr, NULL);
+
+ return paste_addr;
+}
+
+static int vas_user_win_close(struct vas_window *txwin)
+{
+ vas_win_close(txwin);
+
+ return 0;
+}
+
+static const struct vas_user_win_ops vops = {
+ .open_win = vas_user_win_open,
+ .paste_addr = vas_user_win_paste_addr,
+ .close_win = vas_user_win_close,
+};
+
+/*
+ * Supporting only the nx-gzip coprocessor type for now, but this API
+ * code can be extended to other coprocessor types later.
+ */
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+
+ return vas_register_coproc_api(mod, cop_type, name, &vops);
+}
+EXPORT_SYMBOL_GPL(vas_register_api_powernv);
+
+void vas_unregister_api_powernv(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_powernv);
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index c7db3190baca..8bb08e395de0 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -334,11 +334,11 @@ struct vas_instance {
int fifo_in_progress; /* To wake up thread or return IRQ_HANDLED */
spinlock_t fault_lock; /* Protects fifo_in_progress update */
void *fault_fifo;
- struct vas_window *fault_win; /* Fault window */
+ struct pnv_vas_window *fault_win; /* Fault window */
struct mutex mutex;
- struct vas_window *rxwin[VAS_COP_TYPE_MAX];
- struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
+ struct pnv_vas_window *rxwin[VAS_COP_TYPE_MAX];
+ struct pnv_vas_window *windows[VAS_WINDOWS_PER_CHIP];
char *name;
char *dbgname;
@@ -346,32 +346,24 @@ struct vas_instance {
};
/*
- * In-kernel state a VAS window. One per window.
+ * In-kernel state of a VAS window on PowerNV. One per window.
*/
-struct vas_window {
+struct pnv_vas_window {
+ struct vas_window vas_win;
/* Fields common to send and receive windows */
struct vas_instance *vinst;
- int winid;
bool tx_win; /* True if send window */
bool nx_win; /* True if NX window */
bool user_win; /* True if user space window */
void *hvwc_map; /* HV window context */
void *uwc_map; /* OS/User window context */
- struct pid *pid; /* Linux process id of owner */
- struct pid *tgid; /* Thread group ID of owner */
- struct mm_struct *mm; /* Linux process mm_struct */
- int wcreds_max; /* Window credits */
-
- char *dbgname;
- struct dentry *dbgdir;
/* Fields applicable only to send windows */
void *paste_kaddr;
char *paste_addr_name;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
- /* Feilds applicable only to receive windows */
- enum vas_cop_type cop;
+ /* Fields applicable only to receive windows */
atomic_t num_txwins;
};
@@ -430,32 +422,32 @@ extern struct mutex vas_mutex;
extern struct vas_instance *find_vas_instance(int vasid);
extern void vas_init_dbgdir(void);
extern void vas_instance_init_dbgdir(struct vas_instance *vinst);
-extern void vas_window_init_dbgdir(struct vas_window *win);
-extern void vas_window_free_dbgdir(struct vas_window *win);
+extern void vas_window_init_dbgdir(struct pnv_vas_window *win);
+extern void vas_window_free_dbgdir(struct pnv_vas_window *win);
extern int vas_setup_fault_window(struct vas_instance *vinst);
extern irqreturn_t vas_fault_thread_fn(int irq, void *data);
extern irqreturn_t vas_fault_handler(int irq, void *dev_id);
-extern void vas_return_credit(struct vas_window *window, bool tx);
-extern struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+extern void vas_return_credit(struct pnv_vas_window *window, bool tx);
+extern struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
uint32_t pswid);
-extern void vas_win_paste_addr(struct vas_window *window, u64 *addr,
- int *len);
+extern void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr,
+ int *len);
static inline int vas_window_pid(struct vas_window *window)
{
- return pid_vnr(window->pid);
+ return pid_vnr(window->task_ref.pid);
}
-static inline void vas_log_write(struct vas_window *win, char *name,
+static inline void vas_log_write(struct pnv_vas_window *win, char *name,
void *regptr, u64 val)
{
if (val)
pr_debug("%swin #%d: %s reg %p, val 0x%016llx\n",
- win->tx_win ? "Tx" : "Rx", win->winid, name,
- regptr, val);
+ win->tx_win ? "Tx" : "Rx", win->vas_win.winid,
+ name, regptr, val);
}
-static inline void write_uwc_reg(struct vas_window *win, char *name,
+static inline void write_uwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -466,7 +458,7 @@ static inline void write_uwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline void write_hvwc_reg(struct vas_window *win, char *name,
+static inline void write_hvwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -477,7 +469,7 @@ static inline void write_hvwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline u64 read_hvwc_reg(struct vas_window *win,
+static inline u64 read_hvwc_reg(struct pnv_vas_window *win,
char *name __maybe_unused, s32 reg)
{
return in_be64(win->hvwc_map+reg);
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index e32406e918d0..a4048b8c8c50 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -7,6 +7,7 @@ config PPC_PS3
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_MMIO
select HAVE_PCI
+ select IRQ_DOMAIN_NOMAP
help
This option enables support for the Sony PS3 game console
and other platforms using the PS3 hypervisor. Enabling this
@@ -85,6 +86,15 @@ config PS3_SYS_MANAGER
This support is required for PS3 system control. In
general, all users will say Y or M.
+config PS3_VERBOSE_RESULT
+ bool "PS3 Verbose LV1 hypercall results" if PS3_ADVANCED
+ depends on PPC_PS3
+ help
+ Enables more verbose log messages for LV1 hypercall results.
+
+ If in doubt, say N here and reduce the size of the kernel by a
+ small amount.
+
config PS3_REPOSITORY_WRITE
bool "PS3 Repository write support" if PS3_ADVANCED
depends on PPC_PS3
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 78f2339ed5cb..49871427f599 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
@@ -45,7 +46,7 @@
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
- * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
+ * gives a usable range of plug values of {NR_IRQS_LEGACY..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
@@ -721,7 +722,7 @@ static unsigned int ps3_get_irq(void)
}
#if defined(DEBUG)
- if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
+ if (unlikely(plug < NR_IRQS_LEGACY || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index d094321964fb..a81eac35d900 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -6,6 +6,7 @@
* Copyright 2006 Sony Corp.
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
@@ -1118,6 +1119,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
unsigned long lpar_addr;
+ int result;
lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
@@ -1129,6 +1131,16 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
r->offset -= map.r1.offset;
r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
+ dev->core.dma_mask = &r->dma_mask;
+
+ result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));
+
+ if (result < 0) {
+ dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
+ __func__, __LINE__, result);
+ return result;
+ }
+
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
r->region_ops = (USE_DYNAMIC_DMA)
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index e9ae5dd03593..3de9145c20bc 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -36,6 +36,7 @@ DEFINE_MUTEX(ps3_gpu_mutex);
EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
static union ps3_firmware_version ps3_firmware_version;
+static char ps3_firmware_version_str[16];
void ps3_get_firmware_version(union ps3_firmware_version *v)
{
@@ -182,6 +183,40 @@ static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
}
+static ssize_t ps3_fw_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", ps3_firmware_version_str);
+}
+
+static int __init ps3_setup_sysfs(void)
+{
+ static struct kobj_attribute attr = __ATTR(fw-version, S_IRUGO,
+ ps3_fw_version_show, NULL);
+ static struct kobject *kobj;
+ int result;
+
+ kobj = kobject_create_and_add("ps3", firmware_kobj);
+
+ if (!kobj) {
+ pr_warn("%s:%d: kobject_create_and_add failed.\n", __func__,
+ __LINE__);
+ return -ENOMEM;
+ }
+
+ result = sysfs_create_file(kobj, &attr.attr);
+
+ if (result) {
+ pr_warn("%s:%d: sysfs_create_file failed.\n", __func__,
+ __LINE__);
+ kobject_put(kobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+core_initcall(ps3_setup_sysfs);
+
static void __init ps3_setup_arch(void)
{
u64 tmp;
@@ -190,9 +225,11 @@ static void __init ps3_setup_arch(void)
lv1_get_version_info(&ps3_firmware_version.raw, &tmp);
- printk(KERN_INFO "PS3 firmware version %u.%u.%u\n",
- ps3_firmware_version.major, ps3_firmware_version.minor,
- ps3_firmware_version.rev);
+ snprintf(ps3_firmware_version_str, sizeof(ps3_firmware_version_str),
+ "%u.%u.%u", ps3_firmware_version.major,
+ ps3_firmware_version.minor, ps3_firmware_version.rev);
+
+ printk(KERN_INFO "PS3 firmware version %s\n", ps3_firmware_version_str);
ps3_spu_set_platform();
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index b431f41c6cb5..1a5665875165 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -64,9 +64,10 @@ static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
if (result) {
- pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__,
- __LINE__, ps3_result(result));
- result = -EPERM;
+ pr_warn("%s:%d: lv1_open_device dev=%u.%u(%s) failed: %s\n",
+ __func__, __LINE__, dev->match_id, dev->match_sub_id,
+ dev_name(&dev->core), ps3_result(result));
+ result = -EPERM;
}
done:
@@ -120,7 +121,7 @@ static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
result = lv1_gpu_open(0);
if (result) {
- pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__,
+ pr_warn("%s:%d: lv1_gpu_open failed: %s\n", __func__,
__LINE__, ps3_result(result));
result = -EPERM;
}
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index c8a2b0b05ac0..4cda0ef87be0 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
obj-$(CONFIG_SUSPEND) += suspend.o
+obj-$(CONFIG_PPC_VAS) += vas.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 3ac70790ec7a..b1f01ac0c29e 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -289,8 +289,7 @@ int dlpar_acquire_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_UNUSABLE)
return -1;
@@ -311,8 +310,7 @@ int dlpar_release_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
@@ -333,8 +331,7 @@ int dlpar_unisolate_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 8377f1f7c78e..377d852f5a9a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -348,7 +348,8 @@ static int pseries_remove_mem_node(struct device_node *np)
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
- if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
+ if ((lmb->flags & DRCONF_MEM_RESERVED) ||
+ !(lmb->flags & DRCONF_MEM_ASSIGNED))
return false;
#ifdef CONFIG_FA_DUMP
@@ -401,7 +402,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
struct drmem_lmb *lmb;
- int lmbs_removed = 0;
+ int lmbs_reserved = 0;
int lmbs_available = 0;
int rc;
@@ -435,12 +436,12 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
*/
drmem_mark_lmb_reserved(lmb);
- lmbs_removed++;
- if (lmbs_removed == lmbs_to_remove)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_remove)
break;
}
- if (lmbs_removed != lmbs_to_remove) {
+ if (lmbs_reserved != lmbs_to_remove) {
pr_err("Memory hot-remove failed, adding LMB's back\n");
for_each_drmem_lmb(lmb) {
@@ -453,6 +454,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
@@ -466,6 +471,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->base_addr);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -508,7 +517,6 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
@@ -521,18 +529,29 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (rc)
return -EINVAL;
- /* Validate that there are enough LMBs to satisfy the request */
+ /*
+ * Validate that all LMBs in range are not reserved. Note that it
+ * is ok if they are !ASSIGNED since our goal here is to remove the
+ * LMB range, regardless of whether some LMBs were already removed
+ * for any other reason.
+ *
+ * This is in contrast to what is done in remove_by_count(), where we
+ * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
+ * because we want to remove a fixed number of LMBs in that function.
+ */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_remove)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
+ /*
+ * dlpar_remove_lmb() will error out if the LMB is already
+ * !ASSIGNED, but this case is a no-op for us.
+ */
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
continue;
@@ -551,6 +570,13 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (!drmem_lmb_reserved(lmb))
continue;
+ /*
+ * Setting the isolation state of an UNISOLATED/CONFIGURED
+ * device to UNISOLATE is a no-op, but the hypervisor can
+ * use it as a hint that the LMB removal failed.
+ */
+ dlpar_unisolate_drc(lmb->drc_index);
+
rc = dlpar_add_lmb(lmb);
if (rc)
pr_err("Failed to add LMB, drc index %x\n",
@@ -585,10 +611,6 @@ static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
-static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
-{
- return -EOPNOTSUPP;
-}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
return -EOPNOTSUPP;
@@ -651,7 +673,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
struct drmem_lmb *lmb;
int lmbs_available = 0;
- int lmbs_added = 0;
+ int lmbs_reserved = 0;
int rc;
pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
@@ -661,6 +683,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
/* Validate that there are enough LMBs to satisfy the request */
for_each_drmem_lmb(lmb) {
+ if (lmb->flags & DRCONF_MEM_RESERVED)
+ continue;
+
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
lmbs_available++;
@@ -689,13 +714,12 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
* requested LMBs cannot be added.
*/
drmem_mark_lmb_reserved(lmb);
-
- lmbs_added++;
- if (lmbs_added == lmbs_to_add)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_add)
break;
}
- if (lmbs_added != lmbs_to_add) {
+ if (lmbs_reserved != lmbs_to_add) {
pr_err("Memory hot-add failed, removing any added LMBs\n");
for_each_drmem_lmb(lmb) {
@@ -710,6 +734,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
dlpar_release_drc(lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
} else {
@@ -720,6 +748,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
pr_debug("Memory at %llx (drc index %x) was hot-added\n",
lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -764,7 +796,6 @@ static int dlpar_memory_add_by_index(u32 drc_index)
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
@@ -779,15 +810,14 @@ static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
/* Validate that the LMBs in this range are not reserved */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ /* Fail immediately if the whole range can't be hot-added */
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_add)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (lmb->flags & DRCONF_MEM_ASSIGNED)
continue;
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 2136e42833af..ab9fc6506861 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -102,6 +102,20 @@ END_FTR_SECTION(0, 1); \
#define HCALL_BRANCH(LABEL)
#endif
+_GLOBAL_TOC(plpar_hcall_norets_notrace)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+ HVSC /* invoke the hypervisor */
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+ blr /* return r3 = status */
+
_GLOBAL_TOC(plpar_hcall_norets)
HMT_MEDIUM
@@ -110,6 +124,9 @@ _GLOBAL_TOC(plpar_hcall_norets)
HCALL_BRANCH(plpar_hcall_norets_trace)
HVSC /* invoke the hypervisor */
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr /* return r3 = status */
@@ -119,6 +136,10 @@ plpar_hcall_norets_trace:
HCALL_INST_PRECALL(R4)
HVSC
HCALL_INST_POSTCALL_NORETS
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr
@@ -149,6 +170,9 @@ _GLOBAL_TOC(plpar_hcall)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -178,6 +202,9 @@ plpar_hcall_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -213,6 +240,9 @@ _GLOBAL(plpar_hcall_raw)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -252,6 +282,9 @@ _GLOBAL_TOC(plpar_hcall9)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -290,6 +323,9 @@ plpar_hcall9_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -329,6 +365,9 @@ _GLOBAL(plpar_hcall9_raw)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index a15ab33646b3..c6c79ef55e13 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -42,6 +42,7 @@
#include <linux/kobject.h>
#include <linux/dma-map-ops.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 1f3152ad7213..dab356e3ff87 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1829,30 +1829,28 @@ void hcall_tracepoint_unregfunc(void)
#endif
/*
- * Since the tracing code might execute hcalls we need to guard against
- * recursion. One example of this are spinlocks calling H_YIELD on
- * shared processor partitions.
+ * Keep track of hcall tracing depth and prevent recursion. Warn if any is
+ * detected because it may indicate a problem. This will not catch all
+ * problems with tracing code making hcalls, because the tracing might have
+ * been invoked from a non-hcall, so the first hcall could recurse into it
+ * without warning here, but this is better than nothing.
+ *
+ * Hcalls with specific problems being traced should use the _notrace
+ * plpar_hcall variants.
*/
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
-void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
unsigned long flags;
unsigned int *depth;
- /*
- * We cannot call tracepoints inside RCU idle regions which
- * means we must not trace H_CEDE.
- */
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (WARN_ON_ONCE(*depth))
goto out;
(*depth)++;
@@ -1864,19 +1862,16 @@ out:
local_irq_restore(flags);
}
-void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
unsigned long flags;
unsigned int *depth;
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (*depth) /* Don't warn again on the way out */
goto out;
(*depth)++;
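The entry and exit hooks above share one per-CPU depth counter so that tracing code which itself issues hcalls cannot recurse through the tracepoints. A minimal sketch of that guard pattern (names and body are illustrative, not the kernel's actual helpers):

	static DEFINE_PER_CPU(unsigned int, guard_depth);

	static notrace void traced_hook(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* Already inside the hook on this CPU: warn once and bail out */
		if (WARN_ON_ONCE(__this_cpu_read(guard_depth)))
			goto out;
		__this_cpu_inc(guard_depth);
		/* ... emit the tracepoint, which may itself issue hcalls ... */
		__this_cpu_dec(guard_depth);
	out:
		local_irq_restore(flags);
	}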
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index ef26fe40efb0..f48e87ac89c9 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -18,6 +18,7 @@
#include <asm/plpar_wrappers.h>
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
+#include <asm/unaligned.h>
#define BIND_ANY_ADDR (~0ul)
@@ -114,6 +115,9 @@ struct papr_scm_priv {
/* Health information for the dimm */
u64 health_bitmap;
+ /* Holds the last known dirty shutdown counter value */
+ u64 dirty_shutdown_counter;
+
/* length of the stat buffer as expected by phyp */
size_t stat_buffer_len;
};
@@ -260,7 +264,7 @@ err_out:
* Query the Dimm performance stats from PHYP and copy them (if returned) to
* provided struct papr_scm_perf_stats instance 'stats' that can hold atleast
* (num_stats + header) bytes.
- * - If buff_stats == NULL the return value is the size in byes of the buffer
+ * - If buff_stats == NULL the return value is the size in bytes of the buffer
* needed to hold all supported performance-statistics.
* - If buff_stats != NULL and num_stats == 0 then we copy all known
* performance-statistics to 'buff_stat' and expect to be large enough to
@@ -310,6 +314,13 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
dev_err(&p->pdev->dev,
"Unknown performance stats, Err:0x%016lX\n", ret[0]);
return -ENOENT;
+ } else if (rc == H_AUTHORITY) {
+ dev_info(&p->pdev->dev,
+ "Permission denied while accessing performance stats");
+ return -EPERM;
+ } else if (rc == H_UNSUPPORTED) {
+ dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
+ return -EOPNOTSUPP;
} else if (rc != H_SUCCESS) {
dev_err(&p->pdev->dev,
"Failed to query performance stats, Err:%lld\n", rc);
@@ -596,6 +607,16 @@ free_stats:
return rc;
}
+/* Add the dirty-shutdown-counter value to the pdsm */
+static int papr_pdsm_dsc(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
+ payload->health.dimm_dsc = p->dirty_shutdown_counter;
+
+ return sizeof(struct nd_papr_pdsm_health);
+}
+
/* Fetch the DIMM health info and populate it in provided package. */
static int papr_pdsm_health(struct papr_scm_priv *p,
union nd_pdsm_payload *payload)
@@ -639,6 +660,8 @@ static int papr_pdsm_health(struct papr_scm_priv *p,
/* Populate the fuel gauge meter in the payload */
papr_pdsm_fuel_gauge(p, payload);
+ /* Populate the dirty-shutdown-counter field */
+ papr_pdsm_dsc(p, payload);
rc = sizeof(struct nd_papr_pdsm_health);
@@ -900,15 +923,41 @@ static ssize_t flags_show(struct device *dev,
}
DEVICE_ATTR_RO(flags);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+
+ return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
+}
+DEVICE_ATTR_RO(dirty_shutdown);
+
+static umode_t papr_nd_attribute_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
+
+ /* Hide the perf_stats sysfs attribute if perf-stats are not available */
+ if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
+ return 0;
+
+ return attr->mode;
+}
+
/* papr_scm specific dimm attributes */
static struct attribute *papr_nd_attributes[] = {
&dev_attr_flags.attr,
&dev_attr_perf_stats.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL,
};
static struct attribute_group papr_nd_attribute_group = {
.name = "papr",
+ .is_visible = papr_nd_attribute_visible,
.attrs = papr_nd_attributes,
};
@@ -924,7 +973,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
struct nd_region_desc ndr_desc;
unsigned long dimm_flags;
int target_nid, online_nid;
- ssize_t stat_size;
p->bus_desc.ndctl = papr_scm_ndctl;
p->bus_desc.module = THIS_MODULE;
@@ -1009,16 +1057,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
list_add_tail(&p->region_list, &papr_nd_regions);
mutex_unlock(&papr_ndr_lock);
- /* Try retriving the stat buffer and see if its supported */
- stat_size = drc_pmem_query_stats(p, NULL, 0);
- if (stat_size > 0) {
- p->stat_buffer_len = stat_size;
- dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
- p->stat_buffer_len);
- } else {
- dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
- }
-
return 0;
err: nvdimm_bus_unregister(p->bus);
@@ -1094,8 +1132,10 @@ static int papr_scm_probe(struct platform_device *pdev)
u32 drc_index, metadata_size;
u64 blocks, block_size;
struct papr_scm_priv *p;
+ u8 uuid_raw[UUID_SIZE];
const char *uuid_str;
- u64 uuid[2];
+ ssize_t stat_size;
+ uuid_t uuid;
int rc;
/* check we have all the required DT properties */
@@ -1137,17 +1177,28 @@ static int papr_scm_probe(struct platform_device *pdev)
p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");
+ if (of_property_read_u64(dn, "ibm,persistence-failed-count",
+ &p->dirty_shutdown_counter))
+ p->dirty_shutdown_counter = 0;
+
/* We just need to ensure that set cookies are unique across */
- uuid_parse(uuid_str, (uuid_t *) uuid);
+ uuid_parse(uuid_str, &uuid);
+
/*
- * cookie1 and cookie2 are not really little endian
- * we store a little endian representation of the
- * uuid str so that we can compare this with the label
- * area cookie irrespective of the endian config with which
- * the kernel is built.
+ * The cookie1 and cookie2 are not really little endian.
+ * We store a raw buffer representation of the
+ * uuid string so that we can compare this with the label
+ * area cookie irrespective of the endian configuration
+ * with which the kernel is built.
+ *
+ * Historically we stored the cookie in the below format:
+ * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
+ * cookie1 was 0xfd423b0b671b5172
+ * cookie2 was 0xaabce8cae35b1d8d
*/
- p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
- p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
+ export_uuid(uuid_raw, &uuid);
+ p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
+ p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
/* might be zero */
p->metadata_size = metadata_size;
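For the uuid quoted in the comment above, the raw-buffer form yields exactly the historical cookie values, so existing label-area cookies stay compatible. A standalone sketch of the derivation (variable names illustrative):

	uuid_t uuid;
	u8 raw[UUID_SIZE];
	u64 cookie1, cookie2;

	uuid_parse("72511b67-0b3b-42fd-8d1d-5be3cae8bcaa", &uuid);
	export_uuid(raw, &uuid);                /* raw[] holds the uuid bytes in string order */
	cookie1 = get_unaligned_le64(&raw[0]);  /* 0xfd423b0b671b5172 */
	cookie2 = get_unaligned_le64(&raw[8]);  /* 0xaabce8cae35b1d8d */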
@@ -1172,6 +1223,14 @@ static int papr_scm_probe(struct platform_device *pdev)
p->res.name = pdev->name;
p->res.flags = IORESOURCE_MEM;
+ /* Try retrieving the stat buffer and see if it's supported */
+ stat_size = drc_pmem_query_stats(p, NULL, 0);
+ if (stat_size > 0) {
+ p->stat_buffer_len = stat_size;
+ dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
+ p->stat_buffer_len);
+ }
+
rc = papr_scm_nvdimm_init(p);
if (rc)
goto err2;
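With the stat-buffer probe moved before papr_scm_nvdimm_init(), p->stat_buffer_len is already known when the nvdimm is registered, so the new is_visible hook can hide perf_stats on systems without stats support. The resulting per-DIMM sysfs layout looks roughly like this (paths follow the existing nvdimm/papr ABI; values are illustrative):

	$ cat /sys/bus/nd/devices/nmem0/papr/dirty_shutdown
	42
	$ ls /sys/bus/nd/devices/nmem0/papr/
	dirty_shutdown  flags  perf_stats        # perf_stats absent when unsupported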
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 9d4ef65da7f3..167f2e1b8d39 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -487,8 +487,8 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
if ((be64_to_cpu(regs->msr) &
(MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
- regs->nip = be64_to_cpu((__be64)regs->nip);
- regs->msr = 0;
+ regs_set_return_ip(regs, be64_to_cpu((__be64)regs->nip));
+ regs_set_return_msr(regs, 0);
}
#endif
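Throughout these hunks the direct writes to regs->nip and regs->msr are replaced by regs_set_return_ip()/regs_set_return_msr() (and regs_add_return_ip() further down, e.g. in fsl_pci.c). A simplified sketch of what such accessors are assumed to look like; on Book3S-64 they would also drop any cached SRR validity state (details assumed, not shown in this patch):

	static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
	{
		regs->nip = ip;
		/* Book3S-64: also invalidate cached SRR0/SRR1 state here */
	}

	static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
	{
		regs_set_return_ip(regs, regs->nip + offset);
	}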
@@ -593,8 +593,6 @@ static int mce_handle_err_virtmode(struct pt_regs *regs,
mce_err.severity = MCE_SEV_SEVERE;
else if (severity == RTAS_SEVERITY_ERROR)
mce_err.severity = MCE_SEV_SEVERE;
- else if (severity == RTAS_SEVERITY_FATAL)
- mce_err.severity = MCE_SEV_FATAL;
else
mce_err.severity = MCE_SEV_FATAL;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 754e493b7c05..631a0d57b6cd 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -549,6 +549,15 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_STF_BARRIER)
+ security_ftr_clear(SEC_FTR_STF_BARRIER);
+
if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c70b4be9f0a5..096629f54576 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -211,7 +211,9 @@ static __init void pSeries_smp_probe(void)
if (!cpu_has_feature(CPU_FTR_SMT))
return;
- if (check_kvm_guest()) {
+ check_kvm_guest();
+
+ if (is_kvm_guest()) {
/*
* KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
* faults to the hypervisor which then reads the instruction
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
new file mode 100644
index 000000000000..b5c1cf1bc64d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <asm/machdep.h>
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/vas.h>
+#include "vas.h"
+
+#define VAS_INVALID_WIN_ADDRESS 0xFFFFFFFFFFFFFFFFul
+#define VAS_DEFAULT_DOMAIN_ID 0xFFFFFFFFFFFFFFFFul
+/* The hypervisor allows one credit per window right now */
+#define DEF_WIN_CREDS 1
+
+static struct vas_all_caps caps_all;
+static bool copypaste_feat;
+
+static struct vas_caps vascaps[VAS_MAX_FEAT_TYPE];
+static DEFINE_MUTEX(vas_pseries_mutex);
+
+static long hcall_return_busy_check(long rc)
+{
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+
+ return rc;
+}
+
+/*
+ * Allocate VAS window hcall
+ */
+static int h_allocate_vas_window(struct pseries_vas_window *win, u64 *domain,
+ u8 wintype, u16 credits)
+{
+ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
+
+ do {
+ rc = plpar_hcall9(H_ALLOCATE_VAS_WINDOW, retbuf, wintype,
+ credits, domain[0], domain[1], domain[2],
+ domain[3], domain[4], domain[5]);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS) {
+ if (win->win_addr == VAS_INVALID_WIN_ADDRESS) {
+ pr_err("H_ALLOCATE_VAS_WINDOW: COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+ win->vas_win.winid = retbuf[0];
+ win->win_addr = retbuf[1];
+ win->complete_irq = retbuf[2];
+ win->fault_irq = retbuf[3];
+ return 0;
+ }
+
+ pr_err("H_ALLOCATE_VAS_WINDOW error: %ld, wintype: %u, credits: %u\n",
+ rc, wintype, credits);
+
+ return -EIO;
+}
+
+/*
+ * Deallocate VAS window hcall.
+ */
+static int h_deallocate_vas_window(u64 winid)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_DEALLOCATE_VAS_WINDOW, winid);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_DEALLOCATE_VAS_WINDOW error: %ld, winid: %llu\n",
+ rc, winid);
+ return -EIO;
+}
+
+/*
+ * Modify VAS window.
+ * After the window is opened with allocate window hcall, configure it
+ * with flags and LPAR PID before using.
+ */
+static int h_modify_vas_window(struct pseries_vas_window *win)
+{
+ long rc;
+ u32 lpid = mfspr(SPRN_PID);
+
+ /*
+ * AMR value is not supported in Linux VAS implementation.
+ * The hypervisor ignores it if 0 is passed.
+ */
+ do {
+ rc = plpar_hcall_norets(H_MODIFY_VAS_WINDOW,
+ win->vas_win.winid, lpid, 0,
+ VAS_MOD_WIN_FLAGS, 0);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_MODIFY_VAS_WINDOW error: %ld, winid %u lpid %u\n",
+ rc, win->vas_win.winid, lpid);
+ return -EIO;
+}
+
+/*
+ * This hcall is used to determine the capabilities from the hypervisor.
+ * @hcall: H_QUERY_VAS_CAPABILITIES or H_QUERY_NX_CAPABILITIES
+ * @query_type: If 0 is passed, the hypervisor returns the overall
+ * capabilities which provides all feature(s) that are
+ * available. Then query the hypervisor to get the
+ * corresponding capabilities for the specific feature.
+ * Example: H_QUERY_VAS_CAPABILITIES provides VAS GZIP QoS
+ * and VAS GZIP Default capabilities.
+ * H_QUERY_NX_CAPABILITIES provides NX GZIP
+ * capabilities.
+ * @result: Return buffer to save capabilities.
+ */
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(hcall, query_type, result);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("HCALL(%llx) error %ld, query_type %u, result buffer 0x%llx\n",
+ hcall, rc, query_type, result);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(h_query_vas_capabilities);
+
+/*
+ * hcall to get fault CRB from the hypervisor.
+ */
+static int h_get_nx_fault(u32 winid, u64 buffer)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(H_GET_NX_FAULT, winid, buffer);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_GET_NX_FAULT error: %ld, winid %u, buffer 0x%llx\n",
+ rc, winid, buffer);
+ return -EIO;
+
+}
+
+/*
+ * Handle the fault interrupt.
+ * When the fault interrupt is received for each window, query the
+ * hypervisor to get the fault CRB on the specific fault. Then
+ * process the CRB by updating the CSB, or send a signal if the
+ * user space CSB is invalid.
+ * Note: The hypervisor forwards an interrupt for each fault request.
+ * So one fault CRB to process for each H_GET_NX_FAULT hcall.
+ */
+irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data)
+{
+ struct pseries_vas_window *txwin = data;
+ struct coprocessor_request_block crb;
+ struct vas_user_win_ref *tsk_ref;
+ int rc;
+
+ rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
+ if (!rc) {
+ tsk_ref = &txwin->vas_win.task_ref;
+ vas_dump_crb(&crb);
+ vas_update_csb(&crb, tsk_ref);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allocate window and setup IRQ mapping.
+ */
+static int allocate_setup_window(struct pseries_vas_window *txwin,
+ u64 *domain, u8 wintype)
+{
+ int rc;
+
+ rc = h_allocate_vas_window(txwin, domain, wintype, DEF_WIN_CREDS);
+ if (rc)
+ return rc;
+ /*
+ * On PowerVM, the hypervisor sets up and forwards the fault
+ * interrupt per window. So the IRQ setup and fault handling
+ * will be done for each open window separately.
+ */
+ txwin->fault_virq = irq_create_mapping(NULL, txwin->fault_irq);
+ if (!txwin->fault_virq) {
+ pr_err("Failed irq mapping %d\n", txwin->fault_irq);
+ rc = -EINVAL;
+ goto out_win;
+ }
+
+ txwin->name = kasprintf(GFP_KERNEL, "vas-win-%d",
+ txwin->vas_win.winid);
+ if (!txwin->name) {
+ rc = -ENOMEM;
+ goto out_irq;
+ }
+
+ rc = request_threaded_irq(txwin->fault_virq, NULL,
+ pseries_vas_fault_thread_fn, IRQF_ONESHOT,
+ txwin->name, txwin);
+ if (rc) {
+ pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n",
+ txwin->vas_win.winid, txwin->fault_virq, rc);
+ goto out_free;
+ }
+
+ txwin->vas_win.wcreds_max = DEF_WIN_CREDS;
+
+ return 0;
+out_free:
+ kfree(txwin->name);
+out_irq:
+ irq_dispose_mapping(txwin->fault_virq);
+out_win:
+ h_deallocate_vas_window(txwin->vas_win.winid);
+ return rc;
+}
+
+static inline void free_irq_setup(struct pseries_vas_window *txwin)
+{
+ free_irq(txwin->fault_virq, txwin);
+ kfree(txwin->name);
+ irq_dispose_mapping(txwin->fault_virq);
+}
+
+static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ long domain[PLPAR_HCALL9_BUFSIZE] = {VAS_DEFAULT_DOMAIN_ID};
+ struct vas_cop_feat_caps *cop_feat_caps;
+ struct vas_caps *caps;
+ struct pseries_vas_window *txwin;
+ int rc;
+
+ txwin = kzalloc(sizeof(*txwin), GFP_KERNEL);
+ if (!txwin)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * A VAS window can have many credits which means that many
+ * requests can be issued simultaneously. But the hypervisor
+ * restricts one credit per window.
+ * The hypervisor introduces 2 different types of credits:
+ * Default credit type (Uses normal priority FIFO):
+ * A limited number of credits are assigned to partitions
+ * based on processor entitlement. But these credits may be
+ * over-committed on a system depending on whether the CPUs
+ * are in shared or dedicated modes - that is, more requests
+ * may be issued across the system than NX can service at
+ * once, which can result in paste command failure (RMA_busy).
+ * Then the process has to resend requests or fall back to
+ * SW compression.
+ * Quality of Service (QoS) credit type (Uses high priority FIFO):
+ * To avoid NX HW contention, the system admins can assign
+ * QoS credits for each LPAR so that this partition is
+ * guaranteed access to NX resources. These credits are
+ * assigned to partitions via the HMC.
+ * Refer to PAPR for more information.
+ *
+ * Allocate window with QoS credits if user requested. Otherwise
+ * default credits are used.
+ */
+ if (flags & VAS_TX_WIN_FLAG_QOS_CREDIT)
+ caps = &vascaps[VAS_GZIP_QOS_FEAT_TYPE];
+ else
+ caps = &vascaps[VAS_GZIP_DEF_FEAT_TYPE];
+
+ cop_feat_caps = &caps->caps;
+
+ if (atomic_inc_return(&cop_feat_caps->used_lpar_creds) >
+ atomic_read(&cop_feat_caps->target_lpar_creds)) {
+ pr_err("Credits are not available to allocate window\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (vas_id == -1) {
+ /*
+ * The user space is requesting to allocate a window on
+ * a VAS instance where the process is executing.
+ * On PowerVM, domain values are passed to the hypervisor
+ * to select the VAS instance. Useful if the process has
+ * affinity to a NUMA node.
+ * The hypervisor selects VAS instance if
+ * VAS_DEFAULT_DOMAIN_ID (-1) is passed for domain values.
+ * The h_allocate_vas_window hcall is defined to take
+ * domain values as specified by h_home_node_associativity,
+ * so no unpacking needs to be done.
+ */
+ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain,
+ VPHN_FLAG_VCPU, smp_processor_id());
+ if (rc != H_SUCCESS) {
+ pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc);
+ goto out;
+ }
+ }
+
+ /*
+ * Allocate / Deallocate window hcalls and setup / free IRQs
+ * have to be protected with mutex.
+ * Open VAS window: Allocate window hcall and setup IRQ
+ * Close VAS window: Deallocate window hcall and free IRQ
+ * The hypervisor waits until all NX requests are
+ * completed before closing the window, so it expects the OS
+ * to handle NX faults; this means the IRQ can be freed only
+ * after the deallocate window hcall has returned.
+ * If the window is closed with the deallocate hcall before
+ * the IRQ is freed, the hypervisor can assign the same fault
+ * IRQ to a new allocate hcall. IRQ setup can then fail for
+ * the new window because the OS has not yet freed the
+ * original fault IRQ.
+ */
+ mutex_lock(&vas_pseries_mutex);
+ rc = allocate_setup_window(txwin, (u64 *)&domain[0],
+ cop_feat_caps->win_type);
+ mutex_unlock(&vas_pseries_mutex);
+ if (rc)
+ goto out;
+
+ /*
+ * Modify the window and then it is ready to use.
+ */
+ rc = h_modify_vas_window(txwin);
+ if (!rc)
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
+ if (rc)
+ goto out_free;
+
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
+ txwin->win_type = cop_feat_caps->win_type;
+ mutex_lock(&vas_pseries_mutex);
+ list_add(&txwin->win_list, &caps->list);
+ mutex_unlock(&vas_pseries_mutex);
+
+ return &txwin->vas_win;
+
+out_free:
+ /*
+ * Window is not operational. Free the IRQ before closing the
+ * window so that we do not have to hold the mutex.
+ */
+ free_irq_setup(txwin);
+ h_deallocate_vas_window(txwin->vas_win.winid);
+out:
+ atomic_dec(&cop_feat_caps->used_lpar_creds);
+ kfree(txwin);
+ return ERR_PTR(rc);
+}
+
+static u64 vas_paste_address(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+ return win->win_addr;
+}
+
+static int deallocate_free_window(struct pseries_vas_window *win)
+{
+ int rc = 0;
+
+ /*
+ * The hypervisor waits for all requests, including faults,
+ * to be processed before closing the window - meaning all
+ * credits have to be returned. In the case of a fault
+ * request, a credit is returned after the OS issues the
+ * H_GET_NX_FAULT hcall.
+ * So free the IRQ after executing the H_DEALLOCATE_VAS_WINDOW
+ * hcall.
+ */
+ rc = h_deallocate_vas_window(win->vas_win.winid);
+ if (!rc)
+ free_irq_setup(win);
+
+ return rc;
+}
+
+static int vas_deallocate_window(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+ struct vas_cop_feat_caps *caps;
+ int rc = 0;
+
+ if (!vwin)
+ return -EINVAL;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+
+ /* Should not happen */
+ if (win->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Window (%u): Invalid window type %u\n",
+ vwin->winid, win->win_type);
+ return -EINVAL;
+ }
+
+ caps = &vascaps[win->win_type].caps;
+ mutex_lock(&vas_pseries_mutex);
+ rc = deallocate_free_window(win);
+ if (rc) {
+ mutex_unlock(&vas_pseries_mutex);
+ return rc;
+ }
+
+ list_del(&win->win_list);
+ atomic_dec(&caps->used_lpar_creds);
+ mutex_unlock(&vas_pseries_mutex);
+
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
+
+ kfree(win);
+ return 0;
+}
+
+static const struct vas_user_win_ops vops_pseries = {
+ .open_win = vas_allocate_window, /* Open and configure window */
+ .paste_addr = vas_paste_address, /* To do copy/paste */
+ .close_win = vas_deallocate_window, /* Close window */
+};
+
+/*
+ * Supporting only the nx-gzip coprocessor type now, but this API code
+ * can be extended to other coprocessor types later.
+ */
+int vas_register_api_pseries(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+ int rc;
+
+ if (!copypaste_feat)
+ return -ENOTSUPP;
+
+ rc = vas_register_coproc_api(mod, cop_type, name, &vops_pseries);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vas_register_api_pseries);
+
+void vas_unregister_api_pseries(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_pseries);
+
+/*
+ * Get the specific capabilities based on the feature type.
+ * Right now it supports GZIP default and GZIP QoS capabilities.
+ */
+static int get_vas_capabilities(u8 feat, enum vas_cop_feat_type type,
+ struct hv_vas_cop_feat_caps *hv_caps)
+{
+ struct vas_cop_feat_caps *caps;
+ struct vas_caps *vcaps;
+ int rc = 0;
+
+ vcaps = &vascaps[type];
+ memset(vcaps, 0, sizeof(*vcaps));
+ INIT_LIST_HEAD(&vcaps->list);
+
+ caps = &vcaps->caps;
+
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, feat,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ return rc;
+
+ caps->user_mode = hv_caps->user_mode;
+ if (!(caps->user_mode & VAS_COPY_PASTE_USER_MODE)) {
+ pr_err("User space COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ caps->descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps->win_type = hv_caps->win_type;
+ if (caps->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Unsupported window type %u\n", caps->win_type);
+ return -EINVAL;
+ }
+ caps->max_lpar_creds = be16_to_cpu(hv_caps->max_lpar_creds);
+ caps->max_win_creds = be16_to_cpu(hv_caps->max_win_creds);
+ atomic_set(&caps->target_lpar_creds,
+ be16_to_cpu(hv_caps->target_lpar_creds));
+ if (feat == VAS_GZIP_DEF_FEAT) {
+ caps->def_lpar_creds = be16_to_cpu(hv_caps->def_lpar_creds);
+
+ if (caps->max_win_creds < DEF_WIN_CREDS) {
+ pr_err("Window creds(%u) > max allowed window creds(%u)\n",
+ DEF_WIN_CREDS, caps->max_win_creds);
+ return -EINVAL;
+ }
+ }
+
+ copypaste_feat = true;
+
+ return 0;
+}
+
+static int __init pseries_vas_init(void)
+{
+ struct hv_vas_cop_feat_caps *hv_cop_caps;
+ struct hv_vas_all_caps *hv_caps;
+ int rc;
+
+ /*
+ * Linux supports user space COPY/PASTE only with Radix
+ */
+ if (!radix_enabled()) {
+ pr_err("API is supported only with radix page tables\n");
+ return -ENOTSUPP;
+ }
+
+ hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
+ if (!hv_caps)
+ return -ENOMEM;
+ /*
+ * Get VAS overall capabilities by passing 0 to feature type.
+ */
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, 0,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ goto out;
+
+ caps_all.descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps_all.feat_type = be64_to_cpu(hv_caps->feat_type);
+
+ hv_cop_caps = kmalloc(sizeof(*hv_cop_caps), GFP_KERNEL);
+ if (!hv_cop_caps) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /*
+ * QOS capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_QOS_FEAT_BIT) {
+ rc = get_vas_capabilities(VAS_GZIP_QOS_FEAT,
+ VAS_GZIP_QOS_FEAT_TYPE, hv_cop_caps);
+
+ if (rc)
+ goto out_cop;
+ }
+ /*
+ * Default capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_DEF_FEAT_BIT) {
+ rc = get_vas_capabilities(VAS_GZIP_DEF_FEAT,
+ VAS_GZIP_DEF_FEAT_TYPE, hv_cop_caps);
+ if (rc)
+ goto out_cop;
+ }
+
+ pr_info("GZIP feature is available\n");
+
+out_cop:
+ kfree(hv_cop_caps);
+out:
+ kfree(hv_caps);
+ return rc;
+}
+machine_device_initcall(pseries, pseries_vas_init);
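A coprocessor driver is expected to hook its user-space API up through vas_register_api_pseries() once this platform code has probed the capabilities. A hedged example of such a caller (function and device names are illustrative, and VAS_COP_TYPE_GZIP is assumed to come from asm/vas.h; only the registration call itself comes from this file):

	static int __init nx_gzip_vas_setup(void)
	{
		int rc;

		/* Expose the pseries VAS window ops behind a char device */
		rc = vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
					      "nx-gzip");
		if (rc)
			pr_err("VAS API registration failed: %d\n", rc);
		return rc;
	}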
diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h
new file mode 100644
index 000000000000..4ecb3fcabd10
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#ifndef _VAS_H
+#define _VAS_H
+#include <asm/vas.h>
+#include <linux/mutex.h>
+#include <linux/stringify.h>
+
+/*
+ * VAS window modify flags
+ */
+#define VAS_MOD_WIN_CLOSE PPC_BIT(0)
+#define VAS_MOD_WIN_JOBS_KILL PPC_BIT(1)
+#define VAS_MOD_WIN_DR PPC_BIT(3)
+#define VAS_MOD_WIN_PR PPC_BIT(4)
+#define VAS_MOD_WIN_SF PPC_BIT(5)
+#define VAS_MOD_WIN_TA PPC_BIT(6)
+#define VAS_MOD_WIN_FLAGS (VAS_MOD_WIN_JOBS_KILL | VAS_MOD_WIN_DR | \
+ VAS_MOD_WIN_PR | VAS_MOD_WIN_SF)
+
+#define VAS_WIN_ACTIVE 0x0
+#define VAS_WIN_CLOSED 0x1
+#define VAS_WIN_INACTIVE 0x2 /* Inactive due to HW failure */
+/* In the process of being modified, deallocated, or quiesced */
+#define VAS_WIN_MOD_IN_PROCESS 0x3
+
+#define VAS_COPY_PASTE_USER_MODE 0x00000001
+#define VAS_COP_OP_USER_MODE 0x00000010
+
+/*
+ * Co-processor feature - GZIP QoS windows or GZIP default windows
+ */
+enum vas_cop_feat_type {
+ VAS_GZIP_QOS_FEAT_TYPE,
+ VAS_GZIP_DEF_FEAT_TYPE,
+ VAS_MAX_FEAT_TYPE,
+};
+
+/*
+ * Used to get feature-specific capabilities from the
+ * hypervisor.
+ */
+struct hv_vas_cop_feat_caps {
+ __be64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode;
+ __be16 max_lpar_creds;
+ __be16 max_win_creds;
+ union {
+ __be16 reserved;
+ __be16 def_lpar_creds; /* Used for default capabilities */
+ };
+ __be16 target_lpar_creds;
+} __packed __aligned(0x1000);
+
+/*
+ * Feature specific (QoS or default) capabilities.
+ */
+struct vas_cop_feat_caps {
+ u64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode; /* User mode copy/paste or COP HCALL */
+ u16 max_lpar_creds; /* Max credits available in LPAR */
+ /* Max credits can be assigned per window */
+ u16 max_win_creds;
+ union {
+ u16 reserved; /* Used for QoS credit type */
+ u16 def_lpar_creds; /* Used for default credit type */
+ };
+ /* Total LPAR available credits. Can be different from max LPAR */
+ /* credits due to DLPAR operation */
+ atomic_t target_lpar_creds;
+ atomic_t used_lpar_creds; /* Used credits so far */
+ u16 avail_lpar_creds; /* Remaining available credits */
+};
+
+/*
+ * Feature (QoS or Default) specific to store capabilities and
+ * the list of open windows.
+ */
+struct vas_caps {
+ struct vas_cop_feat_caps caps;
+ struct list_head list; /* List of open windows */
+};
+
+/*
+ * To get window information from the hypervisor.
+ */
+struct hv_vas_win_lpar {
+ __be16 version;
+ u8 win_type;
+ u8 status;
+ __be16 credits; /* No of credits assigned to this window */
+ __be16 reserved;
+ __be32 pid; /* LPAR Process ID */
+ __be32 tid; /* LPAR Thread ID */
+ __be64 win_addr; /* Paste address */
+ __be32 interrupt; /* Interrupt when NX request completes */
+ __be32 fault; /* Interrupt when NX sees fault */
+ /* Associativity Domain Identifiers as returned in */
+ /* H_HOME_NODE_ASSOCIATIVITY */
+ __be64 domain[6];
+ __be64 win_util; /* Number of bytes processed */
+} __packed __aligned(0x1000);
+
+struct pseries_vas_window {
+ struct vas_window vas_win;
+ u64 win_addr; /* Physical paste address */
+ u8 win_type; /* QoS or Default window */
+ u32 complete_irq; /* Completion interrupt */
+ u32 fault_irq; /* Fault interrupt */
+ u64 domain[6]; /* Associativity domain Ids */
+ /* this window is allocated */
+ u64 util;
+
+ /* List of windows opened which is used for LPM */
+ struct list_head win_list;
+ u64 flags;
+ char *name;
+ int fault_virq;
+};
+#endif /* _VAS_H */
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 48866e6c1efb..00705258ecf9 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index 13583bbc3e8e..5fa5fa215541 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 69af73765783..b8f76f3fd994 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1072,7 +1072,7 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
ret = get_kernel_nofault(inst, (void *)regs->nip);
if (!ret && mcheck_handle_load(regs, inst)) {
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 1;
}
}
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 07c164f7f8cf..5a95b8ea23d8 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -108,8 +108,8 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs)
__func__);
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
0);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index c1d76c344351..dc1a151c63d7 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -260,7 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
- i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
+ i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &i8259_host_ops, NULL);
if (i8259_host == NULL) {
printk(KERN_ERR "i8259: failed to allocate irq host !\n");
return;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index b0426f28946a..995fb2ada507 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -602,7 +602,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
- if (irq < NUM_ISA_INTERRUPTS)
+ if (irq < NR_IRQS_LEGACY)
return NULL;
return irq_get_chip_data(irq);
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 49f9541954f8..042bb38fa5c2 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -404,7 +404,8 @@ void __init tsi108_pci_int_init(struct device_node *node)
{
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
- pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL);
+ pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &pci_irq_domain_ops, NULL);
if (pci_irq_host == NULL) {
printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
return;
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
index 304614c920aa..063d9195891f 100644
--- a/arch/powerpc/sysdev/xics/Kconfig
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -12,3 +12,6 @@ config PPC_ICP_HV
config PPC_ICS_RTAS
def_bool n
+
+config PPC_ICS_NATIVE
+ def_bool n
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
index ba1e3117b1c0..747063927c6c 100644
--- a/arch/powerpc/sysdev/xics/Makefile
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -4,4 +4,5 @@ obj-y += xics-common.o
obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
+obj-$(CONFIG_PPC_ICS_NATIVE) += ics-native.o
obj-$(CONFIG_PPC_POWERNV) += ics-opal.o icp-opal.o
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 21b9d1bf39ff..6765d9e264a3 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 68fd2540b093..675d708863d5 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/sysdev/xics/ics-native.c b/arch/powerpc/sysdev/xics/ics-native.c
new file mode 100644
index 000000000000..d450502f4053
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-native.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ICS backend for directly mapped (native) XICS interrupt sources.
+ *
+ * Copyright 2011 IBM Corp.
+ */
+
+//#define DEBUG
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+#include <linux/list.h>
+
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+struct ics_native {
+ struct ics ics;
+ struct device_node *node;
+ void __iomem *base;
+ u32 ibase;
+ u32 icount;
+};
+#define to_ics_native(_ics) container_of(_ics, struct ics_native, ics)
+
+static void __iomem *ics_native_xive(struct ics_native *in, unsigned int vec)
+{
+ return in->base + 0x800 + ((vec - in->ibase) << 2);
+}
+
+static void ics_native_unmask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ unsigned int server;
+
+ pr_devel("ics-native: unmask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
+ out_be32(ics_native_xive(in, vec), (server << 8) | DEFAULT_PRIORITY);
+}
+
+static unsigned int ics_native_startup(struct irq_data *d)
+{
+#ifdef CONFIG_PCI_MSI
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (irq_data_get_msi_desc(d))
+ pci_msi_unmask_irq(d);
+#endif
+
+ /* unmask it */
+ ics_native_unmask_irq(d);
+ return 0;
+}
+
+static void ics_native_do_mask(struct ics_native *in, unsigned int vec)
+{
+ out_be32(ics_native_xive(in, vec), 0xff);
+}
+
+static void ics_native_mask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("ics-native: mask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+ ics_native_do_mask(in, vec);
+}
+
+static int ics_native_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ int server;
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ server = xics_get_irq_server(d->irq, cpumask, 1);
+ if (server == -1) {
+ pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
+ __func__, cpumask_pr_args(cpumask), d->irq);
+ return -1;
+ }
+
+ xive = in_be32(ics_native_xive(in, vec));
+ xive = (xive & 0xff) | (server << 8);
+ out_be32(ics_native_xive(in, vec), xive);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ics_native_irq_chip = {
+ .name = "ICS",
+ .irq_startup = ics_native_startup,
+ .irq_mask = ics_native_mask_irq,
+ .irq_unmask = ics_native_unmask_irq,
+ .irq_eoi = NULL, /* Patched at init time */
+ .irq_set_affinity = ics_native_set_affinity,
+ .irq_set_type = xics_set_irq_type,
+ .irq_retrigger = xics_retrigger,
+};
+
+static int ics_native_map(struct ics *ics, unsigned int virq)
+{
+ unsigned int vec = (unsigned int)virq_to_hw(virq);
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("%s: vec=0x%x\n", __func__, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ irq_set_chip_and_handler(virq, &ics_native_irq_chip, handle_fasteoi_irq);
+ irq_set_chip_data(virq, ics);
+
+ return 0;
+}
+
+static void ics_native_mask_unknown(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ ics_native_do_mask(in, vec);
+}
+
+static long ics_native_get_server(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ xive = in_be32(ics_native_xive(in, vec));
+ return (xive >> 8) & 0xfff;
+}
+
+static int ics_native_host_match(struct ics *ics, struct device_node *node)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ return in->node == node;
+}
+
+static struct ics ics_native_template = {
+ .map = ics_native_map,
+ .mask_unknown = ics_native_mask_unknown,
+ .get_server = ics_native_get_server,
+ .host_match = ics_native_host_match,
+};
+
+static int __init ics_native_add_one(struct device_node *np)
+{
+ struct ics_native *ics;
+ u32 ranges[2];
+ int rc, count;
+
+ ics = kzalloc(sizeof(struct ics_native), GFP_KERNEL);
+ if (!ics)
+ return -ENOMEM;
+ ics->node = of_node_get(np);
+ memcpy(&ics->ics, &ics_native_template, sizeof(struct ics));
+
+ ics->base = of_iomap(np, 0);
+ if (!ics->base) {
+ pr_err("Failed to map %pOFP\n", np);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ count = of_property_count_u32_elems(np, "interrupt-ranges");
+ if (count < 2 || count & 1) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ rc = -EINVAL;
+ goto fail;
+ }
+ if (count > 2) {
+ pr_warn("ICS %pOFP has %d ranges, only one supported\n",
+ np, count >> 1);
+ }
+ rc = of_property_read_u32_array(np, "interrupt-ranges",
+ ranges, 2);
+ if (rc) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ goto fail;
+ }
+ ics->ibase = ranges[0];
+ ics->icount = ranges[1];
+
+ pr_info("ICS native initialized for sources %d..%d\n",
+ ics->ibase, ics->ibase + ics->icount - 1);
+
+ /* Register ourselves */
+ xics_register_ics(&ics->ics);
+
+ return 0;
+fail:
+ of_node_put(ics->node);
+ kfree(ics);
+ return rc;
+}
+
+int __init ics_native_init(void)
+{
+ struct device_node *ics;
+ bool found_one = false;
+
+ /* We need to patch our irq chip's EOI to point to the
+ * right ICP
+ */
+ ics_native_irq_chip.irq_eoi = icp_ops->eoi;
+
+ /* Find native ICS in the device-tree */
+ for_each_compatible_node(ics, NULL, "openpower,xics-sources") {
+ if (ics_native_add_one(ics) == 0)
+ found_one = true;
+ }
+
+ if (found_one)
+ pr_info("ICS native backend registered\n");
+
+ return found_one ? 0 : -ENODEV;
+}
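Each source in this backend is controlled through a single 32-bit XIVE register at base + 0x800 + 4 * (vec - ibase). Reading the accessors above (not a hardware spec quote), the layout the code assumes is:

	u32 xive = in_be32(ics_native_xive(in, vec));
	u8  prio   = xive & 0xff;		/* 0xff means the source is masked */
	u32 server = (xive >> 8) & 0xfff;	/* target interrupt server number */

	/* unmask: route the source to a server at the default priority */
	out_be32(ics_native_xive(in, vec), (server << 8) | DEFAULT_PRIORITY);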
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 7e4305c01bac..b14c502e56a8 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -201,7 +201,7 @@ void xics_migrate_irqs_away(void)
struct ics *ics;
/* We can't set affinity on ISA interrupts */
- if (virq < NUM_ISA_INTERRUPTS)
+ if (virq < NR_IRQS_LEGACY)
continue;
/* We only need to migrate enabled IRQS */
if (!desc->action)
@@ -477,6 +477,8 @@ void __init xics_init(void)
if (rc < 0)
rc = ics_opal_init();
if (rc < 0)
+ rc = ics_native_init();
+ if (rc < 0)
pr_warn("XICS: Cannot find a Source Controller !\n");
/* Initialize common bits */
diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig
index 785c292d104b..97796c6b63f0 100644
--- a/arch/powerpc/sysdev/xive/Kconfig
+++ b/arch/powerpc/sysdev/xive/Kconfig
@@ -3,6 +3,7 @@ config PPC_XIVE
bool
select PPC_SMP_MUXED_IPI
select HARDIRQS_SW_RESEND
+ select IRQ_DOMAIN_NOMAP
config PPC_XIVE_NATIVE
bool
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c8173e92f19d..da4d7f225a40 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -70,6 +70,9 @@ static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
static unsigned long xmon_taken = 1;
static int xmon_owner;
static int xmon_gate;
+static int xmon_batch;
+static unsigned long xmon_batch_start_cpu;
+static cpumask_t xmon_batch_cpus = CPU_MASK_NONE;
#else
#define xmon_owner 0
#endif /* CONFIG_SMP */
@@ -100,7 +103,7 @@ static long *xmon_fault_jmp[NR_CPUS];
/* Breakpoint stuff */
struct bpt {
unsigned long address;
- struct ppc_inst *instr;
+ u32 *instr;
atomic_t ref_count;
int enabled;
unsigned long pad;
@@ -133,6 +136,12 @@ static void prdump(unsigned long, long);
static int ppc_inst_dump(unsigned long, long, int);
static void dump_log_buf(void);
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long);
+static int xmon_batch_next_cpu(void);
+static int batch_cmds(struct pt_regs *);
+#endif
+
#ifdef CONFIG_PPC_POWERNV
static void dump_opal_msglog(void);
#else
@@ -216,7 +225,8 @@ Commands:\n\
#ifdef CONFIG_SMP
"\
c print cpus stopped in xmon\n\
- c# try to switch to cpu number h (in hex)\n"
+ c# try to switch to cpu number h (in hex)\n\
+ c# $ run command '$' (one of 'r','S' or 't') on all cpus in xmon\n"
#endif
"\
C checksum\n\
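For example, once several CPUs are stopped in xmon, the new "c# $" form runs one command on each of them in turn instead of switching CPUs by hand (session transcript is illustrative):

	0:mon> c# t
	1:mon> t
	[backtrace for cpu 0x1]
	2:mon> t
	[backtrace for cpu 0x2]
	0:mon>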
@@ -489,10 +499,10 @@ static void xmon_touch_watchdogs(void)
touch_nmi_watchdog();
}
-static int xmon_core(struct pt_regs *regs, int fromipi)
+static int xmon_core(struct pt_regs *regs, volatile int fromipi)
{
- int cmd = 0;
- struct bpt *bp;
+ volatile int cmd = 0;
+ struct bpt *volatile bp;
long recurse_jmp[JMP_BUF_LEN];
bool locked_down;
unsigned long offset;
@@ -514,7 +524,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
@@ -644,7 +654,12 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
spin_cpu_relax();
touch_nmi_watchdog();
} else {
- if (!locked_down)
+ cmd = 1;
+#ifdef CONFIG_SMP
+ if (xmon_batch)
+ cmd = batch_cmds(regs);
+#endif
+ if (!locked_down && cmd)
cmd = cmds(regs);
if (locked_down || cmd != 0) {
/* exiting xmon */
@@ -702,7 +717,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (regs->msr & MSR_DE) {
bp = at_breakpoint(regs->nip);
if (bp != NULL) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
}
}
@@ -712,7 +727,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (bp != NULL) {
int stepped = emulate_step(regs, ppc_inst_read(bp->instr));
if (stepped == 0) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
} else if (stepped < 0) {
printf("Couldn't single-step %s instruction\n",
@@ -766,7 +781,7 @@ static int xmon_bpt(struct pt_regs *regs)
/* Are we at the trap at bp->instr[1] for some bp? */
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL && (offset == 4 || offset == 8)) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
return 1;
}
@@ -836,7 +851,7 @@ static int xmon_fault_handler(struct pt_regs *regs)
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
}
@@ -857,7 +872,7 @@ static inline void force_enable_xmon(void)
static struct bpt *at_breakpoint(unsigned long pc)
{
int i;
- struct bpt *bp;
+ struct bpt *volatile bp;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp)
@@ -946,11 +961,11 @@ static void insert_bpts(void)
}
patch_instruction(bp->instr, instr);
- patch_instruction(ppc_inst_next(bp->instr, &instr),
+ patch_instruction(ppc_inst_next(bp->instr, bp->instr),
ppc_inst(bpinstr));
if (bp->enabled & BP_CIABR)
continue;
- if (patch_instruction((struct ppc_inst *)bp->address,
+ if (patch_instruction((u32 *)bp->address,
ppc_inst(bpinstr)) != 0) {
printf("Couldn't write instruction at %lx, "
"disabling breakpoint there\n", bp->address);
@@ -992,7 +1007,7 @@ static void remove_bpts(void)
if (mread_instr(bp->address, &instr)
&& ppc_inst_equal(instr, ppc_inst(bpinstr))
&& patch_instruction(
- (struct ppc_inst *)bp->address, ppc_inst_read(bp->instr)) != 0)
+ (u32 *)bp->address, ppc_inst_read(bp->instr)) != 0)
printf("Couldn't remove breakpoint at %lx\n",
bp->address);
}
@@ -1188,7 +1203,7 @@ cmds(struct pt_regs *excp)
#ifdef CONFIG_BOOKE
static int do_step(struct pt_regs *regs)
{
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
return 1;
}
@@ -1221,7 +1236,7 @@ static int do_step(struct pt_regs *regs)
}
}
}
- regs->msr |= MSR_SE;
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return 1;
}
#endif
@@ -1243,11 +1258,112 @@ static void bootcmds(void)
}
}
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long cpu)
+{
+ int timeout;
+
+ xmon_taken = 0;
+ mb();
+ xmon_owner = cpu;
+ timeout = 10000000;
+ while (!xmon_taken) {
+ if (--timeout == 0) {
+ if (test_and_set_bit(0, &xmon_taken))
+ break;
+ /* take control back */
+ mb();
+ xmon_owner = smp_processor_id();
+ printf("cpu 0x%lx didn't take control\n", cpu);
+ return 0;
+ }
+ barrier();
+ }
+ return 1;
+}
+
+static int xmon_batch_next_cpu(void)
+{
+ unsigned long cpu;
+
+ while (!cpumask_empty(&xmon_batch_cpus)) {
+ cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
+ xmon_batch_start_cpu, true);
+ if (cpu == nr_cpumask_bits)
+ break;
+ if (xmon_batch_start_cpu == -1)
+ xmon_batch_start_cpu = cpu;
+ if (xmon_switch_cpu(cpu))
+ return 0;
+ cpumask_clear_cpu(cpu, &xmon_batch_cpus);
+ }
+
+ xmon_batch = 0;
+ printf("%x:mon> \n", smp_processor_id());
+ return 1;
+}
+
+static int batch_cmds(struct pt_regs *excp)
+{
+ int cmd;
+
+ /* simulate command entry */
+ cmd = xmon_batch;
+ termch = '\n';
+
+ last_cmd = NULL;
+ xmon_regs = excp;
+
+ printf("%x:", smp_processor_id());
+ printf("mon> ");
+ printf("%c\n", (char)cmd);
+
+ switch (cmd) {
+ case 'r':
+ prregs(excp); /* print regs */
+ break;
+ case 'S':
+ super_regs();
+ break;
+ case 't':
+ backtrace(excp);
+ break;
+ }
+
+ cpumask_clear_cpu(smp_processor_id(), &xmon_batch_cpus);
+
+ return xmon_batch_next_cpu();
+}
+
static int cpu_cmd(void)
{
-#ifdef CONFIG_SMP
unsigned long cpu, first_cpu, last_cpu;
- int timeout;
+
+ cpu = skipbl();
+ if (cpu == '#') {
+ xmon_batch = skipbl();
+ if (xmon_batch) {
+ switch (xmon_batch) {
+ case 'r':
+ case 'S':
+ case 't':
+ cpumask_copy(&xmon_batch_cpus, &cpus_in_xmon);
+ if (cpumask_weight(&xmon_batch_cpus) <= 1) {
+ printf("There are no other cpus in xmon\n");
+ break;
+ }
+ xmon_batch_start_cpu = -1;
+ if (!xmon_batch_next_cpu())
+ return 1;
+ break;
+ default:
+ printf("c# only supports 'r', 'S' and 't' commands\n");
+ }
+ xmon_batch = 0;
+ return 0;
+ }
+ }
+ termch = cpu;
if (!scanhex(&cpu)) {
/* print cpus waiting or in xmon */
@@ -1279,27 +1395,15 @@ static int cpu_cmd(void)
#endif
return 0;
}
- xmon_taken = 0;
- mb();
- xmon_owner = cpu;
- timeout = 10000000;
- while (!xmon_taken) {
- if (--timeout == 0) {
- if (test_and_set_bit(0, &xmon_taken))
- break;
- /* take control back */
- mb();
- xmon_owner = smp_processor_id();
- printf("cpu 0x%lx didn't take control\n", cpu);
- return 0;
- }
- barrier();
- }
- return 1;
+
+ return xmon_switch_cpu(cpu);
+}
#else
+static int cpu_cmd(void)
+{
return 0;
-#endif /* CONFIG_SMP */
}
+#endif /* CONFIG_SMP */
static unsigned short fcstab[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
@@ -2214,7 +2318,7 @@ mread_instr(unsigned long adrs, struct ppc_inst *instr)
if (setjmp(bus_error_jmp) == 0) {
catch_memory_errors = 1;
sync();
- *instr = ppc_inst_read((struct ppc_inst *)adrs);
+ *instr = ppc_inst_read((u32 *)adrs);
sync();
/* wait a little while to see if we get a machine check */
__delay(200);
@@ -2975,7 +3079,7 @@ static void
dump_log_buf(void)
{
struct kmsg_dump_iter iter;
- unsigned char buf[128];
+ static unsigned char buf[1024];
size_t len;
if (setjmp(bus_error_jmp) != 0) {
@@ -3005,7 +3109,7 @@ static void dump_opal_msglog(void)
{
unsigned char buf[128];
ssize_t res;
- loff_t pos = 0;
+ volatile loff_t pos = 0;
if (!firmware_has_feature(FW_FEATURE_OPAL)) {
printf("Machine is not running OPAL firmware.\n");
@@ -3160,8 +3264,9 @@ memzcan(void)
printf("%.8lx\n", a - mskip);
}
-static void show_task(struct task_struct *tsk)
+static void show_task(struct task_struct *volatile tsk)
{
+ unsigned int p_state = READ_ONCE(tsk->__state);
char state;
/*
@@ -3169,14 +3274,14 @@ static void show_task(struct task_struct *tsk)
* appropriate for calling from xmon. This could be moved
* to a common, generic, routine used by both.
*/
- state = (tsk->state == 0) ? 'R' :
- (tsk->state < 0) ? 'U' :
- (tsk->state & TASK_UNINTERRUPTIBLE) ? 'D' :
- (tsk->state & TASK_STOPPED) ? 'T' :
- (tsk->state & TASK_TRACED) ? 'C' :
+ state = (p_state == 0) ? 'R' :
+ (p_state < 0) ? 'U' :
+ (p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
+ (p_state & TASK_STOPPED) ? 'T' :
+ (p_state & TASK_TRACED) ? 'C' :
(tsk->exit_state & EXIT_ZOMBIE) ? 'Z' :
(tsk->exit_state & EXIT_DEAD) ? 'E' :
- (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+ (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk,
tsk->thread.ksp, tsk->thread.regs,
@@ -3204,7 +3309,7 @@ static void format_pte(void *ptep, unsigned long pte)
static void show_pte(unsigned long addr)
{
unsigned long tskv = 0;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
struct mm_struct *mm;
pgd_t *pgdp;
p4d_t *p4dp;
@@ -3299,7 +3404,7 @@ static void show_pte(unsigned long addr)
static void show_tasks(void)
{
unsigned long tskv;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
printf(" task_struct ->thread.ksp ->thread.regs PID PPID S P CMD\n");
@@ -3622,7 +3727,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
const char *after)
{
char *modname;
- const char *name = NULL;
+ const char *volatile name = NULL;
unsigned long offset, size;
printf(REG, address);
@@ -4054,7 +4159,7 @@ void xmon_register_spus(struct list_head *list)
static void stop_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
u64 tmp;
for (i = 0; i < XMON_NUM_SPUS; i++) {
@@ -4095,7 +4200,7 @@ static void stop_spus(void)
static void restart_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
for (i = 0; i < XMON_NUM_SPUS; i++) {
if (!spu_info[i].spu)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a8ad8eb76120..469a70bd8da6 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -34,6 +34,7 @@ config RISCV
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_SUPPORTS_HUGETLBFS if MMU
+ select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
@@ -60,11 +61,11 @@ config RISCV
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
@@ -79,9 +80,9 @@ config RISCV
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_KPROBES
- select HAVE_KPROBES_ON_FTRACE
- select HAVE_KRETPROBES
+ select HAVE_KPROBES if !XIP_KERNEL
+ select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+ select HAVE_KRETPROBES if !XIP_KERNEL
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -103,6 +104,7 @@ config RISCV
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select UACCESS_MEMCPY if !MMU
+ select ZONE_DMA32 if 64BIT
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -132,10 +134,6 @@ config MMU
Select if you want MMU-based virtualised addressing space
support by paged memory management. If unsure, say 'Y'.
-config ZONE_DMA32
- bool
- default y if 64BIT
-
config VA_BITS
int
default 32 if 32BIT
@@ -230,11 +228,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select SWIOTLB if MMU
endchoice
@@ -331,7 +329,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
default "2"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index ed963761fbd2..30676ebb16eb 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -14,6 +14,7 @@ config SOC_SIFIVE
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
select SIFIVE_PLIC
+ select RISCV_ERRATA_ALTERNATIVE
select ERRATA_SIFIVE
help
This enables support for SiFive SoC platform hardware.
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 3eb9590a0775..99ecd8bcfd77 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=8
endif
-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif
@@ -38,6 +38,15 @@ else
KBUILD_LDFLAGS += -melf32lriscv
endif
+ifeq ($(CONFIG_LD_IS_LLD),y)
+ KBUILD_CFLAGS += -mno-relax
+ KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+ KBUILD_CFLAGS += -Wa,-mno-relax
+ KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
index 622b12771fd3..855c1502d912 100644
--- a/arch/riscv/boot/dts/microchip/Makefile
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
index 74c47fe9fc22..d90e4eb0ade8 100644
--- a/arch/riscv/boot/dts/sifive/Makefile
+++ b/arch/riscv/boot/dts/sifive/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index 8eef82e4199f..abbb960f90a0 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -273,7 +273,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic0>;
- interrupts = <19 20 21 22>;
+ interrupts = <19 21 22 20>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
gpio: gpio@10060000 {
diff --git a/arch/riscv/errata/sifive/Makefile b/arch/riscv/errata/sifive/Makefile
index bdd5fc843b8e..2fde48db0619 100644
--- a/arch/riscv/errata/sifive/Makefile
+++ b/arch/riscv/errata/sifive/Makefile
@@ -1,2 +1,2 @@
-obj-y += errata_cip_453.o
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
obj-y += errata.o
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 88c08705f64a..67406c376389 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -51,7 +51,7 @@
REG_ASM " " newlen "\n" \
".word " errata_id "\n"
-#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
".if " __stringify(enable) " == 1\n" \
".pushsection .alternative, \"a\"\n" \
ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
@@ -69,7 +69,7 @@
"886 :\n" \
old_c "\n" \
"887 :\n" \
- ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c)
+ ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 400a8c8b6de7..ac9bdf4fc404 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,22 +25,22 @@
#define __atomic_release_fence() \
__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
}
#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
}
@@ -53,7 +53,7 @@ static __always_inline void atomic64_set(atomic64_t *v, s64 i)
*/
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " zero, %1, %0" \
@@ -87,7 +87,7 @@ ATOMIC_OPS(xor, xor, i)
*/
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
register c_type ret; \
@@ -99,7 +99,7 @@ c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
return ret; \
} \
static __always_inline \
-c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
@@ -112,15 +112,15 @@ c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
-c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
}
#ifdef CONFIG_GENERIC_ATOMIC64
@@ -138,26 +138,26 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_add_return atomic64_add_return
-#define atomic64_sub_return atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#define atomic64_fetch_add atomic64_fetch_add
-#define atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif
#undef ATOMIC_OPS
@@ -175,20 +175,20 @@ ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#define atomic64_fetch_and atomic64_fetch_and
-#define atomic64_fetch_or atomic64_fetch_or
-#define atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif
#undef ATOMIC_OPS
@@ -197,7 +197,7 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OP_RETURN
/* This is required to provide a full barrier on success. */
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -214,10 +214,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
: "memory");
return prev;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 prev;
long rc;
@@ -235,7 +235,7 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
: "memory");
return prev;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
/*
@@ -244,45 +244,45 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
*/
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
-c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
+c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
return __cmpxchg(&(v->counter), o, n, size); \
}
@@ -298,19 +298,19 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
ATOMIC_OPS()
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg_acquire
-#define atomic_xchg_release atomic_xchg_release
-#define atomic_xchg atomic_xchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#define atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#define arch_atomic_xchg arch_atomic_xchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#undef ATOMIC_OPS
#undef ATOMIC_OP
-static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
{
int prev, rc;
@@ -328,10 +328,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
return prev - offset;
}
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
s64 prev;
long rc;
@@ -350,7 +350,7 @@ static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
return prev - offset;
}
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
#endif
#endif /* _ASM_RISCV_ATOMIC_H */
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 262e5bbb2776..36dc962f6343 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -37,7 +37,7 @@
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -72,7 +72,7 @@
__ret; \
})
-#define xchg_acquire(ptr, x) \
+#define arch_xchg_acquire(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_acquire((ptr), \
@@ -107,7 +107,7 @@
__ret; \
})
-#define xchg_release(ptr, x) \
+#define arch_xchg_release(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_release((ptr), \
@@ -140,7 +140,7 @@
__ret; \
})
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
@@ -149,13 +149,13 @@
#define xchg32(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
#define xchg64(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
/*
@@ -199,7 +199,7 @@
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -245,7 +245,7 @@
__ret; \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -291,7 +291,7 @@
__ret; \
})
-#define cmpxchg_release(ptr, o, n) \
+#define arch_cmpxchg_release(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -337,7 +337,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -345,31 +345,31 @@
_o_, _n_, sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg_relaxed((ptr), (o), (n)) \
+ arch_cmpxchg_relaxed((ptr), (o), (n)) \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
index 1e954101906a..e4e291d40759 100644
--- a/arch/riscv/include/asm/kexec.h
+++ b/arch/riscv/include/asm/kexec.h
@@ -42,8 +42,8 @@ struct kimage_arch {
unsigned long fdt_addr;
};
-const extern unsigned char riscv_kexec_relocate[];
-const extern unsigned int riscv_kexec_relocate_size;
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
unsigned long jump_addr,
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 23b1544e0ca5..0af6933a7100 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -38,8 +38,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#endif /* __PAGETABLE_PMD_FOLDED */
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9469f464e71a..62f3fe7368f3 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -30,9 +30,8 @@
#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
-/* KASLR should leave at least 128MB for BPF after the kernel */
-#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end)
-#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
@@ -537,8 +536,6 @@ void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);
-#define FIRST_USER_ADDRESS 0
-
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index cc048143fba5..9e99e1db156b 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -14,8 +14,9 @@
#include <asm/set_memory.h> /* For set_memory_x() */
#include <linux/compiler.h> /* For unreachable() */
#include <linux/cpu.h> /* For cpu_down() */
+#include <linux/reboot.h>
-/**
+/*
* kexec_image_info - Print received image details
*/
static void
@@ -39,7 +40,7 @@ kexec_image_info(const struct kimage *image)
}
}
-/**
+/*
* machine_kexec_prepare - Initialize kexec
*
* This function is called from do_kexec_load, when the user has
@@ -100,7 +101,7 @@ machine_kexec_prepare(struct kimage *image)
}
-/**
+/*
* machine_kexec_cleanup - Cleanup any leftovers from
* machine_kexec_prepare
*
@@ -135,7 +136,7 @@ void machine_shutdown(void)
#endif
}
-/**
+/*
* machine_crash_shutdown - Prepare to kexec after a kernel crash
*
* This function is called by crash_kexec just before machine_kexec
@@ -151,7 +152,7 @@ machine_crash_shutdown(struct pt_regs *regs)
pr_info("Starting crashdump kernel...\n");
}
-/**
+/*
* machine_kexec - Jump to the loaded kimage
*
* This function is called by kernel_kexec which is called by the
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 10b965c34536..247e33fa5bc7 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -84,6 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
+#ifdef CONFIG_MMU
void *alloc_insn_page(void)
{
return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
@@ -91,6 +92,7 @@ void *alloc_insn_page(void)
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
__builtin_return_address(0));
}
+#endif
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -277,23 +279,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 03901d3a8b02..9a1b7a0603b2 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -231,13 +231,13 @@ static void __init init_resources(void)
/* Clean-up any unused pre-allocated resources */
mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
- memblock_free((phys_addr_t) mem_res, mem_res_sz);
+ memblock_free(__pa(mem_res), mem_res_sz);
return;
error:
/* Better an empty resource tree than an inconsistent one */
release_child_resources(&iomem_resource);
- memblock_free((phys_addr_t) mem_res, mem_res_sz);
+ memblock_free(__pa(mem_res), mem_res_sz);
}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 9a408e2942ac..bd82375db51a 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -180,7 +180,6 @@ asmlinkage __visible void smp_callin(void)
* Disable preemption before enabling interrupts, so we don't try to
* schedule a CPU that hasn't actually started yet.
*/
- preempt_disable();
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 2b3e0cb90d78..ff467b98c3e3 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
- } else if (task == NULL || task == current) {
- fp = (unsigned long)__builtin_frame_address(0);
- sp = sp_in_global;
- pc = (unsigned long)walk_stackframe;
+ } else if (task == current) {
+ fp = (unsigned long)__builtin_frame_address(1);
+ sp = (unsigned long)__builtin_frame_address(0);
+ pc = (unsigned long)__builtin_return_address(0);
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
return true;
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl)
{
- pr_cont("%sCall Trace:\n", loglvl);
walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
+ pr_cont("%sCall Trace:\n", loglvl);
dump_backtrace(NULL, task, loglvl);
}
@@ -132,14 +132,14 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
+ if (likely(task && task != current && !task_is_running(task)))
walk_stackframe(task, NULL, save_wchan, &pc);
return pc;
}
#ifdef CONFIG_STACKTRACE
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 0721b9798595..7bc88d8aab97 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -86,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
}
}
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section __section(".xip.traps")
+#else
+#define __trap_section
+#endif
#define DO_ERROR_INFO(name, signo, code, str) \
-asmlinkage __visible void name(struct pt_regs *regs) \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
}
@@ -111,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_load(regs))
return;
@@ -119,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
"Oops - load address misaligned");
}
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_store(regs))
return;
@@ -146,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
return GET_INSN_LENGTH(insn);
}
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 4b29b9917f99..a3ff09c4c3f9 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -99,9 +99,22 @@ SECTIONS
}
PERCPU_SECTION(L1_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(8);
+ .alternative : {
+ __alt_start = .;
+ *(.alternative)
+ __alt_end = .;
+ }
__init_end = .;
+ . = ALIGN(16);
+ .xip.traps : {
+ __xip_traps_start = .;
+ *(.xip.traps)
+ __xip_traps_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
.sdata : {
__global_pointer$ = . + 0x800;
*(.sdata*)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4faf8bd157ea..4c4c92ce0bb8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
unsigned long init_data_start = (unsigned long)__init_data_begin;
unsigned long rodata_start = (unsigned long)__start_rodata;
unsigned long data_start = (unsigned long)_data;
- unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+ unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+ unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
/* rodata section is marked readonly in mark_rodata_ro */
set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+ set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 9daacae93e33..d7189c8714a9 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -169,7 +169,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
void __init kasan_init(void)
{
- phys_addr_t _start, _end;
+ phys_addr_t p_start, p_end;
u64 i;
/*
@@ -189,9 +189,9 @@ void __init kasan_init(void)
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
/* Populate the linear mapping */
- for_each_mem_range(i, &_start, &_end) {
- void *start = (void *)__va(_start);
- void *end = (void *)__va(_end);
+ for_each_mem_range(i, &p_start, &p_end) {
+ void *start = (void *)__va(p_start);
+ void *end = (void *)__va(p_end);
if (start >= end)
break;
@@ -201,7 +201,7 @@ void __init kasan_init(void)
/* Populate kernel, BPF, modules mapping */
kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
- kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
+ kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b4c7c34069f8..07b2328f29e0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -2,9 +2,6 @@
config MMU
def_bool y
-config ZONE_DMA
- def_bool y
-
config CPU_BIG_ENDIAN
def_bool y
@@ -62,7 +59,7 @@ config S390
select ARCH_BINFMT_ELF_STATE
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
select ARCH_ENABLE_MEMORY_HOTREMOVE
- select ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -117,6 +114,7 @@ config S390
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
@@ -164,6 +162,7 @@ config S390
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
+ select HAVE_IOREMAP_PROT if PCI
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@@ -172,6 +171,7 @@ config S390
select HAVE_KERNEL_LZO
select HAVE_KERNEL_UNCOMPRESSED
select HAVE_KERNEL_XZ
+ select HAVE_KERNEL_ZSTD
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
@@ -210,6 +210,7 @@ config S390
select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
+ select ZONE_DMA
# Note: keep the above list sorted alphabetically
config SCHED_OMIT_FRAME_POINTER
@@ -475,7 +476,7 @@ config NUMA
config NODES_SHIFT
int
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
default "1"
config SCHED_SMT
@@ -768,7 +769,7 @@ config VFIO_CCW
config VFIO_AP
def_tristate n
prompt "VFIO support for AP devices"
- depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM
+ depends on S390_AP_IOMMU && VFIO_MDEV && KVM
depends on ZCRYPT
help
This driver grants access to Adjunct Processor (AP) devices
@@ -853,7 +854,7 @@ config CMM_IUCV
config APPLDATA_BASE
def_bool n
prompt "Linux - VM Monitor Stream, base infrastructure"
- depends on PROC_FS
+ depends on PROC_SYSCTL
help
This provides a kernel interface for creating and updating z/VM APPLDATA
monitor records. The monitor records are updated at certain time
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e443ed9947bd..098abe3a56f3 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -28,6 +28,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index ff6801d401c4..47c48fbfb563 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -68,7 +68,7 @@ void print_missing_facilities(void)
first = 1;
for (i = 0; i < ARRAY_SIZE(als); i++) {
- val = ~S390_lowcore.stfle_fac_list[i] & als[i];
+ val = ~stfle_fac_list[i] & als[i];
for (j = 0; j < BITS_PER_LONG; j++) {
if (!(val & (1UL << (BITS_PER_LONG - 1 - j))))
continue;
@@ -106,9 +106,9 @@ void verify_facilities(void)
{
int i;
- __stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ __stfle(stfle_fac_list, ARRAY_SIZE(stfle_fac_list));
for (i = 0; i < ARRAY_SIZE(als); i++) {
- if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i])
+ if ((stfle_fac_list[i] & als[i]) != als[i])
facility_mismatch();
}
}
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 8b50967f5804..ae04e1c93764 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -24,6 +24,7 @@ void __printf(1, 2) decompressor_printk(const char *fmt, ...);
extern const char kernel_version[];
extern unsigned long memory_limit;
+extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern int kaslr_enabled;
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index de18dab518bb..660c799d875d 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -14,6 +14,7 @@ obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-all := $(obj-y) piggy.o syms.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += vmlinux.bin.zst
targets += info.bin syms.bin vmlinux.syms $(obj-all)
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
@@ -33,7 +34,7 @@ $(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OB
quiet_cmd_dumpsyms = DUMPSYMS $<
define cmd_dumpsyms
- $(NM) -n -S --format=bsd "$<" | $(PERL) -ne '/(\w+)\s+(\w+)\s+[tT]\s+(\w+)/ and printf "%x %x %s\0",hex $$1,hex $$2,$$3' > "$@"
+ $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@"
endef
$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE
@@ -63,6 +64,7 @@ suffix-$(CONFIG_KERNEL_LZ4) := .lz4
suffix-$(CONFIG_KERNEL_LZMA) := .lzma
suffix-$(CONFIG_KERNEL_LZO) := .lzo
suffix-$(CONFIG_KERNEL_XZ) := .xz
+suffix-$(CONFIG_KERNEL_ZSTD) := .zst
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
@@ -76,6 +78,8 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,xzkern)
+$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE
+ $(call if_changed,zstd22)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c
index 3061b11c4d27..37a4a8d33c6c 100644
--- a/arch/s390/boot/compressed/decompressor.c
+++ b/arch/s390/boot/compressed/decompressor.c
@@ -28,8 +28,10 @@ extern char _end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
-#ifdef CONFIG_HAVE_KERNEL_BZIP2
+#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
+#elif CONFIG_KERNEL_ZSTD
+#define BOOT_HEAP_SIZE 0x30000
#else
#define BOOT_HEAP_SIZE 0x10000
#endif
@@ -61,6 +63,10 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
#include "../../../../lib/decompress_unxz.c"
#endif
+#ifdef CONFIG_KERNEL_ZSTD
+#include "../../../../lib/decompress_unzstd.c"
+#endif
+
#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE)
unsigned long mem_safe_offset(void)
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index dacb7813f982..51693cfb65c2 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -401,6 +401,7 @@ SYM_CODE_END(startup_pgm_check_handler)
# Must be kept in sync with struct parmarea in setup.h
#
.org PARMAREA
+SYM_DATA_START(parmarea)
.quad 0 # IPL_DEVICE
.quad 0 # INITRD_START
.quad 0 # INITRD_SIZE
@@ -411,6 +412,8 @@ SYM_CODE_END(startup_pgm_check_handler)
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
+ .org PARMAREA+__PARMAREA_SIZE
+SYM_DATA_END(parmarea)
.org EARLY_SCCB_OFFSET
.fill 4096
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index d372a45fe10e..0f84c072625e 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -12,39 +12,44 @@
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
+int __bootdata(noexec_disabled);
+
+unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
-unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
-
-unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
-int __bootdata(noexec_disabled);
+unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
int vmalloc_size_set;
int kaslr_enabled;
static inline int __diag308(unsigned long subcode, void *addr)
{
- register unsigned long _addr asm("0") = (unsigned long)addr;
- register unsigned long _rc asm("1") = 0;
unsigned long reg1, reg2;
- psw_t old = S390_lowcore.program_new_psw;
+ union register_pair r1;
+ psw_t old;
+ r1.even = (unsigned long) addr;
+ r1.odd = 0;
asm volatile(
- " epsw %0,%1\n"
- " st %0,%[psw_pgm]\n"
- " st %1,%[psw_pgm]+4\n"
- " larl %0,1f\n"
- " stg %0,%[psw_pgm]+8\n"
- " diag %[addr],%[subcode],0x308\n"
- "1: nopr %%r7\n"
- : "=&d" (reg1), "=&a" (reg2),
- [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
- [addr] "+d" (_addr), "+d" (_rc)
- : [subcode] "d" (subcode)
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
+ " diag %[r1],%[subcode],0x308\n"
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [r1] "+&d" (r1.pair),
+ [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ "+Q" (S390_lowcore.program_new_psw),
+ "=Q" (old)
+ : [subcode] "d" (subcode),
+ [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
- S390_lowcore.program_new_psw = old;
- return _rc;
+ return r1.odd;
}
void store_ipl_parmblock(void)
@@ -165,12 +170,12 @@ static inline int has_ebcdic_char(const char *str)
void setup_boot_command_line(void)
{
- COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+ parmarea.command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
/* convert arch command line to ascii if necessary */
- if (has_ebcdic_char(COMMAND_LINE))
- EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+ if (has_ebcdic_char(parmarea.command_line))
+ EBCASC(parmarea.command_line, ARCH_COMMAND_LINE_SIZE);
/* copy arch command line */
- strcpy(early_command_line, strim(COMMAND_LINE));
+ strcpy(early_command_line, strim(parmarea.command_line));
/* append IPL PARM data to the boot command line */
if (!is_prot_virt_guest() && ipl_block_valid)
@@ -180,9 +185,9 @@ void setup_boot_command_line(void)
static void modify_facility(unsigned long nr, bool clear)
{
if (clear)
- __clear_facility(nr, S390_lowcore.stfle_fac_list);
+ __clear_facility(nr, stfle_fac_list);
else
- __set_facility(nr, S390_lowcore.stfle_fac_list);
+ __set_facility(nr, stfle_fac_list);
}
static void check_cleared_facilities(void)
@@ -191,7 +196,7 @@ static void check_cleared_facilities(void)
int i;
for (i = 0; i < ARRAY_SIZE(als); i++) {
- if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) {
+ if ((stfle_fac_list[i] & als[i]) != als[i]) {
sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
index 40168e59abd3..4e17adbde495 100644
--- a/arch/s390/boot/mem_detect.c
+++ b/arch/s390/boot/mem_detect.c
@@ -64,30 +64,37 @@ void add_mem_detect_block(u64 start, u64 end)
static int __diag260(unsigned long rx1, unsigned long rx2)
{
- register unsigned long _rx1 asm("2") = rx1;
- register unsigned long _rx2 asm("3") = rx2;
- register unsigned long _ry asm("4") = 0x10; /* storage configuration */
- int rc = -1; /* fail */
- unsigned long reg1, reg2;
- psw_t old = S390_lowcore.program_new_psw;
-
+ unsigned long reg1, reg2, ry;
+ union register_pair rx;
+ psw_t old;
+ int rc;
+
+ rx.even = rx1;
+ rx.odd = rx2;
+ ry = 0x10; /* storage configuration */
+ rc = -1; /* fail */
asm volatile(
- " epsw %0,%1\n"
- " st %0,%[psw_pgm]\n"
- " st %1,%[psw_pgm]+4\n"
- " larl %0,1f\n"
- " stg %0,%[psw_pgm]+8\n"
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
" diag %[rx],%[ry],0x260\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
- "1:\n"
- : "=&d" (reg1), "=&a" (reg2),
- [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
- [rc] "+&d" (rc), [ry] "+d" (_ry)
- : [rx] "d" (_rx1), "d" (_rx2)
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ [ry] "+&d" (ry),
+ "+Q" (S390_lowcore.program_new_psw),
+ "=Q" (old)
+ : [rx] "d" (rx.pair),
+ [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
- S390_lowcore.program_new_psw = old;
- return rc == 0 ? _ry : -1;
+ return rc == 0 ? ry : -1;
}
static int diag260(void)
@@ -111,24 +118,30 @@ static int diag260(void)
static int tprot(unsigned long addr)
{
- unsigned long pgm_addr;
+ unsigned long reg1, reg2;
int rc = -EFAULT;
- psw_t old = S390_lowcore.program_new_psw;
+ psw_t old;
- S390_lowcore.program_new_psw.mask = __extract_psw();
asm volatile(
- " larl %[pgm_addr],1f\n"
- " stg %[pgm_addr],%[psw_pgm_addr]\n"
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
" tprot 0(%[addr]),0\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
- "1:\n"
- : [pgm_addr] "=&d"(pgm_addr),
- [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
- [rc] "+&d"(rc)
- : [addr] "a"(addr)
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ "=Q" (S390_lowcore.program_new_psw.addr),
+ "=Q" (old)
+ : [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+ [addr] "a" (addr)
: "cc", "memory");
- S390_lowcore.program_new_psw = old;
return rc;
}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 05f8eefa3dcf..bbe4df6c2f8b 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -5,6 +5,7 @@
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
+#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
@@ -15,8 +16,17 @@
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
+unsigned long __bootdata_preserved(VMALLOC_START);
+unsigned long __bootdata_preserved(VMALLOC_END);
+struct page *__bootdata_preserved(vmemmap);
+unsigned long __bootdata_preserved(vmemmap_size);
+unsigned long __bootdata_preserved(MODULES_VADDR);
+unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
+u64 __bootdata_preserved(stfle_fac_list[16]);
+u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+
/*
* Some code and data needs to stay below 2 GB, even when the kernel would be
* relocated above 2 GB, because it has to use 31 bit addresses.
@@ -169,6 +179,86 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#endif
}
+static void setup_kernel_memory_layout(void)
+{
+ bool vmalloc_size_verified = false;
+ unsigned long vmemmap_off;
+ unsigned long vspace_left;
+ unsigned long rte_size;
+ unsigned long pages;
+ unsigned long vmax;
+
+ pages = ident_map_size / PAGE_SIZE;
+ /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
+ vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
+
+ /* choose kernel address space layout: 4 or 3 levels. */
+ vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
+ if (IS_ENABLED(CONFIG_KASAN) ||
+ vmalloc_size > _REGION2_SIZE ||
+ vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
+ vmax = _REGION1_SIZE;
+ else
+ vmax = _REGION2_SIZE;
+
+ /* keep vmemmap_off aligned to a top level region table entry */
+ rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
+ MODULES_END = vmax;
+ if (is_prot_virt_host()) {
+ /*
+ * forcing modules and vmalloc area under the ultravisor
+ * secure storage limit, so that any vmalloc allocation
+ * we do could be used to back secure guest storage.
+ */
+ adjust_to_uv_max(&MODULES_END);
+ }
+
+#ifdef CONFIG_KASAN
+ if (MODULES_END < vmax) {
+ /* force vmalloc and modules below kasan shadow */
+ MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+ } else {
+ /*
+ * leave vmalloc and modules above kasan shadow but make
+ * sure they don't overlap with it
+ */
+ vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
+ vmalloc_size_verified = true;
+ vspace_left = KASAN_SHADOW_START;
+ }
+#endif
+ MODULES_VADDR = MODULES_END - MODULES_LEN;
+ VMALLOC_END = MODULES_VADDR;
+
+ if (vmalloc_size_verified) {
+ VMALLOC_START = VMALLOC_END - vmalloc_size;
+ } else {
+ vmemmap_off = round_up(ident_map_size, rte_size);
+
+ if (vmemmap_off + vmemmap_size > VMALLOC_END ||
+ vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
+ /*
+ * allow vmalloc area to occupy up to 1/2 of
+ * the rest virtual space left.
+ */
+ vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
+ }
+ VMALLOC_START = VMALLOC_END - vmalloc_size;
+ vspace_left = VMALLOC_START;
+ }
+
+ pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
+ pages = SECTION_ALIGN_UP(pages);
+ vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
+ /* keep vmemmap leftmost, starting from a fresh region table entry */
+ vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
+ /* take care that the identity map is lower than vmemmap */
+ ident_map_size = min(ident_map_size, vmemmap_off);
+ vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
+ VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
+ vmemmap = (struct page *)vmemmap_off;
+}
+
/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
@@ -208,6 +298,7 @@ void startup_kernel(void)
parse_boot_command_line();
setup_ident_map_size(detect_memory());
setup_vmalloc_size();
+ setup_kernel_memory_layout();
random_lma = __kaslr_offset = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index 87641dd65ccf..82b99b916243 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -44,3 +44,28 @@ void uv_query_info(void)
prot_virt_guest = 1;
#endif
}
+
+#if IS_ENABLED(CONFIG_KVM)
+static bool has_uv_sec_stor_limit(void)
+{
+ /*
+ * keep these conditions in line with setup_uv()
+ */
+ if (!is_prot_virt_host())
+ return false;
+
+ if (is_prot_virt_guest())
+ return false;
+
+ if (!test_facility(158))
+ return false;
+
+ return !!uv_info.max_sec_stor_addr;
+}
+
+void adjust_to_uv_max(unsigned long *vmax)
+{
+ if (has_uv_sec_stor_limit())
+ *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+}
+#endif
diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c
index 7d9fb496d155..f5f7e78ddc0c 100644
--- a/arch/s390/hypfs/hypfs_sprp.c
+++ b/arch/s390/hypfs/hypfs_sprp.c
@@ -25,14 +25,13 @@
static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
{
- register unsigned long _data asm("2") = (unsigned long) data;
- register unsigned long _rc asm("3");
- register unsigned long _cmd asm("4") = cmd;
+ union register_pair r1 = { .even = (unsigned long)data, };
- asm volatile("diag %1,%2,0x304\n"
- : "=d" (_rc) : "d" (_data), "d" (_cmd) : "memory");
-
- return _rc;
+ asm volatile("diag %[r1],%[r3],0x304\n"
+ : [r1] "+&d" (r1.pair)
+ : [r3] "d" (cmd)
+ : "memory");
+ return r1.odd;
}
static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 7c93c6573524..7138d189cc42 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -147,6 +147,4 @@ ATOMIC64_OPS(xor)
#define arch_atomic64_fetch_sub(_i, _v) arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v) arch_atomic64_add(-(s64)(_i), _v)
-#define ARCH_ATOMIC
-
#endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 68da67d2c4c9..fd149480b6e2 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -299,13 +299,13 @@ static inline unsigned char __flogr(unsigned long word)
}
return bit;
} else {
- register unsigned long bit asm("4") = word;
- register unsigned long out asm("5");
+ union register_pair rp;
+ rp.even = word;
asm volatile(
- " flogr %[bit],%[bit]\n"
- : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
- return bit;
+ " flogr %[rp],%[rp]\n"
+ : [rp] "+d" (rp.pair) : : "cc");
+ return rp.even;
}
}
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index ad3acb1e882b..20f169b6db4e 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -11,8 +11,7 @@ struct ccw_driver;
* @count: number of attached slave devices
* @dev: embedded device structure
* @cdev: variable number of slave devices, allocated as needed
- * @ungroup_work: work to be done when a ccwgroup notifier has action
- * type %BUS_NOTIFY_UNBIND_DRIVER
+ * @ungroup_work: used to ungroup the ccwgroup device
*/
struct ccwgroup_device {
enum {
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index a8c02cfbc712..cdd19d326345 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -29,13 +29,15 @@
*/
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
- register unsigned long reg2 asm("2") = (unsigned long) buff;
- register unsigned long reg3 asm("3") = (unsigned long) len;
+ union register_pair rp = {
+ .even = (unsigned long) buff,
+ .odd = (unsigned long) len,
+ };
asm volatile(
- "0: cksm %0,%1\n" /* do checksum on longs */
+ "0: cksm %[sum],%[rp]\n"
" jo 0b\n"
- : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
+ : [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
return sum;
}
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ac02df906cae..f58c92f28701 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -9,6 +9,7 @@
#include <linux/bitops.h>
#include <linux/genalloc.h>
#include <asm/types.h>
+#include <asm/tpi.h>
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 1960a7295ae5..84c3f0d576c5 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -169,32 +169,36 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
#define system_has_cmpxchg_double() 1
-#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
-({ \
- register __typeof__(*(p1)) __old1 asm("2") = (o1); \
- register __typeof__(*(p2)) __old2 asm("3") = (o2); \
- register __typeof__(*(p1)) __new1 asm("4") = (n1); \
- register __typeof__(*(p2)) __new2 asm("5") = (n2); \
- int cc; \
- asm volatile( \
- " cdsg %[old],%[new],%[ptr]\n" \
- " ipm %[cc]\n" \
- " srl %[cc],28" \
- : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
- : [new] "d" (__new1), "d" (__new2), \
- [ptr] "Q" (*(p1)), "Q" (*(p2)) \
- : "memory", "cc"); \
- !cc; \
-})
+static __always_inline int __cmpxchg_double(unsigned long p1, unsigned long p2,
+ unsigned long o1, unsigned long o2,
+ unsigned long n1, unsigned long n2)
+{
+ union register_pair old = { .even = o1, .odd = o2, };
+ union register_pair new = { .even = n1, .odd = n2, };
+ int cc;
+
+ asm volatile(
+ " cdsg %[old],%[new],%[ptr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [old] "+&d" (old.pair)
+ : [new] "d" (new.pair),
+ [ptr] "QS" (*(unsigned long *)p1), "Q" (*(unsigned long *)p2)
+ : "memory", "cc");
+ return !cc;
+}
#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
- __typeof__(p1) __p1 = (p1); \
- __typeof__(p2) __p2 = (p2); \
+ typeof(p1) __p1 = (p1); \
+ typeof(p2) __p2 = (p2); \
+ \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
- __cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \
+ __cmpxchg_double((unsigned long)__p1, (unsigned long)__p2, \
+ (unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2)); \
})
#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h
index 3e4cbcb7c4cc..4dcefddb7751 100644
--- a/arch/s390/include/asm/cpu_mcf.h
+++ b/arch/s390/include/asm/cpu_mcf.h
@@ -92,9 +92,8 @@ struct cpu_cf_events {
struct cpumf_ctr_info info;
atomic_t ctr_set[CPUMF_CTR_SET_MAX];
atomic64_t alert;
- u64 state, tx_state;
+ u64 state;
unsigned int flags;
- unsigned int txn_flags;
};
DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 91b5d714d28f..e3aa354ab9f4 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -13,7 +13,10 @@
#include <linux/preempt.h>
#include <asm/lowcore.h>
-#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
+
+extern u64 stfle_fac_list[16];
+extern u64 alt_stfle_fac_list[16];
static inline void __set_facility(unsigned long nr, void *facilities)
{
@@ -56,18 +59,20 @@ static inline int test_facility(unsigned long nr)
if (__test_facility(nr, &facilities_als))
return 1;
}
- return __test_facility(nr, &S390_lowcore.stfle_fac_list);
+ return __test_facility(nr, &stfle_fac_list);
}
static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
{
- register unsigned long reg0 asm("0") = size - 1;
+ unsigned long reg0 = size - 1;
asm volatile(
- ".insn s,0xb2b00000,0(%1)" /* stfle */
- : "+d" (reg0)
- : "a" (stfle_fac_list)
- : "memory", "cc");
+ " lgr 0,%[reg0]\n"
+ " .insn s,0xb2b00000,%[list]\n" /* stfle */
+ " lgr %[reg0],0\n"
+ : [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list)
+ :
+ : "memory", "cc", "0");
return reg0;
}
@@ -79,13 +84,15 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
static inline void __stfle(u64 *stfle_fac_list, int size)
{
unsigned long nr;
+ u32 stfl_fac_list;
asm volatile(
" stfl 0(0)\n"
: "=m" (S390_lowcore.stfl_fac_list));
+ stfl_fac_list = S390_lowcore.stfl_fac_list;
+ memcpy(stfle_fac_list, &stfl_fac_list, 4);
nr = 4; /* bytes stored by stfl */
- memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
- if (S390_lowcore.stfl_fac_list & 0x01000000) {
+ if (stfl_fac_list & 0x01000000) {
/* More facility bits available with stfle */
nr = __stfle_asm(stfle_fac_list, size);
nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 586df4c9e2f2..02427b205c11 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -32,45 +32,45 @@
})
/* set system mask. */
-static inline notrace void __arch_local_irq_ssm(unsigned long flags)
+static __always_inline void __arch_local_irq_ssm(unsigned long flags)
{
asm volatile("ssm %0" : : "Q" (flags) : "memory");
}
-static inline notrace unsigned long arch_local_save_flags(void)
+static __always_inline unsigned long arch_local_save_flags(void)
{
return __arch_local_irq_stnsm(0xff);
}
-static inline notrace unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long arch_local_irq_save(void)
{
return __arch_local_irq_stnsm(0xfc);
}
-static inline notrace void arch_local_irq_disable(void)
+static __always_inline void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
-static inline notrace void arch_local_irq_enable(void)
+static __always_inline void arch_local_irq_enable(void)
{
__arch_local_irq_stosm(0x03);
}
/* This only restores external and I/O interrupt state */
-static inline notrace void arch_local_irq_restore(unsigned long flags)
+static __always_inline void arch_local_irq_restore(unsigned long flags)
{
/* only disabled->disabled and disabled->enabled is valid */
if (flags & ARCH_IRQ_ENABLED)
arch_local_irq_enable();
}
-static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
+static __always_inline bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & ARCH_IRQ_ENABLED);
}
-static inline notrace bool arch_irqs_disabled(void)
+static __always_inline bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index 76f351bd6645..2768d5db181f 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -16,7 +16,6 @@
extern void kasan_early_init(void);
extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);
-extern unsigned long kasan_vmax;
/*
* Estimate kasan memory requirements, which it will reserve
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8925f3969478..9b4473f76e56 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -361,6 +361,7 @@ struct sie_page {
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 exit_userspace;
u64 exit_null;
u64 exit_external_request;
@@ -370,13 +371,7 @@ struct kvm_vcpu_stat {
u64 exit_validity;
u64 exit_instruction;
u64 exit_pei;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
u64 halt_no_poll_steal;
- u64 halt_wakeup;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 instruction_lctl;
u64 instruction_lctlg;
u64 instruction_stctl;
@@ -755,12 +750,12 @@ struct kvm_vcpu_arch {
};
struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
u64 inject_io;
u64 inject_float_mchk;
u64 inject_pfault_done;
u64 inject_service_signal;
u64 inject_virtio;
- u64 remote_tlb_flush;
};
struct kvm_arch_memory_slot {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 22bceeeba4bc..47bde5a20a41 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -17,15 +17,23 @@
#define LC_ORDER 1
#define LC_PAGES 2
+struct pgm_tdb {
+ u64 data[32];
+};
+
struct lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
__u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
__u32 ext_params; /* 0x0080 */
- __u16 ext_cpu_addr; /* 0x0084 */
- __u16 ext_int_code; /* 0x0086 */
- __u16 svc_ilc; /* 0x0088 */
- __u16 svc_code; /* 0x008a */
+ union {
+ struct {
+ __u16 ext_cpu_addr; /* 0x0084 */
+ __u16 ext_int_code; /* 0x0086 */
+ };
+ __u32 ext_int_code_addr;
+ };
+ __u32 svc_int_code; /* 0x0088 */
__u16 pgm_ilc; /* 0x008c */
__u16 pgm_code; /* 0x008e */
__u32 data_exc_code; /* 0x0090 */
@@ -40,10 +48,15 @@ struct lowcore {
__u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
__u64 trans_exc_code; /* 0x00a8 */
__u64 monitor_code; /* 0x00b0 */
- __u16 subchannel_id; /* 0x00b8 */
- __u16 subchannel_nr; /* 0x00ba */
- __u32 io_int_parm; /* 0x00bc */
- __u32 io_int_word; /* 0x00c0 */
+ union {
+ struct {
+ __u16 subchannel_id; /* 0x00b8 */
+ __u16 subchannel_nr; /* 0x00ba */
+ __u32 io_int_parm; /* 0x00bc */
+ __u32 io_int_word; /* 0x00c0 */
+ };
+ struct tpi_info tpi_info; /* 0x00b8 */
+ };
__u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
__u32 stfl_fac_list; /* 0x00c8 */
__u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */
@@ -154,12 +167,7 @@ struct lowcore {
__u64 vmcore_info; /* 0x0e0c */
__u8 pad_0x0e14[0x0e18-0x0e14]; /* 0x0e14 */
__u64 os_info; /* 0x0e18 */
- __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
-
- /* Extended facility list */
- __u64 stfle_fac_list[16]; /* 0x0f00 */
- __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
- __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
+ __u8 pad_0x0e20[0x11b0-0x0e20]; /* 0x0e20 */
/* Pointer to the machine check extended save area */
__u64 mcesad; /* 0x11b0 */
@@ -185,7 +193,7 @@ struct lowcore {
__u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
/* Transaction abort diagnostic block */
- __u8 pgm_tdb[256]; /* 0x1800 */
+ struct pgm_tdb pgm_tdb; /* 0x1800 */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
} __packed __aligned(8192);
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index e7cffc7b5c2f..c7937f369e62 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -70,8 +70,8 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
{
int cpu = smp_processor_id();
@@ -85,6 +85,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index cc98f9b78fd4..3ba945c6b9dc 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -55,22 +55,25 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
*/
static inline void copy_page(void *to, void *from)
{
- register void *reg2 asm ("2") = to;
- register unsigned long reg3 asm ("3") = 0x1000;
- register void *reg4 asm ("4") = from;
- register unsigned long reg5 asm ("5") = 0xb0001000;
+ union register_pair dst, src;
+
+ dst.even = (unsigned long) to;
+ dst.odd = 0x1000;
+ src.even = (unsigned long) from;
+ src.odd = 0xb0001000;
+
asm volatile(
- " mvcl 2,4"
- : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
+ " mvcl %[dst],%[src]"
+ : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
: : "memory", "cc");
}
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/*
* These are used to make use of C type-checking..
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 10b67f8aab99..5509b224c2ec 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -133,7 +133,8 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
- u8 reserved : 3;
+ u8 irqs_registered : 1;
+ u8 reserved : 2;
unsigned int devfn; /* DEVFN part of the RID*/
struct mutex lock;
@@ -271,9 +272,13 @@ struct zpci_dev *get_zdev_by_fid(u32);
int zpci_dma_init(void);
void zpci_dma_exit(void);
+/* IRQ */
int __init zpci_irq_init(void);
void __init zpci_irq_exit(void);
+int zpci_set_irq(struct zpci_dev *zdev);
+int zpci_clear_irq(struct zpci_dev *zdev);
+
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 918f0ba4f4d2..cb5fc0690435 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -164,19 +164,20 @@
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
-({ \
- typeof(pcp1) o1__ = (o1), n1__ = (n1); \
- typeof(pcp2) o2__ = (o2), n2__ = (n2); \
- typeof(pcp1) *p1__; \
- typeof(pcp2) *p2__; \
- int ret__; \
- preempt_disable_notrace(); \
- p1__ = raw_cpu_ptr(&(pcp1)); \
- p2__ = raw_cpu_ptr(&(pcp2)); \
- ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
- preempt_enable_notrace(); \
- ret__; \
+#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
+({ \
+ typeof(pcp1) *p1__; \
+ typeof(pcp2) *p2__; \
+ int ret__; \
+ \
+ preempt_disable_notrace(); \
+ p1__ = raw_cpu_ptr(&(pcp1)); \
+ p2__ = raw_cpu_ptr(&(pcp2)); \
+ ret__ = __cmpxchg_double((unsigned long)p1__, (unsigned long)p2__, \
+ (unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2)); \
+ preempt_enable_notrace(); \
+ ret__; \
})
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 6b187cd72251..f14a555eff74 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -134,9 +134,6 @@ static inline void pmd_populate(struct mm_struct *mm,
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
-#define pmd_pgtable(pmd) \
- ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
-
/*
* page table entry allocation/free routines.
*/
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 29c7ecd5ad1d..dcac7b2df72c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -17,6 +17,7 @@
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
+#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@@ -65,8 +66,6 @@ extern unsigned long zero_page_mask;
/* TODO: s390 cannot support io_remap_pfn_range... */
-#define FIRST_USER_ADDRESS 0UL
-
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
@@ -86,16 +85,16 @@ extern unsigned long zero_page_mask;
* happen without trampolines and in addition the placement within a
* 2GB frame is branch prediction unit friendly.
*/
-extern unsigned long VMALLOC_START;
-extern unsigned long VMALLOC_END;
+extern unsigned long __bootdata_preserved(VMALLOC_START);
+extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
-extern struct page *vmemmap;
-extern unsigned long vmemmap_size;
+extern struct page *__bootdata_preserved(vmemmap);
+extern unsigned long __bootdata_preserved(vmemmap_size);
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-extern unsigned long MODULES_VADDR;
-extern unsigned long MODULES_END;
+extern unsigned long __bootdata_preserved(MODULES_VADDR);
+extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
@@ -344,8 +343,6 @@ static inline int is_module_addr(void *addr)
#define PTRS_PER_P4D _CRST_ENTRIES
#define PTRS_PER_PGD _CRST_ENTRIES
-#define MAX_PTRS_PER_P4D PTRS_PER_P4D
-
/*
* Segment table and region3 table entry encoding
* (R = read-only, I = invalid, y = young bit):
@@ -557,27 +554,25 @@ static inline int mm_uses_skeys(struct mm_struct *mm)
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
+ union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
- " csp %0,%3"
- : "+d" (reg2), "+m" (*ptr)
- : "d" (reg3), "d" (address)
+ " csp %[r1],%[address]"
+ : [r1] "+&d" (r1.pair), "+m" (*ptr)
+ : [address] "d" (address)
: "cc");
}
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
+ union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
- " .insn rre,0xb98a0000,%0,%3"
- : "+d" (reg2), "+m" (*ptr)
- : "d" (reg3), "d" (address)
+ " .insn rre,0xb98a0000,%[r1],%[address]"
+ : [r1] "+&d" (r1.pair), "+m" (*ptr)
+ : [address] "d" (address)
: "cc");
}
@@ -591,14 +586,12 @@ static inline void crdte(unsigned long old, unsigned long new,
unsigned long table, unsigned long dtt,
unsigned long address, unsigned long asce)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
- register unsigned long reg4 asm("4") = table | dtt;
- register unsigned long reg5 asm("5") = address;
+ union register_pair r1 = { .even = old, .odd = new, };
+ union register_pair r2 = { .even = table | dtt, .odd = address, };
- asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
- : "+d" (reg2)
- : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
+ asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
+ : [r1] "+&d" (r1.pair)
+ : [r2] "d" (r2.pair), [asce] "a" (asce)
: "memory", "cc");
}
@@ -866,6 +859,25 @@ static inline int pte_unused(pte_t pte)
}
/*
+ * Extract the pgprot value from the given pte while at the same time making it
+ * usable for kernel address space mappings where fault driven dirty and
+ * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
+ * must not be set.
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
+
+ if (pte_write(pte))
+ pte_flags |= pgprot_val(PAGE_KERNEL);
+ else
+ pte_flags |= pgprot_val(PAGE_KERNEL_RO);
+ pte_flags |= pte_val(pte) & mio_wb_bit_mask;
+
+ return __pgprot(pte_flags);
+}
+
+/*
* pgd/pmd/pte modification functions
*/
@@ -1713,4 +1725,7 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#define pmd_pgtable(pmd) \
+ ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
+
#endif /* _S390_PAGE_H */
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index b49e0492842c..23ff51be7e29 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -32,7 +32,7 @@ static inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+ S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
@@ -91,7 +91,7 @@ static inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+ S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 023a15dc25a3..ddc7858bbce4 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -129,7 +129,7 @@ struct thread_struct {
struct runtime_instr_cb *ri_cb;
struct gs_cb *gs_cb; /* Current guarded storage cb */
struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
- unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
+ struct pgm_tdb trap_tdb; /* Transaction abort diagnose block */
/*
* Warning: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
@@ -207,7 +207,7 @@ static __always_inline unsigned long current_stack_pointer(void)
return sp;
}
-static __no_kasan_or_inline unsigned short stap(void)
+static __always_inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -246,7 +246,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
-static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
+static __always_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index f828be78937f..c7850d649373 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,6 +9,7 @@
#include <linux/bits.h>
#include <uapi/asm/ptrace.h>
+#include <asm/tpi.h>
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_SYSCALL_RESTART 1 /* restart the current system call */
@@ -86,9 +87,14 @@ struct pt_regs
};
};
unsigned long orig_gpr2;
- unsigned int int_code;
- unsigned int int_parm;
- unsigned long int_parm_long;
+ union {
+ struct {
+ unsigned int int_code;
+ unsigned int int_parm;
+ unsigned long int_parm_long;
+ };
+ struct tpi_info tpi_info;
+ };
unsigned long flags;
unsigned long cr1;
};
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 8fc52679543d..cb4f73c7228d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -137,7 +137,6 @@ struct slibe {
* @user0: user defineable value
* @res4: reserved paramater
* @user1: user defineable value
- * @user2: user defineable value
*/
struct qaob {
u64 res0[6];
@@ -152,8 +151,7 @@ struct qaob {
u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
u64 user0;
u64 res4[2];
- u64 user1;
- u64 user2;
+ u8 user1[16];
} __attribute__ ((packed, aligned(256)));
/**
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 3e388fa208d4..a8b75da3c1b8 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -78,6 +78,8 @@ struct parmarea {
char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
};
+extern struct parmarea parmarea;
+
extern unsigned int zlib_dfltcc_support;
#define ZLIB_DFLTCC_DISABLED 0
#define ZLIB_DFLTCC_FULL 1
@@ -87,7 +89,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
-extern unsigned long vmalloc_size;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 53ee795cd3d3..edee63da08e7 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -41,15 +41,17 @@
static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
u32 *status)
{
- register unsigned long reg1 asm ("1") = parm;
+ union register_pair r1 = { .odd = parm, };
int cc;
asm volatile(
- " sigp %1,%2,0(%3)\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
- *status = reg1;
+ " sigp %[r1],%[addr],0(%[order])\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [r1] "+&d" (r1.pair)
+ : [addr] "d" (addr), [order] "a" (order)
+ : "cc");
+ *status = r1.even;
return cc;
}
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 2b543163d90a..76c6034428be 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -91,12 +91,16 @@ struct stack_frame {
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r4 asm("6") = (unsigned long)(arg5)
-#define CALL_FMT_0 "=&d" (r2) :
-#define CALL_FMT_1 "+&d" (r2) :
-#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
-#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
-#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
-#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
+/*
+ * To keep this simple, mark registers 2-6 as being changed (volatile)
+ * by the called function, even though register 6 is saved/nonvolatile.
+ */
+#define CALL_FMT_0 "=&d" (r2)
+#define CALL_FMT_1 "+&d" (r2)
+#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
+#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
+#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
+#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
@@ -118,7 +122,7 @@ struct stack_frame {
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: [_prev] "=&a" (prev), CALL_FMT_##nr \
- [_stack] "R" (stack), \
+ : [_stack] "R" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_frame] "d" (frame), \
[_fn] "X" (fn) : CALL_CLOBBER_##nr); \
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 4c0690fc5167..4fd66c5e8934 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -107,16 +107,18 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
#ifdef __HAVE_ARCH_MEMCHR
static inline void *memchr(const void * s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile(
- "0: srst %0,%1\n"
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
" jo 0b\n"
" jl 1f\n"
- " la %0,0\n"
+ " la %[ret],0\n"
"1:"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
return (void *) ret;
}
#endif
@@ -124,13 +126,15 @@ static inline void *memchr(const void * s, int c, size_t n)
#ifdef __HAVE_ARCH_MEMSCAN
static inline void *memscan(void *s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile(
- "0: srst %0,%1\n"
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
" jo 0b\n"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
return (void *) ret;
}
#endif
@@ -138,17 +142,18 @@ static inline void *memscan(void *s, int c, size_t n)
#ifdef __HAVE_ARCH_STRCAT
static inline char *strcat(char *dst, const char *src)
{
- register int r0 asm("0") = 0;
- unsigned long dummy;
+ unsigned long dummy = 0;
char *ret = dst;
asm volatile(
- "0: srst %0,%1\n"
+ " lghi 0,0\n"
+ "0: srst %[dummy],%[dst]\n"
" jo 0b\n"
- "1: mvst %0,%2\n"
+ "1: mvst %[dummy],%[src]\n"
" jo 1b"
- : "=&a" (dummy), "+a" (dst), "+a" (src)
- : "d" (r0), "0" (0) : "cc", "memory" );
+ : [dummy] "+&a" (dummy), [dst] "+&a" (dst), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
#endif
@@ -156,14 +161,15 @@ static inline char *strcat(char *dst, const char *src)
#ifdef __HAVE_ARCH_STRCPY
static inline char *strcpy(char *dst, const char *src)
{
- register int r0 asm("0") = 0;
char *ret = dst;
asm volatile(
- "0: mvst %0,%1\n"
+ " lghi 0,0\n"
+ "0: mvst %[dst],%[src]\n"
" jo 0b"
- : "+&a" (dst), "+&a" (src) : "d" (r0)
- : "cc", "memory");
+ : [dst] "+&a" (dst), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
#endif
@@ -171,28 +177,33 @@ static inline char *strcpy(char *dst, const char *src)
#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s)
{
- register unsigned long r0 asm("0") = 0;
+ unsigned long end = 0;
const char *tmp = s;
asm volatile(
- "0: srst %0,%1\n"
+ " lghi 0,0\n"
+ "0: srst %[end],%[tmp]\n"
" jo 0b"
- : "+d" (r0), "+a" (tmp) : : "cc", "memory");
- return r0 - (unsigned long) s;
+ : [end] "+&a" (end), [tmp] "+&a" (tmp)
+ :
+ : "cc", "memory", "0");
+ return end - (unsigned long)s;
}
#endif
#ifdef __HAVE_ARCH_STRNLEN
static inline size_t strnlen(const char * s, size_t n)
{
- register int r0 asm("0") = 0;
const char *tmp = s;
const char *end = s + n;
asm volatile(
- "0: srst %0,%1\n"
+ " lghi 0,0\n"
+ "0: srst %[end],%[tmp]\n"
" jo 0b"
- : "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
+ : [end] "+&a" (end), [tmp] "+&a" (tmp)
+ :
+ : "cc", "memory", "0");
return end - s;
}
#endif
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index f6326c6d2abe..50d9b04ecbd1 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -75,9 +75,12 @@ static inline void set_clock_comparator(__u64 time)
static inline void set_tod_programmable_field(u16 val)
{
- register unsigned long reg0 asm("0") = val;
-
- asm volatile("sckpf" : : "d" (reg0));
+ asm volatile(
+ " lgr 0,%[val]\n"
+ " sckpf\n"
+ :
+ : [val] "d" ((unsigned long)val)
+ : "0");
}
void clock_comparator_work(void);
@@ -138,16 +141,19 @@ struct ptff_qui {
#define ptff(ptff_block, len, func) \
({ \
struct addrtype { char _[len]; }; \
- register unsigned int reg0 asm("0") = func; \
- register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+ unsigned int reg0 = func; \
+ unsigned long reg1 = (unsigned long)(ptff_block); \
int rc; \
\
asm volatile( \
- " .word 0x0104\n" \
- " ipm %0\n" \
- " srl %0,28\n" \
- : "=d" (rc), "+m" (*(struct addrtype *) reg1) \
- : "d" (reg0), "d" (reg1) : "cc"); \
+ " lgr 0,%[reg0]\n" \
+ " lgr 1,%[reg1]\n" \
+ " .insn e,0x0104\n" \
+ " ipm %[rc]\n" \
+ " srl %[rc],28\n" \
+ : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \
+ : [reg0] "d" (reg0), [reg1] "d" (reg1) \
+ : "cc", "0", "1"); \
rc; \
})
diff --git a/arch/s390/include/asm/tpi.h b/arch/s390/include/asm/tpi.h
new file mode 100644
index 000000000000..1ac538b8cbf5
--- /dev/null
+++ b/arch/s390/include/asm/tpi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_TPI_H
+#define _ASM_S390_TPI_H
+
+#include <linux/types.h>
+#include <uapi/asm/schid.h>
+
+#ifndef __ASSEMBLY__
+
+/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
+struct tpi_info {
+ struct subchannel_id schid;
+ u32 intparm;
+ u32 adapter_IO:1;
+ u32 directed_irq:1;
+ u32 isc:3;
+ u32 :12;
+ u32 type:3;
+ u32 :12;
+} __packed __aligned(4);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_TPI_H */
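
The structure is sized to overlay the three legacy 32-bit I/O interruption words (subchannel id/nr, interruption parameter and interruption word) that later hunks fold into unions in the lowcore and in pt_regs. A hedged build-time cross-check, not part of the patch itself:

#include <linux/build_bug.h>
#include <asm/tpi.h>

static_assert(sizeof(struct tpi_info) == 12,
	      "tpi_info must cover subchannel id/nr, intparm and interruption word");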
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
new file mode 100644
index 000000000000..0b5d550a0478
--- /dev/null
+++ b/arch/s390/include/asm/types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ASM_S390_TYPES_H
+#define _ASM_S390_TYPES_H
+
+#include <uapi/asm/types.h>
+
+#ifndef __ASSEMBLY__
+
+union register_pair {
+ unsigned __int128 pair;
+ struct {
+ unsigned long even;
+ unsigned long odd;
+ };
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_S390_TYPES_H */
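
The union is what the reworked inline assemblies in this series use whenever an instruction requires an even/odd general register pair: handing the 128-bit pair member to a "d" constraint makes the compiler allocate such a pair, while even and odd give named access to the two halves. A usage sketch with a hypothetical helper (not taken from the patch) wrapping DIVIDE LOGICAL, whose 128-bit dividend must live in an even/odd pair:

static inline unsigned long div_u128_by_u64(unsigned long hi, unsigned long lo,
					    unsigned long divisor,
					    unsigned long *remainder)
{
	/* Caller must ensure hi < divisor, otherwise DLGR traps. */
	union register_pair rp = { .even = hi, .odd = lo };

	asm volatile(
		"	dlgr	%[rp],%[div]\n"
		: [rp] "+&d" (rp.pair)
		: [div] "d" (divisor)
		: "cc");
	*remainder = rp.even;	/* remainder is left in the even register */
	return rp.odd;		/* quotient is left in the odd register */
}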
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4756d2937e54..2316f2440881 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -49,52 +49,51 @@ int __get_user_bad(void) __attribute__((noreturn));
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-#define __put_get_user_asm(to, from, size, spec) \
+#define __put_get_user_asm(to, from, size, insn) \
({ \
- register unsigned long __reg0 asm("0") = spec; \
int __rc; \
\
asm volatile( \
- "0: mvcos %1,%3,%2\n" \
- "1: xr %0,%0\n" \
+ insn " 0,%[spec]\n" \
+ "0: mvcos %[_to],%[_from],%[_size]\n" \
+ "1: xr %[rc],%[rc]\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
- "3: lhi %0,%5\n" \
+ "3: lhi %[rc],%[retval]\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
- : "=d" (__rc), "+Q" (*(to)) \
- : "d" (size), "Q" (*(from)), \
- "d" (__reg0), "K" (-EFAULT) \
- : "cc"); \
+ : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
+ : [_size] "d" (size), [_from] "Q" (*(from)), \
+ [retval] "K" (-EFAULT), [spec] "K" (0x81UL) \
+ : "cc", "0"); \
__rc; \
})
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x810000UL;
int rc;
switch (size) {
case 1:
rc = __put_get_user_asm((unsigned char __user *)ptr,
(unsigned char *)x,
- size, spec);
+ size, "llilh");
break;
case 2:
rc = __put_get_user_asm((unsigned short __user *)ptr,
(unsigned short *)x,
- size, spec);
+ size, "llilh");
break;
case 4:
rc = __put_get_user_asm((unsigned int __user *)ptr,
(unsigned int *)x,
- size, spec);
+ size, "llilh");
break;
case 8:
rc = __put_get_user_asm((unsigned long __user *)ptr,
(unsigned long *)x,
- size, spec);
+ size, "llilh");
break;
default:
__put_user_bad();
@@ -105,29 +104,28 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x81UL;
int rc;
switch (size) {
case 1:
rc = __put_get_user_asm((unsigned char *)x,
(unsigned char __user *)ptr,
- size, spec);
+ size, "lghi");
break;
case 2:
rc = __put_get_user_asm((unsigned short *)x,
(unsigned short __user *)ptr,
- size, spec);
+ size, "lghi");
break;
case 4:
rc = __put_get_user_asm((unsigned int *)x,
(unsigned int __user *)ptr,
- size, spec);
+ size, "lghi");
break;
case 8:
rc = __put_get_user_asm((unsigned long *)x,
(unsigned long __user *)ptr,
- size, spec);
+ size, "lghi");
break;
default:
__get_user_bad();
diff --git a/arch/s390/include/uapi/asm/schid.h b/arch/s390/include/uapi/asm/schid.h
index 58fca6f48410..a3e1cf168553 100644
--- a/arch/s390/include/uapi/asm/schid.h
+++ b/arch/s390/include/uapi/asm/schid.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
+#ifndef __ASSEMBLY__
+
struct subchannel_id {
__u32 cssid : 8;
__u32 : 4;
@@ -13,5 +15,6 @@ struct subchannel_id {
__u32 sch_no : 16;
} __attribute__ ((packed, aligned(4)));
+#endif /* __ASSEMBLY__ */
#endif /* _UAPIASM_SCHID_H */
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 8e1f2aee85ef..c22ea1c3ef84 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -76,8 +76,7 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
- if (!__test_facility(a->facility,
- S390_lowcore.alt_stfle_fac_list))
+ if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 15e637728a4b..f53605a3dfcd 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -15,6 +15,7 @@
#include <asm/idle.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
+#include <asm/setup.h>
#include <asm/stacktrace.h>
int main(void)
@@ -58,8 +59,6 @@ int main(void)
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
- OFFSET(__LC_SVC_ILC, lowcore, svc_ilc);
- OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code);
OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
@@ -77,8 +76,6 @@ int main(void)
OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
- OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
- OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
@@ -159,5 +156,7 @@ int main(void)
OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start);
OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len);
DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
+ /* sizeof kernel parameter area */
+ DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
return 0;
}
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 2da027359798..54efc279f54e 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -26,33 +26,35 @@ static char cpcmd_buf[241];
static int diag8_noresponse(int cmdlen)
{
- register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
- register unsigned long reg3 asm ("3") = cmdlen;
-
asm volatile(
- " diag %1,%0,0x8\n"
- : "+d" (reg3) : "d" (reg2) : "cc");
- return reg3;
+ " diag %[rx],%[ry],0x8\n"
+ : [ry] "+&d" (cmdlen)
+ : [rx] "d" ((addr_t) cpcmd_buf)
+ : "cc");
+ return cmdlen;
}
static int diag8_response(int cmdlen, char *response, int *rlen)
{
- unsigned long _cmdlen = cmdlen | 0x40000000L;
- unsigned long _rlen = *rlen;
- register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
- register unsigned long reg3 asm ("3") = (addr_t) response;
- register unsigned long reg4 asm ("4") = _cmdlen;
- register unsigned long reg5 asm ("5") = _rlen;
+ union register_pair rx, ry;
+ int cc;
+ rx.even = (addr_t) cpcmd_buf;
+ rx.odd = (addr_t) response;
+ ry.even = cmdlen | 0x40000000L;
+ ry.odd = *rlen;
asm volatile(
- " diag %2,%0,0x8\n"
- " brc 8,1f\n"
- " agr %1,%4\n"
- "1:\n"
- : "+d" (reg4), "+d" (reg5)
- : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
- *rlen = reg5;
- return reg4;
+ " diag %[rx],%[ry],0x8\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
+ : [rx] "d" (rx.pair)
+ : "cc");
+ if (cc)
+ *rlen += ry.odd;
+ else
+ *rlen = ry.odd;
+ return ry.even;
}
/*
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb958d32bd81..09b6c6402f9b 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1418,7 +1418,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
else
except_str = "-";
caller = (unsigned long) entry->caller;
- rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %pK ",
+ rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %px ",
area, sec, usec, level, except_str,
entry->cpu, (void *)caller);
return rc;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index b8b0cd7b008f..a3f47464c3f1 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -141,16 +141,15 @@ EXPORT_SYMBOL(diag14);
static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
{
- register unsigned long _subcode asm("0") = *subcode;
- register unsigned long _size asm("1") = size;
+ union register_pair rp = { .even = *subcode, .odd = size };
asm volatile(
- " diag %2,%0,0x204\n"
+ " diag %[addr],%[rp],0x204\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
- : "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
- *subcode = _subcode;
- return _size;
+ : [rp] "+&d" (rp.pair) : [addr] "d" (addr) : "memory");
+ *subcode = rp.even;
+ return rp.odd;
}
int diag204(unsigned long subcode, unsigned long size, void *addr)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index a361d2e70025..c2cf79d353cf 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -180,11 +180,9 @@ static noinline __init void setup_lowcore_early(void)
static noinline __init void setup_facility_list(void)
{
- memcpy(S390_lowcore.alt_stfle_fac_list,
- S390_lowcore.stfle_fac_list,
- sizeof(S390_lowcore.alt_stfle_fac_list));
+ memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 12de7a9c85b3..3e8c6669373a 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -129,6 +129,27 @@ _LPP_OFFSET = __LC_LPP
"jnz .+8; .long 0xb2e8d000", 82
.endm
+#if IS_ENABLED(CONFIG_KVM)
+ /*
+ * The OUTSIDE macro jumps to the provided label in case the value
+ * in the provided register is outside of the provided range. The
+ * macro is useful for checking whether a PSW stored in a register
+ * pair points inside or outside of a block of instructions.
+ * @reg: register to check
+ * @start: start of the range
+ * @end: end of the range
+ * @outside_label: jump here if @reg is outside of [@start..@end)
+ */
+ .macro OUTSIDE reg,start,end,outside_label
+ lgr %r14,\reg
+ larl %r13,\start
+ slgr %r14,%r13
+ lghi %r13,\end - \start
+ clgr %r14,%r13
+ jhe \outside_label
+ .endm
+#endif
+
GEN_BR_THUNK %r14
GEN_BR_THUNK %r14,%r13
@@ -214,7 +235,7 @@ ENTRY(sie64a)
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
-# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int
+# See also .Lcleanup_sie
.Lrewind_pad6:
nopr 7
.Lrewind_pad4:
@@ -276,6 +297,7 @@ ENTRY(system_call)
xgr %r10,%r10
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
+ mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
lgr %r3,%r14
brasl %r14,__do_syscall
lctlg %c1,%c1,__LC_USER_ASCE
@@ -318,12 +340,7 @@ ENTRY(pgm_check_handler)
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
- lgr %r14,%r9
- larl %r13,.Lsie_gmap
- slgr %r14,%r13
- lghi %r13,.Lsie_done - .Lsie_gmap
- clgr %r14,%r13
- jhe 1f
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
@@ -392,13 +409,8 @@ ENTRY(\name)
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
#if IS_ENABLED(CONFIG_KVM)
- lgr %r14,%r9
- larl %r13,.Lsie_gmap
- slgr %r14,%r13
- lghi %r13,.Lsie_done - .Lsie_gmap
- clgr %r14,%r13
- jhe 0f
- brasl %r14,.Lcleanup_sie_int
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
+ brasl %r14,.Lcleanup_sie
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -418,6 +430,7 @@ ENTRY(\name)
xgr %r6,%r6
xgr %r7,%r7
xgr %r10,%r10
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
tm %r8,0x0001 # coming from user space?
@@ -540,13 +553,10 @@ ENTRY(mcck_int_handler)
tmhh %r8,0x0001 # interrupting from user ?
jnz .Lmcck_user
#if IS_ENABLED(CONFIG_KVM)
- lgr %r14,%r9
- larl %r13,.Lsie_gmap
- slgr %r14,%r13
- lghi %r13,.Lsie_done - .Lsie_gmap
- clgr %r14,%r13
- jhe .Lmcck_stack
- brasl %r14,.Lcleanup_sie_mcck
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
+ OUTSIDE %r9,.Lsie_entry,.Lsie_skip,5f
+ oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+5: brasl %r14,.Lcleanup_sie
#endif
j .Lmcck_stack
.Lmcck_user:
@@ -648,21 +658,13 @@ ENDPROC(stack_overflow)
#endif
#if IS_ENABLED(CONFIG_KVM)
-.Lcleanup_sie_mcck:
- larl %r13,.Lsie_entry
- slgr %r9,%r13
- larl %r13,.Lsie_skip
- clgr %r9,%r13
- jh .Lcleanup_sie_int
- oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-.Lcleanup_sie_int:
+.Lcleanup_sie:
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_KERNEL_ASCE
larl %r9,sie_exit # skip forward to sie_exit
BR_EX %r14,%r13
-
#endif
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index dba04fbc37a2..50e2c21e0ec9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/ctype.h>
#include <linux/fs.h>
@@ -162,16 +163,18 @@ static bool reipl_ccw_clear;
static inline int __diag308(unsigned long subcode, void *addr)
{
- register unsigned long _addr asm("0") = (unsigned long) addr;
- register unsigned long _rc asm("1") = 0;
+ union register_pair r1;
+ r1.even = (unsigned long) addr;
+ r1.odd = 0;
asm volatile(
- " diag %0,%2,0x308\n"
+ " diag %[r1],%[subcode],0x308\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
- : "+d" (_addr), "+d" (_rc)
- : "d" (subcode) : "cc", "memory");
- return _rc;
+ : [r1] "+&d" (r1.pair)
+ : [subcode] "d" (subcode)
+ : "cc", "memory");
+ return r1.odd;
}
int diag308(unsigned long subcode, void *addr)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 714269e10eec..c0df4060d28d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -146,8 +146,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
account_idle_time_irq();
do {
- memcpy(&regs->int_code, &S390_lowcore.subchannel_id, 12);
- if (S390_lowcore.io_int_word & BIT(31))
+ regs->tpi_info = S390_lowcore.tpi_info;
+ if (S390_lowcore.tpi_info.adapter_IO)
do_irq_async(regs, THIN_INTERRUPT);
else
do_irq_async(regs, IO_INTERRUPT);
@@ -172,7 +172,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
if (user_mode(regs))
update_timer_sys();
- memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
+ regs->int_code = S390_lowcore.ext_int_code_addr;
regs->int_parm = S390_lowcore.ext_params;
regs->int_parm_long = S390_lowcore.ext_params2;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index aae24dc75df6..528bb31815c3 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -44,11 +44,6 @@ void *alloc_insn_page(void)
return page;
}
-void free_insn_page(void *page)
-{
- module_memfree(page);
-}
-
static void *alloc_s390_insn_page(void)
{
if (xchg(&insn_page_in_use, 1) == 1)
@@ -446,23 +441,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(p);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (p->fault_handler && p->fault_handler(p, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 29e511f5bf06..250e4dbf653c 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -17,11 +17,11 @@ static int __init nobp_setup_early(char *str)
* The user explicitely requested nobp=1, enable it and
* disable the expoline support.
*/
- __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __set_facility(82, alt_stfle_fac_list);
if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_disable = 1;
} else {
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
return 0;
}
@@ -29,7 +29,7 @@ early_param("nobp", nobp_setup_early);
static int __init nospec_setup_early(char *str)
{
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
return 0;
}
early_param("nospec", nospec_setup_early);
@@ -40,7 +40,7 @@ static int __init nospec_report(void)
pr_info("Spectre V2 mitigation: etokens\n");
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
- if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ if (__test_facility(82, alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
return 0;
}
@@ -66,14 +66,14 @@ void __init nospec_auto_detect(void)
*/
if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
} else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
*/
nospec_disable = 0;
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
/*
* If the kernel has not been compiled with expolines the
@@ -86,7 +86,7 @@ static int __init spectre_v2_setup_early(char *str)
{
if (str && !strncmp(str, "on", 2)) {
nospec_disable = 0;
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
if (str && !strncmp(str, "off", 3))
nospec_disable = 1;
@@ -99,6 +99,7 @@ early_param("spectre_v2", spectre_v2_setup_early);
static void __init_or_module __nospec_revert(s32 *start, s32 *end)
{
enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+ static const u8 branch[] = { 0x47, 0x00, 0x07, 0x00 };
u8 *instr, *thunk, *br;
u8 insnbuf[6];
s32 *epo;
@@ -128,7 +129,7 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
continue;
- memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+ memcpy(insnbuf + 2, branch, sizeof(branch));
switch (type) {
case BRCL_EXPOLINE:
insnbuf[0] = br[0];
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index 48f472bf9290..b4b5c8c21166 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -17,7 +17,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Mitigation: etokens\n");
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
- if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ if (__test_facility(82, alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
return sprintf(buf, "Vulnerable\n");
}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 31a605bcbc6e..1b7a0525fbed 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -362,15 +362,9 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
- if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ if (!(hwc->state & PERF_HES_STOPPED))
return;
- if (WARN_ON_ONCE(hwc->config == -1))
- return;
-
- if (flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
hwc->state = 0;
/* (Re-)enable and activate the counter set */
@@ -413,15 +407,6 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- /* Check authorization for the counter set to which this
- * counter belongs.
- * For group events transaction, the authorization check is
- * done in cpumf_pmu_commit_txn().
- */
- if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
- if (validate_ctr_auth(&event->hw))
- return -ENOENT;
-
ctr_set_enable(&cpuhw->state, event->hw.config_base);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -449,78 +434,6 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
ctr_set_disable(&cpuhw->state, event->hw.config_base);
}
-/*
- * Start group events scheduling transaction.
- * Set flags to perform a single test at commit time.
- *
- * We only support PERF_PMU_TXN_ADD transactions. Save the
- * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
- * transactions.
- */
-static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-
- WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
-
- cpuhw->txn_flags = txn_flags;
- if (txn_flags & ~PERF_PMU_TXN_ADD)
- return;
-
- perf_pmu_disable(pmu);
- cpuhw->tx_state = cpuhw->state;
-}
-
-/*
- * Stop and cancel a group events scheduling tranctions.
- * Assumes cpumf_pmu_del() is called for each successful added
- * cpumf_pmu_add() during the transaction.
- */
-static void cpumf_pmu_cancel_txn(struct pmu *pmu)
-{
- unsigned int txn_flags;
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-
- WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
-
- txn_flags = cpuhw->txn_flags;
- cpuhw->txn_flags = 0;
- if (txn_flags & ~PERF_PMU_TXN_ADD)
- return;
-
- WARN_ON(cpuhw->tx_state != cpuhw->state);
-
- perf_pmu_enable(pmu);
-}
-
-/*
- * Commit the group events scheduling transaction. On success, the
- * transaction is closed. On error, the transaction is kept open
- * until cpumf_pmu_cancel_txn() is called.
- */
-static int cpumf_pmu_commit_txn(struct pmu *pmu)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- u64 state;
-
- WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
-
- if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
- cpuhw->txn_flags = 0;
- return 0;
- }
-
- /* check if the updated state can be scheduled */
- state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
- state >>= CPUMF_LCCTL_ENABLE_SHIFT;
- if ((state & cpuhw->info.auth_ctl) != state)
- return -ENOENT;
-
- cpuhw->txn_flags = 0;
- perf_pmu_enable(pmu);
- return 0;
-}
-
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
.task_ctx_nr = perf_sw_context,
@@ -533,9 +446,6 @@ static struct pmu cpumf_pmu = {
.start = cpumf_pmu_start,
.stop = cpumf_pmu_stop,
.read = cpumf_pmu_read,
- .start_txn = cpumf_pmu_start_txn,
- .commit_txn = cpumf_pmu_commit_txn,
- .cancel_txn = cpumf_pmu_cancel_txn,
};
static int __init cpumf_pmu_init(void)
diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c
index 6d53215c8484..2300fbaac556 100644
--- a/arch/s390/kernel/perf_cpum_cf_common.c
+++ b/arch/s390/kernel/perf_cpum_cf_common.c
@@ -30,7 +30,6 @@ DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
.alert = ATOMIC64_INIT(0),
.state = 0,
.flags = 0,
- .txn_flags = 0,
};
/* Indicator whether the CPU-Measurement Counter Facility Support is ready */
static bool cpum_cf_initalized;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index e20bed1ed34a..7ae5dde9c54d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -180,7 +180,7 @@ unsigned long get_wchan(struct task_struct *p)
struct unwind_state state;
unsigned long ip = 0;
- if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
+ if (!p || p == current || task_is_running(p) || !task_stack_page(p))
return 0;
if (!try_get_task_stack(p))
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index c92d04f876cb..82df39b17bb5 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -103,11 +103,9 @@ EXPORT_SYMBOL(cpu_have_feature);
static void show_facilities(struct seq_file *m)
{
unsigned int bit;
- long *facilities;
- facilities = (long *)&S390_lowcore.stfle_fac_list;
seq_puts(m, "facilities :");
- for_each_set_bit_inv(bit, facilities, MAX_FACILITY_BIT)
+ for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
seq_printf(m, " %d", bit);
seq_putc(m, '\n');
}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 18b3416fd663..0ea3d02b378d 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -975,10 +975,12 @@ static int s390_tdb_get(struct task_struct *target,
struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
+ size_t size;
if (!(regs->int_code & 0x200))
return -ENODATA;
- return membuf_write(&to, target->thread.trap_tdb, 256);
+ size = sizeof(target->thread.trap_tdb.data);
+ return membuf_write(&to, target->thread.trap_tdb.data, size);
}
static int s390_tdb_set(struct task_struct *target,
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5aab59ad5688..b58ee83f30e3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -96,7 +96,6 @@ unsigned long int_hwcap = 0;
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
-unsigned long __bootdata(vmalloc_size);
struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
@@ -108,6 +107,9 @@ unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
+u64 __bootdata_preserved(stfle_fac_list[16]);
+EXPORT_SYMBOL(stfle_fac_list);
+u64 __bootdata_preserved(alt_stfle_fac_list[16]);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@@ -165,7 +167,7 @@ static void __init set_preferred_console(void)
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
else if (CONSOLE_IS_VT220)
- add_preferred_console("ttyS", 1, NULL);
+ add_preferred_console("ttysclp", 0, NULL);
else if (CONSOLE_IS_HVC)
add_preferred_console("hvc", 0, NULL);
}
@@ -338,27 +340,6 @@ int __init arch_early_irq_init(void)
return 0;
}
-static int __init stack_realloc(void)
-{
- unsigned long old, new;
-
- old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
- new = stack_alloc();
- if (!new)
- panic("Couldn't allocate async stack");
- WRITE_ONCE(S390_lowcore.async_stack, new + STACK_INIT_OFFSET);
- free_pages(old, THREAD_SIZE_ORDER);
-
- old = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
- new = stack_alloc();
- if (!new)
- panic("Couldn't allocate machine check stack");
- WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
- memblock_free_late(old, THREAD_SIZE);
- return 0;
-}
-early_initcall(stack_realloc);
-
void __init arch_call_rest_init(void)
{
unsigned long stack;
@@ -413,11 +394,6 @@ static void __init setup_lowcore_dat_off(void)
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->preempt_count = S390_lowcore.preempt_count;
- lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
- memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- sizeof(lc->stfle_fac_list));
- memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
- sizeof(lc->alt_stfle_fac_list));
nmi_alloc_boot_cpu(lc);
lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -568,53 +544,10 @@ static void __init setup_resources(void)
#endif
}
-static void __init setup_ident_map_size(void)
+static void __init setup_memory_end(void)
{
- unsigned long vmax, tmp;
-
- /* Choose kernel address space layout: 3 or 4 levels. */
- tmp = ident_map_size / PAGE_SIZE;
- tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
- if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
- vmax = _REGION2_SIZE; /* 3-level kernel page table */
- else
- vmax = _REGION1_SIZE; /* 4-level kernel page table */
- /* module area is at the end of the kernel address space. */
- MODULES_END = vmax;
- if (is_prot_virt_host())
- adjust_to_uv_max(&MODULES_END);
-#ifdef CONFIG_KASAN
- vmax = _REGION1_SIZE;
- MODULES_END = kasan_vmax;
-#endif
- MODULES_VADDR = MODULES_END - MODULES_LEN;
- VMALLOC_END = MODULES_VADDR;
- VMALLOC_START = VMALLOC_END - vmalloc_size;
-
- /* Split remaining virtual space between 1:1 mapping & vmemmap array */
- tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
- /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
- tmp = SECTION_ALIGN_UP(tmp);
- tmp = VMALLOC_START - tmp * sizeof(struct page);
- tmp &= ~((vmax >> 11) - 1); /* align to page table level */
- tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
- vmemmap = (struct page *) tmp;
-
- /* Take care that ident_map_size <= vmemmap */
- ident_map_size = min(ident_map_size, (unsigned long)vmemmap);
-#ifdef CONFIG_KASAN
- ident_map_size = min(ident_map_size, KASAN_SHADOW_START);
-#endif
- vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
-#ifdef CONFIG_KASAN
- /* move vmemmap above kasan shadow only if stands in a way */
- if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
- (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
- vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
-#endif
- max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
memblock_remove(ident_map_size, ULONG_MAX);
-
+ max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
@@ -653,30 +586,6 @@ static void __init reserve_above_ident_map(void)
}
/*
- * Make sure that oldmem, where the dump is stored, is protected
- */
-static void __init reserve_oldmem(void)
-{
-#ifdef CONFIG_CRASH_DUMP
- if (OLDMEM_BASE)
- /* Forget all memory above the running kdump system */
- memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
-#endif
-}
-
-/*
- * Make sure that oldmem, where the dump is stored, is protected
- */
-static void __init remove_oldmem(void)
-{
-#ifdef CONFIG_CRASH_DUMP
- if (OLDMEM_BASE)
- /* Forget all memory above the running kdump system */
- memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
-#endif
-}
-
-/*
* Reserve memory for kdump kernel to be loaded with kexec
*/
static void __init reserve_crashkernel(void)
@@ -1141,7 +1050,6 @@ void __init setup_arch(char **cmdline_p)
/* Do some memory reservations *before* memory is added to memblock */
reserve_above_ident_map();
- reserve_oldmem();
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
@@ -1152,10 +1060,9 @@ void __init setup_arch(char **cmdline_p)
memblock_add_mem_detect_info();
free_mem_detect_info();
- remove_oldmem();
setup_uv();
- setup_ident_map_size();
+ setup_memory_end();
setup_memory();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 90163e6184f5..080e7aed181f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -512,7 +512,6 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2fec2b80d35d..ff42d3aa0f00 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -74,7 +74,6 @@ enum {
static DEFINE_PER_CPU(struct cpu *, cpu_device);
struct pcpu {
- struct lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
signed char state; /* physical cpu state */
@@ -194,20 +193,12 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
- if (pcpu != &pcpu_devices[0]) {
- pcpu->lowcore = (struct lowcore *)
- __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- if (!pcpu->lowcore || !nodat_stack)
- goto out;
- } else {
- nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
- }
+ lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc();
mcck_stack = stack_alloc();
- if (!async_stack || !mcck_stack)
- goto out_stack;
- lc = pcpu->lowcore;
+ if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+ goto out;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + STACK_INIT_OFFSET;
@@ -220,45 +211,42 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
- goto out_stack;
+ goto out;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
-out_stack:
+out:
stack_free(mcck_stack);
stack_free(async_stack);
-out:
- if (pcpu != &pcpu_devices[0]) {
- free_pages(nodat_stack, THREAD_SIZE_ORDER);
- free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
- }
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
+ free_pages((unsigned long) lc, LC_ORDER);
return -ENOMEM;
}
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
- unsigned long async_stack, nodat_stack, mcck_stack, lowcore;
-
- nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
- async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
- mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
- lowcore = (unsigned long) pcpu->lowcore;
+ unsigned long async_stack, nodat_stack, mcck_stack;
+ struct lowcore *lc;
+ int cpu;
+ cpu = pcpu - pcpu_devices;
+ lc = lowcore_ptr[cpu];
+ nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
+ async_stack = lc->async_stack - STACK_INIT_OFFSET;
+ mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
- lowcore_ptr[pcpu - pcpu_devices] = NULL;
- nmi_free_per_cpu(pcpu->lowcore);
+ lowcore_ptr[cpu] = NULL;
+ nmi_free_per_cpu(lc);
stack_free(async_stack);
stack_free(mcck_stack);
- if (pcpu == &pcpu_devices[0])
- return;
free_pages(nodat_stack, THREAD_SIZE_ORDER);
- free_pages(lowcore, LC_ORDER);
+ free_pages((unsigned long) lc, LC_ORDER);
}
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
- struct lowcore *lc = pcpu->lowcore;
+ struct lowcore *lc = lowcore_ptr[cpu];
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
@@ -275,17 +263,16 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->cregs_save_area[1] = lc->kernel_asce;
lc->cregs_save_area[7] = lc->user_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
- memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- sizeof(lc->stfle_fac_list));
- memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
- sizeof(lc->alt_stfle_fac_list));
arch_spin_lock_setup(cpu);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
- struct lowcore *lc = pcpu->lowcore;
+ struct lowcore *lc;
+ int cpu;
+ cpu = pcpu - pcpu_devices;
+ lc = lowcore_ptr[cpu];
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) tsk;
@@ -301,8 +288,11 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
- struct lowcore *lc = pcpu->lowcore;
+ struct lowcore *lc;
+ int cpu;
+ cpu = pcpu - pcpu_devices;
+ lc = lowcore_ptr[cpu];
lc->restart_stack = lc->nodat_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
@@ -387,7 +377,7 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
- struct lowcore *lc = pcpu_devices->lowcore;
+ struct lowcore *lc = lowcore_ptr[0];
if (pcpu_devices[0].address == stap())
lc = &S390_lowcore;
@@ -600,18 +590,21 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
int smp_store_status(int cpu)
{
- struct pcpu *pcpu = pcpu_devices + cpu;
+ struct lowcore *lc;
+ struct pcpu *pcpu;
unsigned long pa;
- pa = __pa(&pcpu->lowcore->floating_pt_save_area);
+ pcpu = pcpu_devices + cpu;
+ lc = lowcore_ptr[cpu];
+ pa = __pa(&lc->floating_pt_save_area);
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
return 0;
- pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
+ pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_GS)
- pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
+ pa |= lc->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@@ -878,7 +871,6 @@ static void smp_init_secondary(void)
restore_access_regs(S390_lowcore.access_regs_save_area);
cpu_init();
rcu_cpu_starting(cpu);
- preempt_disable();
init_cpu_timer();
vtime_init();
vdso_getcpu_init();
@@ -1012,7 +1004,6 @@ void __init smp_prepare_boot_cpu(void)
WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
- pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
@@ -1238,3 +1229,54 @@ out:
return rc;
}
subsys_initcall(s390_smp_init);
+
+static __always_inline void set_new_lowcore(struct lowcore *lc)
+{
+ union register_pair dst, src;
+ u32 pfx;
+
+ src.even = (unsigned long) &S390_lowcore;
+ src.odd = sizeof(S390_lowcore);
+ dst.even = (unsigned long) lc;
+ dst.odd = sizeof(*lc);
+ pfx = (unsigned long) lc;
+
+ asm volatile(
+ " mvcl %[dst],%[src]\n"
+ " spx %[pfx]\n"
+ : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
+ : [pfx] "Q" (pfx)
+ : "memory", "cc");
+}
+
+static int __init smp_reinit_ipl_cpu(void)
+{
+ unsigned long async_stack, nodat_stack, mcck_stack;
+ struct lowcore *lc, *lc_ipl;
+ unsigned long flags;
+
+ lc_ipl = lowcore_ptr[0];
+ lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ async_stack = stack_alloc();
+ mcck_stack = stack_alloc();
+ if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+ panic("Couldn't allocate memory");
+
+ local_irq_save(flags);
+ local_mcck_disable();
+ set_new_lowcore(lc);
+ S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
+ S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
+ S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+ lowcore_ptr[0] = lc;
+ local_mcck_enable();
+ local_irq_restore(flags);
+
+ free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
+ memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
+ memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));
+
+ return 0;
+}
+early_initcall(smp_reinit_ipl_cpu);
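
Note: the smp.c changes above drop the per-pcpu lowcore pointer and make lowcore_ptr[] the single source of truth, so several functions now repeat the same two steps to get from a struct pcpu back to its lowcore. A minimal sketch of that lookup, assuming the arrays shown above (the helper itself is not part of the patch):

        /* Hypothetical helper, not in the patch: map a pcpu entry to its lowcore. */
        static inline struct lowcore *pcpu_lowcore(struct pcpu *pcpu)
        {
                return lowcore_ptr[pcpu - pcpu_devices];
        }

With such a helper, pcpu_free_lowcore(), pcpu_attach_task() and pcpu_start_fn() would start with lc = pcpu_lowcore(pcpu) instead of open-coding the index arithmetic each time.
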
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 888cc2f166db..4d141e2c132e 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -395,19 +395,18 @@ out:
static int sthyi(u64 vaddr, u64 *rc)
{
- register u64 code asm("0") = 0;
- register u64 addr asm("2") = vaddr;
- register u64 rcode asm("3");
+ union register_pair r1 = { .even = 0, }; /* subcode */
+ union register_pair r2 = { .even = vaddr, };
int cc;
asm volatile(
- ".insn rre,0xB2560000,%[code],%[addr]\n"
+ ".insn rre,0xB2560000,%[r1],%[r2]\n"
"ipm %[cc]\n"
"srl %[cc],28\n"
- : [cc] "=d" (cc), "=d" (rcode)
- : [code] "d" (code), [addr] "a" (addr)
+ : [cc] "=&d" (cc), [r2] "+&d" (r2.pair)
+ : [r1] "d" (r1.pair)
: "memory", "cc");
- *rc = rcode;
+ *rc = r2.odd;
return cc;
}
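
Note: sthyi() is one of many helpers in this series converted to union register_pair, which models an even/odd register pair so that a single "d" operand hands both halves to an instruction expecting R1/R1+1. Roughly, the shape of such a union is (the exact definition lives in the s390 headers and may differ in detail):

        union register_pair {
                unsigned __int128 pair;
                struct {
                        unsigned long even;
                        unsigned long odd;
                };
        };

Initializing .even/.odd and passing .pair as one operand lets the compiler allocate a proper even/odd register pair, replacing the old register u64 x asm("2") / asm("3") trick; the result can then be read back from the union, as *rc = r2.odd does above.
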
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index 4e5cc7d2364e..76f7916cc30f 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -144,11 +144,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
add_random_kstack_offset();
enter_from_user_mode(regs);
-
- memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long));
- memcpy(&regs->int_code, &S390_lowcore.svc_ilc, sizeof(regs->int_code));
regs->psw = S390_lowcore.svc_old_psw;
-
+ regs->int_code = S390_lowcore.svc_int_code;
update_timer_sys();
local_irq_enable();
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 7e4a2aba366d..64d51ab5a8b4 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2ac3c9b56a13..ef3f2659876c 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -25,19 +25,22 @@ int topology_max_mnest;
static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl)
{
- register int r0 asm("0") = (fc << 28) | sel1;
- register int r1 asm("1") = sel2;
+ int r0 = (fc << 28) | sel1;
int rc = 0;
asm volatile(
- " stsi 0(%3)\n"
+ " lr 0,%[r0]\n"
+ " lr 1,%[r1]\n"
+ " stsi 0(%[sysinfo])\n"
"0: jz 2f\n"
- "1: lhi %1,%4\n"
- "2:\n"
+ "1: lhi %[rc],%[retval]\n"
+ "2: lr %[r0],0\n"
EX_TABLE(0b, 1b)
- : "+d" (r0), "+d" (rc)
- : "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
- : "cc", "memory");
+ : [r0] "+d" (r0), [rc] "+d" (rc)
+ : [r1] "d" (sel2),
+ [sysinfo] "a" (sysinfo),
+ [retval] "K" (-EOPNOTSUPP)
+ : "cc", "0", "1", "memory");
*lvl = ((unsigned int) r0) >> 28;
return rc;
}
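
Note: the __stsi() rewrite shows the recurring conversion pattern in this series: local register variables (register long r0 asm("0") = ...) are replaced by loading the hard register inside the asm body and naming it in the clobber list, avoiding the known fragility of register asm variables around inline assembly. A minimal, contrived sketch of the two styles (adding two values through %r0; not taken from the patch):

        /* old style: C variable pinned to hard register %r0 */
        static inline unsigned long add_via_r0_old(unsigned long a, unsigned long b)
        {
                register unsigned long r0 asm("0") = a;

                asm volatile("  agr     %[r0],%[b]\n"
                             : [r0] "+d" (r0)
                             : [b] "d" (b)
                             : "cc");
                return r0;
        }

        /* new style: %r0 set up inside the asm and listed as a clobber */
        static inline unsigned long add_via_r0_new(unsigned long a, unsigned long b)
        {
                unsigned long res;

                asm volatile("  lgr     0,%[a]\n"
                             "  agr     0,%[b]\n"
                             "  lgr     %[res],0\n"
                             : [res] "=d" (res)
                             : [a] "d" (a), [b] "d" (b)
                             : "cc", "0");
                return res;
        }

Because "0" appears in the clobber list, the compiler will not place any of the named operands in %r0, which is exactly the guarantee the old register-variable form tried (less reliably) to provide.
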
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bfcc327acc6b..26aa2614ee35 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -66,7 +66,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
{
static cpumask_t mask;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
switch (topology_mode) {
case TOPOLOGY_MODE_HW:
while (info) {
@@ -83,10 +86,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
default:
fallthrough;
case TOPOLOGY_MODE_SINGLE:
- cpumask_copy(&mask, cpumask_of(cpu));
break;
}
cpumask_and(&mask, &mask, cpu_online_mask);
+out:
cpumask_copy(dst, &mask);
}
@@ -95,7 +98,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
static cpumask_t mask;
int i;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW)
goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8dd23c703718..019c5748b607 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -36,7 +36,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
unsigned long address;
if (regs->int_code & 0x200)
- address = *(unsigned long *)(current->thread.trap_tdb + 24);
+ address = current->thread.trap_tdb.data[3];
else
address = regs->psw.addr;
return (void __user *) (address - (regs->int_code >> 16));
@@ -318,7 +318,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
if (S390_lowcore.pgm_code & 0x0200) {
/* transaction abort */
- memcpy(&current->thread.trap_tdb, &S390_lowcore.pgm_tdb, 256);
+ current->thread.trap_tdb = S390_lowcore.pgm_tdb;
}
if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 370f664580af..6be2167943bb 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -52,7 +52,7 @@ void __init setup_uv(void)
unsigned long uv_stor_base;
/*
- * keep these conditions in line with kasan init code has_uv_sec_stor_limit()
+ * keep these conditions in line with has_uv_sec_stor_limit()
*/
if (!is_prot_virt_host())
return;
@@ -91,12 +91,6 @@ fail:
prot_virt_host = 0;
}
-void adjust_to_uv_max(unsigned long *vmax)
-{
- if (uv_info.max_sec_stor_addr)
- *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
-}
-
/*
* Requests the Ultravisor to pin the page in the shared state. This will
* cause an intercept when the guest attempts to unshare the pinned page.
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 12decca22e7c..b3aaadc60ead 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -4,7 +4,8 @@
# Copyright IBM Corp. 2008
KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o \
+ $(KVM)/irqchip.o $(KVM)/vfio.o $(KVM)/binary_stats.o
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1296fc10f80c..b655a7d82bf0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -58,112 +58,132 @@
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
(KVM_MAX_VCPUS + LOCAL_IRQS))
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("userspace_handled", exit_userspace),
- VCPU_STAT("exit_null", exit_null),
- VCPU_STAT("pfault_sync", pfault_sync),
- VCPU_STAT("exit_validity", exit_validity),
- VCPU_STAT("exit_stop_request", exit_stop_request),
- VCPU_STAT("exit_external_request", exit_external_request),
- VCPU_STAT("exit_io_request", exit_io_request),
- VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
- VCPU_STAT("exit_instruction", exit_instruction),
- VCPU_STAT("exit_pei", exit_pei),
- VCPU_STAT("exit_program_interruption", exit_program_interruption),
- VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
- VCPU_STAT("exit_operation_exception", exit_operation_exception),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("instruction_lctlg", instruction_lctlg),
- VCPU_STAT("instruction_lctl", instruction_lctl),
- VCPU_STAT("instruction_stctl", instruction_stctl),
- VCPU_STAT("instruction_stctg", instruction_stctg),
- VCPU_STAT("deliver_ckc", deliver_ckc),
- VCPU_STAT("deliver_cputm", deliver_cputm),
- VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
- VCPU_STAT("deliver_external_call", deliver_external_call),
- VCPU_STAT("deliver_service_signal", deliver_service_signal),
- VCPU_STAT("deliver_virtio", deliver_virtio),
- VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
- VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
- VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
- VCPU_STAT("deliver_program", deliver_program),
- VCPU_STAT("deliver_io", deliver_io),
- VCPU_STAT("deliver_machine_check", deliver_machine_check),
- VCPU_STAT("exit_wait_state", exit_wait_state),
- VCPU_STAT("inject_ckc", inject_ckc),
- VCPU_STAT("inject_cputm", inject_cputm),
- VCPU_STAT("inject_external_call", inject_external_call),
- VM_STAT("inject_float_mchk", inject_float_mchk),
- VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
- VM_STAT("inject_io", inject_io),
- VCPU_STAT("inject_mchk", inject_mchk),
- VM_STAT("inject_pfault_done", inject_pfault_done),
- VCPU_STAT("inject_program", inject_program),
- VCPU_STAT("inject_restart", inject_restart),
- VM_STAT("inject_service_signal", inject_service_signal),
- VCPU_STAT("inject_set_prefix", inject_set_prefix),
- VCPU_STAT("inject_stop_signal", inject_stop_signal),
- VCPU_STAT("inject_pfault_init", inject_pfault_init),
- VM_STAT("inject_virtio", inject_virtio),
- VCPU_STAT("instruction_epsw", instruction_epsw),
- VCPU_STAT("instruction_gs", instruction_gs),
- VCPU_STAT("instruction_io_other", instruction_io_other),
- VCPU_STAT("instruction_lpsw", instruction_lpsw),
- VCPU_STAT("instruction_lpswe", instruction_lpswe),
- VCPU_STAT("instruction_pfmf", instruction_pfmf),
- VCPU_STAT("instruction_ptff", instruction_ptff),
- VCPU_STAT("instruction_stidp", instruction_stidp),
- VCPU_STAT("instruction_sck", instruction_sck),
- VCPU_STAT("instruction_sckpf", instruction_sckpf),
- VCPU_STAT("instruction_spx", instruction_spx),
- VCPU_STAT("instruction_stpx", instruction_stpx),
- VCPU_STAT("instruction_stap", instruction_stap),
- VCPU_STAT("instruction_iske", instruction_iske),
- VCPU_STAT("instruction_ri", instruction_ri),
- VCPU_STAT("instruction_rrbe", instruction_rrbe),
- VCPU_STAT("instruction_sske", instruction_sske),
- VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
- VCPU_STAT("instruction_essa", instruction_essa),
- VCPU_STAT("instruction_stsi", instruction_stsi),
- VCPU_STAT("instruction_stfl", instruction_stfl),
- VCPU_STAT("instruction_tb", instruction_tb),
- VCPU_STAT("instruction_tpi", instruction_tpi),
- VCPU_STAT("instruction_tprot", instruction_tprot),
- VCPU_STAT("instruction_tsch", instruction_tsch),
- VCPU_STAT("instruction_sthyi", instruction_sthyi),
- VCPU_STAT("instruction_sie", instruction_sie),
- VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
- VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
- VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
- VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
- VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
- VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
- VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
- VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
- VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
- VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
- VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
- VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
- VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
- VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
- VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
- VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
- VCPU_STAT("instruction_diag_10", diagnose_10),
- VCPU_STAT("instruction_diag_44", diagnose_44),
- VCPU_STAT("instruction_diag_9c", diagnose_9c),
- VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
- VCPU_STAT("diag_9c_forward", diagnose_9c_forward),
- VCPU_STAT("instruction_diag_258", diagnose_258),
- VCPU_STAT("instruction_diag_308", diagnose_308),
- VCPU_STAT("instruction_diag_500", diagnose_500),
- VCPU_STAT("instruction_diag_other", diagnose_other),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_COUNTER(VM, inject_io),
+ STATS_DESC_COUNTER(VM, inject_float_mchk),
+ STATS_DESC_COUNTER(VM, inject_pfault_done),
+ STATS_DESC_COUNTER(VM, inject_service_signal),
+ STATS_DESC_COUNTER(VM, inject_virtio)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, exit_userspace),
+ STATS_DESC_COUNTER(VCPU, exit_null),
+ STATS_DESC_COUNTER(VCPU, exit_external_request),
+ STATS_DESC_COUNTER(VCPU, exit_io_request),
+ STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
+ STATS_DESC_COUNTER(VCPU, exit_stop_request),
+ STATS_DESC_COUNTER(VCPU, exit_validity),
+ STATS_DESC_COUNTER(VCPU, exit_instruction),
+ STATS_DESC_COUNTER(VCPU, exit_pei),
+ STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
+ STATS_DESC_COUNTER(VCPU, instruction_lctl),
+ STATS_DESC_COUNTER(VCPU, instruction_lctlg),
+ STATS_DESC_COUNTER(VCPU, instruction_stctl),
+ STATS_DESC_COUNTER(VCPU, instruction_stctg),
+ STATS_DESC_COUNTER(VCPU, exit_program_interruption),
+ STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
+ STATS_DESC_COUNTER(VCPU, exit_operation_exception),
+ STATS_DESC_COUNTER(VCPU, deliver_ckc),
+ STATS_DESC_COUNTER(VCPU, deliver_cputm),
+ STATS_DESC_COUNTER(VCPU, deliver_external_call),
+ STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_service_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_virtio),
+ STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_program),
+ STATS_DESC_COUNTER(VCPU, deliver_io),
+ STATS_DESC_COUNTER(VCPU, deliver_machine_check),
+ STATS_DESC_COUNTER(VCPU, exit_wait_state),
+ STATS_DESC_COUNTER(VCPU, inject_ckc),
+ STATS_DESC_COUNTER(VCPU, inject_cputm),
+ STATS_DESC_COUNTER(VCPU, inject_external_call),
+ STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
+ STATS_DESC_COUNTER(VCPU, inject_mchk),
+ STATS_DESC_COUNTER(VCPU, inject_pfault_init),
+ STATS_DESC_COUNTER(VCPU, inject_program),
+ STATS_DESC_COUNTER(VCPU, inject_restart),
+ STATS_DESC_COUNTER(VCPU, inject_set_prefix),
+ STATS_DESC_COUNTER(VCPU, inject_stop_signal),
+ STATS_DESC_COUNTER(VCPU, instruction_epsw),
+ STATS_DESC_COUNTER(VCPU, instruction_gs),
+ STATS_DESC_COUNTER(VCPU, instruction_io_other),
+ STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+ STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+ STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+ STATS_DESC_COUNTER(VCPU, instruction_ptff),
+ STATS_DESC_COUNTER(VCPU, instruction_sck),
+ STATS_DESC_COUNTER(VCPU, instruction_sckpf),
+ STATS_DESC_COUNTER(VCPU, instruction_stidp),
+ STATS_DESC_COUNTER(VCPU, instruction_spx),
+ STATS_DESC_COUNTER(VCPU, instruction_stpx),
+ STATS_DESC_COUNTER(VCPU, instruction_stap),
+ STATS_DESC_COUNTER(VCPU, instruction_iske),
+ STATS_DESC_COUNTER(VCPU, instruction_ri),
+ STATS_DESC_COUNTER(VCPU, instruction_rrbe),
+ STATS_DESC_COUNTER(VCPU, instruction_sske),
+ STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
+ STATS_DESC_COUNTER(VCPU, instruction_stsi),
+ STATS_DESC_COUNTER(VCPU, instruction_stfl),
+ STATS_DESC_COUNTER(VCPU, instruction_tb),
+ STATS_DESC_COUNTER(VCPU, instruction_tpi),
+ STATS_DESC_COUNTER(VCPU, instruction_tprot),
+ STATS_DESC_COUNTER(VCPU, instruction_tsch),
+ STATS_DESC_COUNTER(VCPU, instruction_sie),
+ STATS_DESC_COUNTER(VCPU, instruction_essa),
+ STATS_DESC_COUNTER(VCPU, instruction_sthyi),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
+ STATS_DESC_COUNTER(VCPU, diagnose_10),
+ STATS_DESC_COUNTER(VCPU, diagnose_44),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
+ STATS_DESC_COUNTER(VCPU, diagnose_258),
+ STATS_DESC_COUNTER(VCPU, diagnose_308),
+ STATS_DESC_COUNTER(VCPU, diagnose_500),
+ STATS_DESC_COUNTER(VCPU, diagnose_other),
+ STATS_DESC_COUNTER(VCPU, pfault_sync)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
/* allow nested virtualization in KVM (if enabled by user space) */
@@ -214,7 +234,7 @@ static unsigned long kvm_s390_fac_size(void)
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
- sizeof(S390_lowcore.stfle_fac_list));
+ sizeof(stfle_fac_list));
return SIZE_INTERNAL;
}
@@ -329,31 +349,31 @@ static void allow_cpu_feat(unsigned long nr)
static inline int plo_test_bit(unsigned char nr)
{
- register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
+ unsigned long function = (unsigned long)nr | 0x100;
int cc;
asm volatile(
+ " lgr 0,%[function]\n"
/* Parameter registers are ignored for "test bit" */
" plo 0,0,0,0(0)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
- : "d" (r0)
- : "cc");
+ : [function] "d" (function)
+ : "cc", "0");
return cc == 0;
}
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
- register unsigned long r0 asm("0") = 0; /* query function */
- register unsigned long r1 asm("1") = (unsigned long) query;
-
asm volatile(
- /* Parameter regs are ignored */
+ " lghi 0,0\n"
+ " lgr 1,%[query]\n"
+ /* Parameter registers are ignored */
" .insn rrf,%[opc] << 16,2,4,6,0\n"
:
- : "d" (r0), "a" (r1), [opc] "i" (opcode)
- : "cc", "memory");
+ : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
+ : "cc", "memory", "0", "1");
}
#define INSN_SORTL 0xb938
@@ -713,6 +733,10 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
set_kvm_facility(kvm->arch.model.fac_mask, 152);
set_kvm_facility(kvm->arch.model.fac_list, 152);
}
+ if (test_facility(192)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 192);
+ set_kvm_facility(kvm->arch.model.fac_list, 192);
+ }
r = 0;
} else
r = -EINVAL;
@@ -1458,8 +1482,8 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
mach->ibc = sclp.ibc;
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
- memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- sizeof(S390_lowcore.stfle_fac_list));
+ memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
+ sizeof(stfle_fac_list));
VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
kvm->arch.model.ibc,
kvm->arch.model.cpuid);
@@ -2683,10 +2707,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
for (i = 0; i < kvm_s390_fac_size(); i++) {
- kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
+ kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
(kvm_s390_fac_base[i] |
kvm_s390_fac_ext[i]);
- kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
+ kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
kvm_s390_fac_base[i];
}
kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
@@ -5055,7 +5079,7 @@ static int __init kvm_s390_init(void)
for (i = 0; i < 16; i++)
kvm_s390_fac_base[i] |=
- S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
+ stfle_fac_list[i] & nonhyp_mask(i);
return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
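
Note: the debugfs_entries[] table is replaced by the binary statistics interface: per-VM and per-vCPU descriptor arrays plus a kvm_stats_header whose id_offset, desc_offset and data_offset tell userspace where the id string, the descriptors and the u64 data block sit in the stats file. Adding a counter under the new scheme presumably takes two coordinated edits, sketched below with a made-up counter name:

        /* 1) add the field to the arch's struct kvm_vcpu_stat: */
        struct kvm_vcpu_stat {
                struct kvm_vcpu_stat_generic generic;
                u64 exit_userspace;
                /* ... existing counters ... */
                u64 instruction_example;        /* hypothetical new counter */
        };

        /* 2) describe it in kvm_vcpu_stats_desc[]: */
        STATS_DESC_COUNTER(VCPU, instruction_example),

The static_assert() next to each array compares ARRAY_SIZE() against sizeof(struct)/sizeof(u64), so forgetting either half of the edit fails the build rather than silently skewing the exported data.
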
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 813b6e93dc83..c8841f476e91 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -140,7 +140,12 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
/* Allocate variable storage */
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
vlen += uv_info.guest_virt_base_stor_len;
- kvm->arch.pv.stor_var = vzalloc(vlen);
+ /*
+ * The Create Secure Configuration Ultravisor Call does not support
+ * using large pages for the virtual memory area.
+ * This is a hardware limitation.
+ */
+ kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
if (!kvm->arch.pv.stor_var)
goto out_err;
return 0;
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 93b3209b94a2..ec5b76bde4d8 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -18,23 +18,30 @@
*/
static inline char *__strend(const char *s)
{
- register unsigned long r0 asm("0") = 0;
-
- asm volatile ("0: srst %0,%1\n"
- " jo 0b"
- : "+d" (r0), "+a" (s) : : "cc", "memory");
- return (char *) r0;
+ unsigned long e = 0;
+
+ asm volatile(
+ " lghi 0,0\n"
+ "0: srst %[e],%[s]\n"
+ " jo 0b\n"
+ : [e] "+&a" (e), [s] "+&a" (s)
+ :
+ : "cc", "memory", "0");
+ return (char *)e;
}
static inline char *__strnend(const char *s, size_t n)
{
- register unsigned long r0 asm("0") = 0;
const char *p = s + n;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b"
- : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
- return (char *) p;
+ asm volatile(
+ " lghi 0,0\n"
+ "0: srst %[p],%[s]\n"
+ " jo 0b\n"
+ : [p] "+&d" (p), [s] "+&a" (s)
+ :
+ : "cc", "memory", "0");
+ return (char *)p;
}
/**
@@ -76,13 +83,15 @@ EXPORT_SYMBOL(strnlen);
#ifdef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
{
- register int r0 asm("0") = 0;
char *ret = dest;
- asm volatile ("0: mvst %0,%1\n"
- " jo 0b"
- : "+&a" (dest), "+&a" (src) : "d" (r0)
- : "cc", "memory" );
+ asm volatile(
+ " lghi 0,0\n"
+ "0: mvst %[dest],%[src]\n"
+ " jo 0b\n"
+ : [dest] "+&a" (dest), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcpy);
@@ -144,16 +153,18 @@ EXPORT_SYMBOL(strncpy);
#ifdef __HAVE_ARCH_STRCAT
char *strcat(char *dest, const char *src)
{
- register int r0 asm("0") = 0;
- unsigned long dummy;
+ unsigned long dummy = 0;
char *ret = dest;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b\n"
- "1: mvst %0,%2\n"
- " jo 1b"
- : "=&a" (dummy), "+a" (dest), "+a" (src)
- : "d" (r0), "0" (0UL) : "cc", "memory" );
+ asm volatile(
+ " lghi 0,0\n"
+ "0: srst %[dummy],%[dest]\n"
+ " jo 0b\n"
+ "1: mvst %[dummy],%[src]\n"
+ " jo 1b\n"
+ : [dummy] "=&a" (dummy), [dest] "+&a" (dest), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcat);
@@ -221,18 +232,20 @@ EXPORT_SYMBOL(strncat);
#ifdef __HAVE_ARCH_STRCMP
int strcmp(const char *s1, const char *s2)
{
- register int r0 asm("0") = 0;
int ret = 0;
- asm volatile ("0: clst %2,%3\n"
- " jo 0b\n"
- " je 1f\n"
- " ic %0,0(%2)\n"
- " ic %1,0(%3)\n"
- " sr %0,%1\n"
- "1:"
- : "+d" (ret), "+d" (r0), "+a" (s1), "+a" (s2)
- : : "cc", "memory");
+ asm volatile(
+ " lghi 0,0\n"
+ "0: clst %[s1],%[s2]\n"
+ " jo 0b\n"
+ " je 1f\n"
+ " ic %[ret],0(%[s1])\n"
+ " ic 0,0(%[s2])\n"
+ " sr %[ret],0\n"
+ "1:"
+ : [ret] "+&d" (ret), [s1] "+&a" (s1), [s2] "+&a" (s2)
+ :
+ : "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcmp);
@@ -261,18 +274,18 @@ EXPORT_SYMBOL(strrchr);
static inline int clcle(const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
{
- register unsigned long r2 asm("2") = (unsigned long) s1;
- register unsigned long r3 asm("3") = (unsigned long) l1;
- register unsigned long r4 asm("4") = (unsigned long) s2;
- register unsigned long r5 asm("5") = (unsigned long) l2;
+ union register_pair r1 = { .even = (unsigned long)s1, .odd = l1, };
+ union register_pair r3 = { .even = (unsigned long)s2, .odd = l2, };
int cc;
- asm volatile ("0: clcle %1,%3,0\n"
- " jo 0b\n"
- " ipm %0\n"
- " srl %0,28"
- : "=&d" (cc), "+a" (r2), "+a" (r3),
- "+a" (r4), "+a" (r5) : : "cc", "memory");
+ asm volatile(
+ "0: clcle %[r1],%[r3],0\n"
+ " jo 0b\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [r1] "+&d" (r1.pair), [r3] "+&d" (r3.pair)
+ :
+ : "cc", "memory");
return cc;
}
@@ -315,15 +328,18 @@ EXPORT_SYMBOL(strstr);
#ifdef __HAVE_ARCH_MEMCHR
void *memchr(const void *s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b\n"
- " jl 1f\n"
- " la %0,0\n"
- "1:"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
+ asm volatile(
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
+ " jo 0b\n"
+ " jl 1f\n"
+ " la %[ret],0\n"
+ "1:"
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
return (void *) ret;
}
EXPORT_SYMBOL(memchr);
@@ -360,13 +376,16 @@ EXPORT_SYMBOL(memcmp);
#ifdef __HAVE_ARCH_MEMSCAN
void *memscan(void *s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b\n"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
- return (void *) ret;
+ asm volatile(
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
+ " jo 0b\n"
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
+ return (void *)ret;
}
EXPORT_SYMBOL(memscan);
#endif
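
Note: __strend() and __strnend() keep using SRST with %r0 holding the termination character (zero); only the register setup moves into the asm body. For reference, a plain-C sketch of what __strend() computes — the address of the terminating NUL, not the length (sketch only; the asm version is what is actually built):

        static inline char *strend_ref(const char *s)
        {
                while (*s)
                        s++;
                return (char *)s;
        }

strlen() then falls out as __strend(s) - s, and strnlen() as __strnend(s, n) - s, which is how the unchanged callers elsewhere in the file use these helpers.
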
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 2fece1fd210a..67606d932825 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -61,11 +61,11 @@ static inline int copy_with_mvcos(void)
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x81UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
+ " lghi 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@@ -84,7 +84,8 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ : [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
return size;
}
@@ -133,11 +134,11 @@ EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
+ " llilh 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@@ -156,7 +157,8 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ : [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
return size;
}
@@ -205,12 +207,12 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810081UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
/* FIXME: copy with reduced length. */
asm volatile(
+ " lgr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
" jz 2f\n"
"1: algr %0,%3\n"
@@ -221,7 +223,8 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
"3: \n"
EX_TABLE(0b,3b)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ : [spec] "d" (0x810081UL)
+ : "cc", "memory");
return size;
}
@@ -266,11 +269,11 @@ EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
+ " llilh 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
" jz 4f\n"
"1: algr %0,%2\n"
@@ -288,7 +291,8 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
- : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+ : "a" (empty_zero_page), [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
return size;
}
@@ -338,10 +342,10 @@ EXPORT_SYMBOL(__clear_user);
static inline unsigned long strnlen_user_srst(const char __user *src,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0;
unsigned long tmp1, tmp2;
asm volatile(
+ " lghi 0,0\n"
" la %2,0(%1)\n"
" la %3,0(%0,%1)\n"
" slgr %0,%0\n"
@@ -353,7 +357,8 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
"1: sacf 768\n"
EX_TABLE(0b,1b)
: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ :
+ : "cc", "memory", "0");
return size;
}
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index 29d9470dbceb..a963c3d8ad0d 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -91,9 +91,6 @@ static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
- /* Get around a gcc oddity */
- register unsigned long *reg7 asm ("7") = p5;
-
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
@@ -122,7 +119,7 @@ static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" xc 0(1,%1),0(%5)\n"
"3:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
- "+a" (reg7)
+ "+a" (p5)
: : "0", "1", "cc", "memory");
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 826d01777361..8ae3dc5783fd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -702,7 +702,7 @@ static void pfault_interrupt(struct ext_code ext_code,
* interrupt since it must be a leftover of a PFAULT
* CANCEL operation which didn't remove all pending
* completion interrupts. */
- if (tsk->state == TASK_RUNNING)
+ if (task_is_running(tsk))
tsk->thread.pfault_wait = -1;
}
} else {
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index db4d303aaaa9..a0fdc6dc5f9d 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -13,7 +13,6 @@
#include <asm/setup.h>
#include <asm/uv.h>
-unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
@@ -251,28 +250,9 @@ static void __init kasan_early_detect_facilities(void)
}
}
-static bool __init has_uv_sec_stor_limit(void)
-{
- /*
- * keep these conditions in line with setup_uv()
- */
- if (!is_prot_virt_host())
- return false;
-
- if (is_prot_virt_guest())
- return false;
-
- if (!test_facility(158))
- return false;
-
- return !!uv_info.max_sec_stor_addr;
-}
-
void __init kasan_early_init(void)
{
- unsigned long untracked_mem_end;
unsigned long shadow_alloc_size;
- unsigned long vmax_unlimited;
unsigned long initrd_end;
unsigned long memsize;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
@@ -306,9 +286,6 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);
- untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
- if (has_uv_sec_stor_limit())
- kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_early_shadow_p4d,
@@ -375,18 +352,18 @@ void __init kasan_early_init(void)
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
- if (IS_ENABLED(CONFIG_MODULES))
- untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */
- kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax),
+ kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
- kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end),
+ kasan_early_pgtable_populate(__sha(ident_map_size),
+ IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
+ __sha(VMALLOC_START) :
+ __sha(MODULES_VADDR),
POPULATE_ZERO_SHADOW);
- kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited),
+ kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos;
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 1d17413b319a..1f1f906344ff 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -79,22 +79,21 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
- register unsigned long _dest asm("2") = (unsigned long) dest;
- register unsigned long _len1 asm("3") = (unsigned long) count;
- register unsigned long _src asm("4") = (unsigned long) src;
- register unsigned long _len2 asm("5") = (unsigned long) count;
+ union register_pair _dst, _src;
int rc = -EFAULT;
+ _dst.even = (unsigned long) dest;
+ _dst.odd = (unsigned long) count;
+ _src.even = (unsigned long) src;
+ _src.odd = (unsigned long) count;
asm volatile (
- "0: mvcle %1,%2,0x0\n"
+ "0: mvcle %[dst],%[src],0\n"
"1: jo 0b\n"
- " lhi %0,0x0\n"
+ " lhi %[rc],0\n"
"2:\n"
EX_TABLE(1b,2b)
- : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
- "+d" (_len2), "=m" (*((long *) dest))
- : "m" (*((long *) src))
- : "cc", "memory");
+ : [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
+ : : "cc", "memory");
return rc;
}
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 7f0e154a470a..68b153083a92 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -31,17 +31,17 @@ __setup("cmma=", cmma);
static inline int cmma_test_essa(void)
{
- register unsigned long tmp asm("0") = 0;
- register int rc asm("1");
+ unsigned long tmp = 0;
+ int rc = -EOPNOTSUPP;
/* test ESSA_GET_STATE */
asm volatile(
- " .insn rrf,0xb9ab0000,%1,%1,%2,0\n"
- "0: la %0,0\n"
+ " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
+ "0: la %[rc],0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=&d" (rc), "+&d" (tmp)
- : "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
+ : [rc] "+&d" (rc), [tmp] "+&d" (tmp)
+ : [cmd] "i" (ESSA_GET_STATE));
return rc;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18205f851c24..eec3a9d7176e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -691,7 +691,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
if (!non_swap_entry(entry))
dec_mm_counter(mm, MM_SWAPENTS);
else if (is_migration_entry(entry)) {
- struct page *page = migration_entry_to_page(entry);
+ struct page *page = pfn_swap_entry_to_page(entry);
dec_mm_counter(mm, mm_counter(page));
}
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 02f9505c99a8..2e43996159f0 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -63,16 +63,15 @@ u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
- register u64 __addr asm("2") = addr;
- register u64 __range asm("3") = range;
+ union register_pair addr_range = {.even = addr, .odd = range};
u8 cc;
asm volatile (
- " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " .insn rre,0xb9d30000,%[fn],%[addr_range]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn)
- : [addr] "d" (__addr), "d" (__range)
+ : [addr_range] "d" (addr_range.pair)
: "cc");
*status = fn >> 24 & 0xff;
return cc;
@@ -113,21 +112,19 @@ int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
+ union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
u64 __data;
asm volatile (
- " .insn rre,0xb9d20000,%[data],%[req]\n"
+ " .insn rre,0xb9d20000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
- : "d" (__offset)
- : "cc");
- *status = __req >> 24 & 0xff;
+ : [cc] "+d" (cc), [data] "=d" (__data),
+ [req_off] "+&d" (req_off.pair) :: "cc");
+ *status = req_off.even >> 24 & 0xff;
*data = __data;
return cc;
}
@@ -173,21 +170,19 @@ static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
- register u64 addr asm("2") = ioaddr;
- register u64 r3 asm("3") = len;
+ union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
u64 __data;
asm volatile (
- " .insn rre,0xb9d60000,%[data],%[ioaddr]\n"
+ " .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
- : [ioaddr] "d" (addr)
- : "cc");
- *status = r3 >> 24 & 0xff;
+ : [cc] "+d" (cc), [data] "=d" (__data),
+ [ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
+ *status = ioaddr_len.odd >> 24 & 0xff;
*data = __data;
return cc;
}
@@ -211,20 +206,19 @@ EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
+ union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
asm volatile (
- " .insn rre,0xb9d00000,%[data],%[req]\n"
+ " .insn rre,0xb9d00000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), [req] "+d" (__req)
- : "d" (__offset), [data] "d" (data)
+ : [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
+ : [data] "d" (data)
: "cc");
- *status = __req >> 24 & 0xff;
+ *status = req_off.even >> 24 & 0xff;
return cc;
}
@@ -257,20 +251,19 @@ static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
- register u64 addr asm("2") = ioaddr;
- register u64 r3 asm("3") = len;
+ union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
asm volatile (
- " .insn rre,0xb9d40000,%[data],%[ioaddr]\n"
+ " .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), "+d" (r3)
- : [data] "d" (data), [ioaddr] "d" (addr)
- : "cc");
- *status = r3 >> 24 & 0xff;
+ : [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
+ : [data] "d" (data)
+ : "cc", "memory");
+ *status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 9dd5ad1b553d..9c7de9089939 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -35,7 +35,7 @@ static struct airq_iv *zpci_sbv;
*/
static struct airq_iv **zpci_ibv;
-/* Modify PCI: Register adapter interruptions */
+/* Modify PCI: Register floating adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
@@ -53,7 +53,7 @@ static int zpci_set_airq(struct zpci_dev *zdev)
return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
-/* Modify PCI: Unregister adapter interruptions */
+/* Modify PCI: Unregister floating adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
@@ -98,6 +98,38 @@ static int zpci_clear_directed_irq(struct zpci_dev *zdev)
return cc ? -EIO : 0;
}
+/* Register adapter interruptions */
+int zpci_set_irq(struct zpci_dev *zdev)
+{
+ int rc;
+
+ if (irq_delivery == DIRECTED)
+ rc = zpci_set_directed_irq(zdev);
+ else
+ rc = zpci_set_airq(zdev);
+
+ if (!rc)
+ zdev->irqs_registered = 1;
+
+ return rc;
+}
+
+/* Clear adapter interruptions */
+int zpci_clear_irq(struct zpci_dev *zdev)
+{
+ int rc;
+
+ if (irq_delivery == DIRECTED)
+ rc = zpci_clear_directed_irq(zdev);
+ else
+ rc = zpci_clear_airq(zdev);
+
+ if (!rc)
+ zdev->irqs_registered = 0;
+
+ return rc;
+}
+
static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
bool force)
{
@@ -311,10 +343,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
zdev->msi_first_bit = bit;
zdev->msi_nr_irqs = msi_vecs;
- if (irq_delivery == DIRECTED)
- rc = zpci_set_directed_irq(zdev);
- else
- rc = zpci_set_airq(zdev);
+ rc = zpci_set_irq(zdev);
if (rc)
return rc;
@@ -328,10 +357,7 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
int rc;
/* Disable interrupts */
- if (irq_delivery == DIRECTED)
- rc = zpci_clear_directed_irq(zdev);
- else
- rc = zpci_clear_airq(zdev);
+ rc = zpci_clear_irq(zdev);
if (rc)
return;
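
Note: zpci_set_irq() and zpci_clear_irq() wrap the floating/directed distinction and additionally record the outcome in zdev->irqs_registered, so other code can tell whether a device currently has adapter interrupts registered. A hypothetical caller that quiesces and then restores a device's interrupts might look like this (illustration only, not from the patch):

        static int example_reset_irqs(struct zpci_dev *zdev)
        {
                int rc = 0;

                if (zdev->irqs_registered) {
                        rc = zpci_clear_irq(zdev);
                        if (rc)
                                return rc;
                        rc = zpci_set_irq(zdev);
                }
                return rc;
        }
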
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 474617b88648..ae683aa623ac 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -49,8 +49,7 @@ static inline int __pcistg_mio_inuser(
void __iomem *ioaddr, const void __user *src,
u64 ulen, u8 *status)
{
- register u64 addr asm("2") = (u64 __force) ioaddr;
- register u64 len asm("3") = ulen;
+ union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
int cc = -ENXIO;
u64 val = 0;
u64 cnt = ulen;
@@ -68,7 +67,7 @@ static inline int __pcistg_mio_inuser(
" aghi %[src],1\n"
" ogr %[val],%[tmp]\n"
" brctg %[cnt],0b\n"
- "1: .insn rre,0xb9d40000,%[val],%[ioaddr]\n"
+ "1: .insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
"2: ipm %[cc]\n"
" srl %[cc],28\n"
"3: sacf 768\n"
@@ -76,10 +75,9 @@ static inline int __pcistg_mio_inuser(
:
[src] "+a" (src), [cnt] "+d" (cnt),
[val] "+d" (val), [tmp] "=d" (tmp),
- [len] "+d" (len), [cc] "+d" (cc),
- [ioaddr] "+a" (addr)
+ [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
:: "cc", "memory");
- *status = len >> 24 & 0xff;
+ *status = ioaddr_len.odd >> 24 & 0xff;
/* did we read everything from user memory? */
if (!cc && cnt != 0)
@@ -195,8 +193,7 @@ static inline int __pcilg_mio_inuser(
void __user *dst, const void __iomem *ioaddr,
u64 ulen, u8 *status)
{
- register u64 addr asm("2") = (u64 __force) ioaddr;
- register u64 len asm("3") = ulen;
+ union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
u64 cnt = ulen;
int shift = ulen * 8;
int cc = -ENXIO;
@@ -209,7 +206,7 @@ static inline int __pcilg_mio_inuser(
*/
asm volatile (
" sacf 256\n"
- "0: .insn rre,0xb9d60000,%[val],%[ioaddr]\n"
+ "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
"1: ipm %[cc]\n"
" srl %[cc],28\n"
" ltr %[cc],%[cc]\n"
@@ -222,18 +219,17 @@ static inline int __pcilg_mio_inuser(
"4: sacf 768\n"
EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
:
- [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
+ [ioaddr_len] "+&d" (ioaddr_len.pair),
+ [cc] "+d" (cc), [val] "=d" (val),
[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
[shift] "+d" (shift)
- :
- [ioaddr] "a" (addr)
- : "cc", "memory");
+ :: "cc", "memory");
/* did we write everything to the user space buffer? */
if (!cc && cnt != 0)
cc = -EFAULT;
- *status = len >> 24 & 0xff;
+ *status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index c57f8c40e992..21c4ebe29b9a 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -24,6 +24,7 @@ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
+KBUILD_CFLAGS += -fno-stack-protector
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 61ce5b59b828..606324e56e4e 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -115,6 +115,10 @@ static struct facility_def facility_defs[] = {
12, /* AP Query Configuration Information */
15, /* AP Facilities Test */
156, /* etoken facility */
+ 165, /* nnpa facility */
+ 193, /* bear enhancement facility */
+ 194, /* rdp enhancement facility */
+ 196, /* processor activity instrumentation facility */
-1 /* END */
}
},
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 68129537e350..45a0549421cd 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,7 +14,6 @@ config SUPERH
select ARCH_HIBERNATION_POSSIBLE if MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
- select CLKDEV_LOOKUP
select CPU_NO_EFFICIENT_FFS
select DMA_DECLARE_COHERENT
select GENERIC_ATOMIC64
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index aace62d42288..059791fd394f 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -3,7 +3,7 @@
#define __ASM_SH_ATOMIC_GRB_H
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -23,7 +23,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -45,7 +45,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int res, tmp; \
\
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index ee523bd2120f..7665de9d00d0 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -11,7 +11,7 @@
*/
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -21,7 +21,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
@@ -35,7 +35,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 1d06e4d288dc..b63dcfbfa14e 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -17,7 +17,7 @@
*/
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
\
@@ -32,7 +32,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp; \
\
@@ -50,7 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long res, temp; \
\
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 7c2a8a703b9a..528bfeda78f5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -19,8 +19,8 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
@@ -30,8 +30,8 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#endif /* CONFIG_CPU_J2 */
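[Editor's note] The sh hunks above follow the same pattern as the other architectures in this series: the raw operations gain an arch_ prefix so the generic atomic layer can wrap them with KASAN/KCSAN instrumentation. A minimal sketch of what such a generated wrapper looks like (illustrative only, not the exact generated code):

static __always_inline int
atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* sanitizer hook sees the access */
	return arch_atomic_read(v);		/* arch_ implementation defined above */
}

Callers keep using the unprefixed atomic_*() names; only the architecture-provided symbols are renamed.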
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index e9501d85c278..0ed9b3f4a577 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -45,7 +45,7 @@ extern void __xchg_called_with_bad_pointer(void);
__xchg__res; \
})
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
@@ -63,7 +63,7 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 6552a088dc97..7b8dead2723d 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -2,7 +2,7 @@
#ifndef __ASM_SH_MMZONE_H
#define __ASM_SH_MMZONE_H
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
#include <linux/numa.h>
extern struct pglist_data *node_data[];
@@ -31,7 +31,7 @@ static inline void
setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
}
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
/* Platform specific mem init */
void __init plat_mem_setup(void);
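[Editor's note] CONFIG_NEED_MULTIPLE_NODES only distinguished DISCONTIGMEM/NUMA builds from flat ones; with DISCONTIGMEM removed it is always equal to CONFIG_NUMA, so the guard is renamed here and in the matching mmzone.h/Kconfig hunks below. The structure the option selects is unchanged, roughly (a sketch of the two configurations, not new code in this patch):

#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
#define NODE_DATA(nid)	(node_data[nid])	/* one pglist_data per online node */
#else
#define NODE_DATA(nid)	(&contig_page_data)	/* single flat node */
#endif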
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 0e6b0be25e33..a9e98233c4d4 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -30,7 +30,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#define __pte_free_tlb(tlb,pte,addr) \
do { \
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 27751e9470df..d7ddb1ec86a0 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -59,8 +59,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define FIRST_USER_ADDRESS 0UL
-
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
deleted file mode 100644
index d311f00ed530..000000000000
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_UNALIGNED_SH4A_H
-#define __ASM_SH_UNALIGNED_SH4A_H
-
-/*
- * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 64-bit accesses are done through shifting and masking
- * relative to the endianness. Unaligned stores are not supported by the
- * instruction encoding, so these continue to use the packed
- * struct.
- *
- * The same note as with the movli.l/movco.l pair applies here, as long
- * as the load is guaranteed to be inlined, nothing else will hook in to
- * r0 and we get the return value for free.
- *
- * NOTE: Due to the fact we require r0 encoding, care should be taken to
- * avoid mixing these heavily with other r0 consumers, such as the atomic
- * ops. Failure to adhere to this can result in the compiler running out
- * of spill registers and blowing up when building at low optimization
- * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
- */
-#include <linux/unaligned/packed_struct.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
- return p[0] | p[1] << 8;
-#else
- return p[0] << 8 | p[1];
-#endif
-}
-
-static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
-{
- unsigned long unaligned;
-
- __asm__ __volatile__ (
- "movua.l @%1, %0\n\t"
- : "=z" (unaligned)
- : "r" (p)
- );
-
- return unaligned;
-}
-
-/*
- * Even though movua.l supports auto-increment on the read side, it can
- * only store to r0 due to instruction encoding constraints, so just let
- * the compiler sort it out on its own.
- */
-static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
- return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
- sh4a_get_unaligned_cpu32(p);
-#else
- return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
- sh4a_get_unaligned_cpu32(p + 4);
-#endif
-}
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
-}
-
-static inline void nonnative_put_le16(u16 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
-}
-
-static inline void nonnative_put_le32(u32 val, u8 *p)
-{
- nonnative_put_le16(val, p);
- nonnative_put_le16(val >> 16, p + 2);
-}
-
-static inline void nonnative_put_le64(u64 val, u8 *p)
-{
- nonnative_put_le32(val, p);
- nonnative_put_le32(val >> 32, p + 4);
-}
-
-static inline void nonnative_put_be16(u16 val, u8 *p)
-{
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void nonnative_put_be32(u32 val, u8 *p)
-{
- nonnative_put_be16(val >> 16, p);
- nonnative_put_be16(val, p + 2);
-}
-
-static inline void nonnative_put_be64(u64 val, u8 *p)
-{
- nonnative_put_be32(val >> 32, p);
- nonnative_put_be32(val, p + 4);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu16(val, p);
-#else
- nonnative_put_le16(val, p);
-#endif
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu32(val, p);
-#else
- nonnative_put_le32(val, p);
-#endif
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu64(val, p);
-#else
- nonnative_put_le64(val, p);
-#endif
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu16(val, p);
-#else
- nonnative_put_be16(val, p);
-#endif
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu32(val, p);
-#else
- nonnative_put_be32(val, p);
-#endif
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu64(val, p);
-#else
- nonnative_put_be64(val, p);
-#endif
-}
-
-/*
- * While it's a bit non-obvious, even though the generic le/be wrappers
- * use the __get/put_xxx prefixing, they actually wrap in to the
- * non-prefixed get/put_xxx variants as provided above.
- */
-#include <linux/unaligned/generic.h>
-
-#ifdef __LITTLE_ENDIAN
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#else
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* __ASM_SH_UNALIGNED_SH4A_H */
diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h
deleted file mode 100644
index 0c92e2c73af4..000000000000
--- a/arch/sh/include/asm/unaligned.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SH_UNALIGNED_H
-#define _ASM_SH_UNALIGNED_H
-
-#ifdef CONFIG_CPU_SH4A
-/* SH-4A can handle unaligned loads in a relatively neutered fashion. */
-#include <asm/unaligned-sh4a.h>
-#else
-/* Otherwise, SH can't handle unaligned accesses. */
-#include <asm-generic/unaligned.h>
-#endif
-
-#endif /* _ASM_SH_UNALIGNED_H */
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 756100b01e84..1c7f358ef0be 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -383,23 +383,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 1aa508eb0823..717de05c81f4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -186,7 +186,7 @@ unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
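[Editor's note] The get_wchan() hunks in this series replace the open-coded `p->state == TASK_RUNNING` test with the new task_is_running() helper, ahead of the scheduler reworking how the task state field is accessed. Assumed shape of the helper (a sketch; the field name reflects the related task-state rename):

#define task_is_running(task)	(READ_ONCE((task)->__state) == TASK_RUNNING)

The call sites are behaviourally identical; only the access to the task state is centralised.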
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 372acdc9033e..65924d9ec245 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
per_cpu_trap_init();
- preempt_disable();
-
notify_cpu_starting(cpu);
local_irq_enable();
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index f47a0dc55445..e0a70be77d84 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 7a989eed3b18..76af6db9daa2 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -46,7 +46,7 @@ static int __init topology_init(void)
{
int i, ret;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
for_each_online_node(i)
register_one_node(i);
#endif
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index f5beecdac693..e76b22157099 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
BUILD_TRAP_HANDLER(nmi)
{
- unsigned int cpu = smp_processor_id();
TRAP_HANDLER_DECL;
arch_ftrace_nmi_enter();
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index d551a9cac41e..ba569cfb4368 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -120,7 +120,7 @@ config NODES_SHIFT
int
default "3" if CPU_SUBTYPE_SHX3
default "1"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config ARCH_FLATMEM_ENABLE
def_bool y
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 168d7d4dd735..ce26c7f8950a 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -211,7 +211,7 @@ void __init allocate_pgdat(unsigned int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
NODE_DATA(nid) = memblock_alloc_try_nid(
sizeof(struct pglist_data),
SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 164a5254c91c..c5fa7932b550 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -59,6 +59,7 @@ config SPARC32
select CLZ_TAB
select HAVE_UID16
select OLD_SIGACTION
+ select ZONE_DMA
config SPARC64
def_bool 64BIT
@@ -141,10 +142,6 @@ config HIGHMEM
default y if SPARC32
select KMAP_LOCAL
-config ZONE_DMA
- bool
- default y if SPARC32
-
config GENERIC_ISA_DMA
bool
default y if SPARC32
@@ -265,7 +262,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 4 5 if SPARC64
default "5"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index efad5532f169..d775daa83d12 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -18,30 +18,30 @@
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
-int atomic_add_return(int, atomic_t *);
-int atomic_fetch_add(int, atomic_t *);
-int atomic_fetch_and(int, atomic_t *);
-int atomic_fetch_or(int, atomic_t *);
-int atomic_fetch_xor(int, atomic_t *);
-int atomic_cmpxchg(atomic_t *, int, int);
-int atomic_xchg(atomic_t *, int);
-int atomic_fetch_add_unless(atomic_t *, int, int);
-void atomic_set(atomic_t *, int);
+int arch_atomic_add_return(int, atomic_t *);
+int arch_atomic_fetch_add(int, atomic_t *);
+int arch_atomic_fetch_and(int, atomic_t *);
+int arch_atomic_fetch_or(int, atomic_t *);
+int arch_atomic_fetch_xor(int, atomic_t *);
+int arch_atomic_cmpxchg(atomic_t *, int, int);
+int arch_atomic_xchg(atomic_t *, int);
+int arch_atomic_fetch_add_unless(atomic_t *, int, int);
+void arch_atomic_set(atomic_t *, int);
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_add(i, v) ((void)arch_atomic_add_return( (int)(i), (v)))
+#define arch_atomic_sub(i, v) ((void)arch_atomic_add_return(-(int)(i), (v)))
-#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
-#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
-#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
+#define arch_atomic_and(i, v) ((void)arch_atomic_fetch_and((i), (v)))
+#define arch_atomic_or(i, v) ((void)arch_atomic_fetch_or((i), (v)))
+#define arch_atomic_xor(i, v) ((void)arch_atomic_fetch_xor((i), (v)))
-#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
-#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
+#define arch_atomic_sub_return(i, v) (arch_atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_fetch_sub(i, v) (arch_atomic_fetch_add (-(int)(i), (v)))
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 6b235d3d1d9d..077891686715 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,23 +14,23 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op) \
-void atomic_##op(int, atomic_t *); \
-void atomic64_##op(s64, atomic64_t *);
+void arch_atomic_##op(int, atomic_t *); \
+void arch_atomic64_##op(s64, atomic64_t *);
#define ATOMIC_OP_RETURN(op) \
-int atomic_##op##_return(int, atomic_t *); \
-s64 atomic64_##op##_return(s64, atomic64_t *);
+int arch_atomic_##op##_return(int, atomic_t *); \
+s64 arch_atomic64_##op##_return(s64, atomic64_t *);
#define ATOMIC_FETCH_OP(op) \
-int atomic_fetch_##op(int, atomic_t *); \
-s64 atomic64_fetch_##op(s64, atomic64_t *);
+int arch_atomic_fetch_##op(int, atomic_t *); \
+s64 arch_atomic64_fetch_##op(s64, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
@@ -49,18 +49,18 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return xchg(&v->counter, new);
+ return arch_xchg(&v->counter, new);
}
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+s64 arch_atomic64_dec_if_positive(atomic64_t *v);
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index a53d744d4212..27a57a3a7597 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x;
}
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
@@ -55,7 +55,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -64,7 +64,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
})
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
-#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
+#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h>
@@ -72,9 +72,9 @@ u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 316faa0130ba..8c39a9981187 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -52,7 +52,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val;
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
({ __typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
@@ -168,7 +168,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -189,20 +189,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
case 8: return __cmpxchg(ptr, old, new, size);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
}
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index 6543fb97a849..a236d8aa893a 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -2,7 +2,7 @@
#ifndef _SPARC64_MMZONE_H
#define _SPARC64_MMZONE_H
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
@@ -13,6 +13,6 @@ extern struct pglist_data *node_data[];
extern int numa_cpu_lookup_table[];
extern cpumask_t numa_cpumask_lookup_table[];
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
#endif /* _SPARC64_MMZONE_H */
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 9d353e6dc5a9..4f73e87b22a3 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -51,7 +51,6 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
-#define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel pmd_populate
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index a8dafc550985..7b5561d17ab1 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -67,7 +67,6 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
-#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
void pgtable_free(void *table, bool is_page);
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index a5cf79c149fe..ebaf374b55ab 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -48,7 +48,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
#define PTRS_PER_PMD 64
#define PTRS_PER_PGD 256
#define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
-#define FIRST_USER_ADDRESS 0UL
#define PTE_SIZE (PTRS_PER_PTE*4)
#define PAGE_NONE SRMMU_PAGE_NONE
@@ -433,4 +432,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
+#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
+
#endif /* !(_SPARC_PGTABLE_H) */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 550d3904de65..e0ee48ec3903 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -95,9 +95,6 @@ bool kern_addr_valid(unsigned long addr);
#define PTRS_PER_PUD (1UL << PUD_BITS)
#define PTRS_PER_PGD (1UL << PGDIR_BITS)
-/* Kernel has a separate 44bit address space. */
-#define FIRST_USER_ADDRESS 0UL
-
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
@@ -377,8 +374,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
#define pgprot_noncached pgprot_noncached
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
@@ -1121,6 +1117,8 @@ extern unsigned long cmdline_memory_size;
asmlinkage void do_sparc64_fault(struct pt_regs *regs);
+#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
+
#ifdef CONFIG_HUGETLB_PAGE
#define pud_leaf_size pud_leaf_size
diff --git a/arch/sparc/include/asm/unaligned.h b/arch/sparc/include/asm/unaligned.h
deleted file mode 100644
index 7971d89d2f54..000000000000
--- a/arch/sparc/include/asm/unaligned.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SPARC_UNALIGNED_H
-#define _ASM_SPARC_UNALIGNED_H
-
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif /* _ASM_SPARC_UNALIGNED_H */
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 059f0eb678e0..8a1a83bbb6d5 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -362,7 +362,7 @@ struct vio_driver {
struct list_head node;
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
- int (*remove)(struct vio_dev *dev);
+ void (*remove)(struct vio_dev *dev);
void (*shutdown)(struct vio_dev *dev);
unsigned long driver_data;
struct device_driver driver;
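[Editor's note] vio_driver::remove now returns void, in line with the wider effort to stop bus remove() callbacks returning error codes that the driver core ignores anyway. A hypothetical driver under the new prototype (names invented purely for illustration):

static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	return 0;			/* claim the device */
}

static void example_remove(struct vio_dev *vdev)
{
	/* tear down; nothing can be reported back to the driver core */
}

static struct vio_driver example_driver = {
	.name	= "example",
	.probe	= example_probe,
	.remove	= example_remove,	/* optional: ds.c below drops its empty stub */
};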
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 848a22fbac20..92675dc380fa 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -120,6 +120,8 @@
#define SO_PREFER_BUSY_POLL 0x0048
#define SO_BUSY_POLL_BUDGET 0x0049
+#define SO_NETNS_COOKIE 0x0050
+
#if !defined(__KERNEL__)
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 522e5b51050c..4a5bdb0df779 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1236,11 +1236,6 @@ out_err:
return err;
}
-static int ds_remove(struct vio_dev *vdev)
-{
- return 0;
-}
-
static const struct vio_device_id ds_match[] = {
{
.type = "domain-services-port",
@@ -1251,7 +1246,6 @@ static const struct vio_device_id ds_match[] = {
static struct vio_driver ds_driver = {
.id_table = ds_match,
.probe = ds_probe,
- .remove = ds_remove,
.name = "ds",
};
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 217c21a6986a..4c05a4ee6a0e 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -346,23 +346,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 3b9794978e5b..93983d6d431d 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -376,8 +376,7 @@ unsigned long get_wchan(struct task_struct *task)
struct reg_window32 *rw;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
fp = task_thread_info(task)->ksp + bias;
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 7afd0a859a78..d33c58a58d4f 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -674,8 +674,7 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ret = 0;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
tp = task_thread_info(task);
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 50c127ab46d5..22b148e5a5f8 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
*/
arch_cpu_pre_starting(arg);
- preempt_disable();
cpu = smp_processor_id();
notify_cpu_starting(cpu);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index e38d8bf454e8..0224d8f19ed6 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -138,9 +138,6 @@ void smp_callin(void)
set_cpu_online(cpuid, true);
- /* idle thread is expected to have preempt disabled */
- preempt_disable();
-
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -1546,7 +1543,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
size_t align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = cpu_to_node(cpu);
void *ptr;
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index ac8677c3841e..3bcc4ddc6911 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index b9e1c0e735b7..603f5a821502 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -488,7 +488,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 4f57056ed463..348a88691219 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -105,10 +105,10 @@ static int vio_device_remove(struct device *dev)
* routines to do so at the moment. TBD
*/
- return drv->remove(vdev);
+ drv->remove(vdev);
}
- return 1;
+ return 0;
}
static ssize_t devspec_show(struct device *dev,
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 281fa634bb1a..8b81d0f00c97 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
#define ATOMIC_FETCH_OP(op, c_op) \
-int atomic_fetch_##op(int i, atomic_t *v) \
+int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -41,10 +41,10 @@ int atomic_fetch_##op(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_fetch_##op);
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
#define ATOMIC_OP_RETURN(op, c_op) \
-int atomic_##op##_return(int i, atomic_t *v) \
+int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -55,7 +55,7 @@ int atomic_##op##_return(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(arch_atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=)
@@ -67,7 +67,7 @@ ATOMIC_FETCH_OP(xor, ^=)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-int atomic_xchg(atomic_t *v, int new)
+int arch_atomic_xchg(atomic_t *v, int new)
{
int ret;
unsigned long flags;
@@ -78,9 +78,9 @@ int atomic_xchg(atomic_t *v, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_xchg);
+EXPORT_SYMBOL(arch_atomic_xchg);
-int atomic_cmpxchg(atomic_t *v, int old, int new)
+int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -93,9 +93,9 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_cmpxchg);
+EXPORT_SYMBOL(arch_atomic_cmpxchg);
-int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,10 +107,10 @@ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_fetch_add_unless);
+EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
/* Atomic operations are already serializing */
-void atomic_set(atomic_t *v, int i)
+void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
@@ -118,7 +118,7 @@ void atomic_set(atomic_t *v, int i)
v->counter = i;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
-EXPORT_SYMBOL(atomic_set);
+EXPORT_SYMBOL(arch_atomic_set);
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
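[Editor's note] sparc32 has no hardware atomics wide enough for atomic_t, so these library routines serialise through a small hash of spinlocks keyed by the variable's address; the hunks above only add the arch_ prefix. Roughly how the hashing works (a sketch with assumed constants; see the top of atomic32.c for the real definitions):

#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a) \
	(&__atomic_hash[(((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1)])

static spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];	/* one lock per bucket */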
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 456b65a30ecf..8245d4a97301 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -19,7 +19,7 @@
*/
#define ATOMIC_OP(op) \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -30,11 +30,11 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op); \
-EXPORT_SYMBOL(atomic_##op);
+ENDPROC(arch_atomic_##op); \
+EXPORT_SYMBOL(arch_atomic_##op);
#define ATOMIC_OP_RETURN(op) \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -45,11 +45,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic_##op##_return);
+ENDPROC(arch_atomic_##op##_return); \
+EXPORT_SYMBOL(arch_atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
-ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -60,8 +60,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic_fetch_##op);
+ENDPROC(arch_atomic_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
ATOMIC_OP(add)
ATOMIC_OP_RETURN(add)
@@ -85,7 +85,7 @@ ATOMIC_FETCH_OP(xor)
#undef ATOMIC_OP
#define ATOMIC64_OP(op) \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -96,11 +96,11 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op); \
-EXPORT_SYMBOL(atomic64_##op);
+ENDPROC(arch_atomic64_##op); \
+EXPORT_SYMBOL(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -111,11 +111,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
+ENDPROC(arch_atomic64_##op##_return); \
+EXPORT_SYMBOL(arch_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
-ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -126,8 +126,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+ENDPROC(arch_atomic64_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic64_fetch_##op);
ATOMIC64_OP(add)
ATOMIC64_OP_RETURN(add)
@@ -150,7 +150,7 @@ ATOMIC64_FETCH_OP(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o0], %g1
brlez,pn %g1, 3f
@@ -162,5 +162,5 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
3: retl
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_dec_if_positive)
-EXPORT_SYMBOL(atomic64_dec_if_positive)
+ENDPROC(arch_atomic64_dec_if_positive)
+EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 04d8790f6c32..0f49fada2093 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -177,10 +177,8 @@ static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
return sun4u_hugepage_shift_to_tte(entry, shift);
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writeable)
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- unsigned int shift = huge_page_shift(hstate_vma(vma));
pte_t pte;
pte = hugepage_shift_to_tte(entry, shift);
@@ -188,7 +186,7 @@ pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
#ifdef CONFIG_SPARC64
/* If this vma has ADI enabled on it, turn on TTE.mcd
*/
- if (vma->vm_flags & VM_SPARC_ADI)
+ if (flags & VM_SPARC_ADI)
return pte_mkmcd(pte);
else
return pte_mknotmcd(pte);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index e454f179cf5d..1b23639e2fcd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -27,6 +27,7 @@
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>
+#include <linux/bootmem_info.h>
#include <asm/head.h>
#include <asm/page.h>
@@ -903,7 +904,7 @@ struct node_mem_mask {
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct mdesc_mlgroup {
u64 node;
@@ -1059,7 +1060,7 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
SMP_CACHE_BYTES, nid);
@@ -1080,7 +1081,7 @@ static void __init allocate_node_data(int nid)
static void init_node_masks_nonnuma(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
#endif
@@ -1090,7 +1091,7 @@ static void init_node_masks_nonnuma(void)
node_masks[0].match = 0;
num_node_masks = 1;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
for (i = 0; i < NR_CPUS; i++)
numa_cpu_lookup_table[i] = 0;
@@ -1098,7 +1099,7 @@ static void init_node_masks_nonnuma(void)
#endif
}
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
@@ -2487,7 +2488,7 @@ int page_in_phys_avail(unsigned long paddr)
static void __init register_page_bootmem_info(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
for_each_online_node(i)
diff --git a/arch/um/configs/kunit_defconfig b/arch/um/configs/kunit_defconfig
deleted file mode 100644
index 9235b7d42d38..000000000000
--- a/arch/um/configs/kunit_defconfig
+++ /dev/null
@@ -1,3 +0,0 @@
-CONFIG_KUNIT=y
-CONFIG_KUNIT_TEST=y
-CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 1c70a31e7c5b..fbc623d2cc07 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -32,7 +32,7 @@ static irqreturn_t line_interrupt(int irq, void *data)
*
* Should be called while holding line->lock (this does not modify data).
*/
-static int write_room(struct line *line)
+static unsigned int write_room(struct line *line)
{
int n;
@@ -47,11 +47,11 @@ static int write_room(struct line *line)
return n - 1;
}
-int line_write_room(struct tty_struct *tty)
+unsigned int line_write_room(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
unsigned long flags;
- int room;
+ unsigned int room;
spin_lock_irqsave(&line->lock, flags);
room = write_room(line);
@@ -60,11 +60,11 @@ int line_write_room(struct tty_struct *tty)
return room;
}
-int line_chars_in_buffer(struct tty_struct *tty)
+unsigned int line_chars_in_buffer(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
unsigned long flags;
- int ret;
+ unsigned int ret;
spin_lock_irqsave(&line->lock, flags);
/* write_room subtracts 1 for the needed NULL, so we readd it.*/
@@ -211,11 +211,6 @@ out_up:
return ret;
}
-void line_set_termios(struct tty_struct *tty, struct ktermios * old)
-{
- /* nothing */
-}
-
void line_throttle(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h
index 01d21e76144f..bdb16b96e76f 100644
--- a/arch/um/drivers/line.h
+++ b/arch/um/drivers/line.h
@@ -66,11 +66,10 @@ extern int line_setup(char **conf, unsigned nlines, char **def,
char *init, char *name);
extern int line_write(struct tty_struct *tty, const unsigned char *buf,
int len);
-extern void line_set_termios(struct tty_struct *tty, struct ktermios * old);
-extern int line_chars_in_buffer(struct tty_struct *tty);
+extern unsigned int line_chars_in_buffer(struct tty_struct *tty);
extern void line_flush_buffer(struct tty_struct *tty);
extern void line_flush_chars(struct tty_struct *tty);
-extern int line_write_room(struct tty_struct *tty);
+extern unsigned int line_write_room(struct tty_struct *tty);
extern void line_throttle(struct tty_struct *tty);
extern void line_unthrottle(struct tty_struct *tty);
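[Editor's note] These UML tty hunks track a treewide change: the write_room() and chars_in_buffer() tty callbacks now return unsigned int, and empty set_termios() stubs are dropped because the core treats that callback as optional. The relevant tty_operations members end up looking roughly like this (a sketch, not the full structure):

struct tty_operations {
	/* ... */
	unsigned int	(*write_room)(struct tty_struct *tty);
	unsigned int	(*chars_in_buffer)(struct tty_struct *tty);
	/* .set_termios may be left NULL, so UML's empty stub can go away */
};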
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 6d00af25ec6b..6ead1e240457 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/sched/debug.h>
#include <linux/proc_fs.h>
@@ -140,7 +141,7 @@ void mconsole_proc(struct mc_request *req)
mconsole_reply(req, "Proc not available", 1, 0);
goto out;
}
- file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
+ file = file_open_root_mnt(mnt, ptr, O_RDONLY, 0);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 6476b28d7c5e..41eae2e8fb65 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -99,7 +99,6 @@ static const struct tty_operations ssl_ops = {
.chars_in_buffer = line_chars_in_buffer,
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
- .set_termios = line_set_termios,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.install = ssl_install,
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 37b127941e6f..e8b762f4d8c2 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -106,7 +106,6 @@ static const struct tty_operations console_ops = {
.chars_in_buffer = line_chars_in_buffer,
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
- .set_termios = line_set_termios,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.hangup = line_hangup,
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 2bbf28cf3aa9..8ec7cd46dd96 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -19,7 +19,6 @@
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index 32106d31e4ab..8256ecc5b919 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -23,7 +23,6 @@
#define PTRS_PER_PTE 1024
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD 1024
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index 7e6a4180db9d..9289a86643a9 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -41,7 +41,6 @@
#endif
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c5011064b5dd..457a38db368b 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -369,7 +369,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page, sp, ip;
bool seen_sched = 0;
- if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+ if ((p == NULL) || (p == current) || task_is_running(p))
return 0;
stack_page = (unsigned long) task_stack_page(p);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 3d91f89fd852..9ee19e566da3 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -41,7 +41,7 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
- if (syscall >= 0 && syscall <= __NR_syscall_max)
+ if (syscall >= 0 && syscall < __NR_syscalls)
PT_REGS_SET_SYSCALL_RETURN(regs,
EXECUTE_SYSCALL(syscall, regs));
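[Editor's note] The UML syscall bound changes from `<= __NR_syscall_max` to `< __NR_syscalls`. Both forms reject out-of-range numbers only because of the conventional relationship sketched below (stated as an assumption, not taken from this patch); the new form matches how the syscall table itself is dimensioned:

/*
 * Assumed invariant: __NR_syscalls == __NR_syscall_max + 1
 *                                  == number of entries in the syscall table
 */
if (syscall >= 0 && syscall < __NR_syscalls)	/* index can never run one past the table */
	/* dispatch through the table */;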
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 74e07e748a9b..9512253947d5 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/utsname.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0045e1b44190..49270655e827 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -33,6 +33,7 @@ config X86_64
select NEED_DMA_MAP_STATE
select SWIOTLB
select ARCH_HAS_ELFCORE_COMPAT
+ select ZONE_DMA32
config FORCE_DYNAMIC_FTRACE
def_bool y
@@ -63,7 +64,7 @@ config X86
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM)
select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG
- select ARCH_ENABLE_SPLIT_PMD_PTLOCK if X86_64 || X86_PAE
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if (PGTABLE_LEVELS > 2) && (X86_64 || X86_PAE)
select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CACHE_LINE_SIZE
@@ -93,6 +94,7 @@ config X86
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -103,8 +105,8 @@ config X86
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
- select ARCH_SUPPORTS_LTO_CLANG if X86_64
- select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64
+ select ARCH_SUPPORTS_LTO_CLANG
+ select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
@@ -113,6 +115,7 @@ config X86
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANT_HUGE_PMD_SHARE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_THP_SWAP if X86_64
@@ -343,9 +346,6 @@ config ARCH_SUSPEND_POSSIBLE
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
-config ZONE_DMA32
- def_bool y if X86_64
-
config AUDIT_ARCH
def_bool y if X86_64
@@ -393,16 +393,6 @@ config CC_HAS_SANE_STACKPROTECTOR
menu "Processor type and features"
-config ZONE_DMA
- bool "DMA memory allocation support" if EXPERT
- default y
- help
- DMA memory allocation support allows devices with less than 32-bit
- addressing to allocate within the first 16MB of address space.
- Disable if no such devices will be used.
-
- If unsure, say Y.
-
config SMP
bool "Symmetric multi-processing support"
help
@@ -1597,7 +1587,7 @@ config NODES_SHIFT
default "10" if MAXSMP
default "6" if X86_64
default "3"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -1693,35 +1683,6 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
Set whether the default state of memory_corruption_check is
on or off.
-config X86_RESERVE_LOW
- int "Amount of low memory, in kilobytes, to reserve for the BIOS"
- default 64
- range 4 640
- help
- Specify the amount of low memory to reserve for the BIOS.
-
- The first page contains BIOS data structures that the kernel
- must not use, so that page must always be reserved.
-
- By default we reserve the first 64K of physical RAM, as a
- number of BIOSes are known to corrupt that memory range
- during events such as suspend/resume or monitor cable
- insertion, so it must not be used by the kernel.
-
- You can set this to 4 if you are absolutely sure that you
- trust the BIOS to get all its memory reservations and usages
- right. If you know your BIOS have problems beyond the
- default 64K area, you can set this to 640 to avoid using the
- entire low memory range.
-
- If you have doubts about the BIOS (e.g. suspend/resume does
- not work or there's kernel crashes after certain hardware
- hotplug events) then you might want to enable
- X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
- typical corruption patterns.
-
- Leave this to the default value of 64 if you are unsure.
-
config MATH_EMULATION
bool
depends on MODIFY_LDT_SYSCALL
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index c77c5d8a7b3e..53eceaf71ab7 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -178,11 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
-ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
- -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
-endif
-
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -202,7 +197,13 @@ ifdef CONFIG_RETPOLINE
endif
endif
-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
+
+ifdef CONFIG_LTO_CLANG
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
+endif
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none
@@ -256,7 +257,7 @@ drivers-$(CONFIG_FB) += arch/x86/video/
boot := arch/x86/boot
-BOOT_TARGETS = bzdisk fdimage fdimage144 fdimage288 isoimage
+BOOT_TARGETS = bzdisk fdimage fdimage144 fdimage288 hdimage isoimage
PHONY += bzImage $(BOOT_TARGETS)
@@ -314,8 +315,9 @@ define archhelp
echo ' fdimage - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
echo ' fdimage144 - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
echo ' fdimage288 - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)'
+ echo ' hdimage - Create a BIOS/EFI hard disk image (arch/x86/boot/hdimage)'
echo ' isoimage - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
- echo ' bzdisk/fdimage*/isoimage also accept:'
+ echo ' bzdisk/fdimage*/hdimage/isoimage also accept:'
echo ' FDARGS="..." arguments for the booted kernel'
echo ' FDINITRD=file initrd for the booted kernel'
echo ''
diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore
index 9cc7f1357b9b..1189be057ebd 100644
--- a/arch/x86/boot/.gitignore
+++ b/arch/x86/boot/.gitignore
@@ -11,3 +11,4 @@ setup.elf
fdimage
mtools.conf
image.iso
+hdimage
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index fe605205b4ce..dfbc26a8e924 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -29,7 +29,7 @@ KCOV_INSTRUMENT := n
SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
targets := vmlinux.bin setup.bin setup.elf bzImage
-targets += fdimage fdimage144 fdimage288 image.iso mtools.conf
+targets += fdimage fdimage144 fdimage288 image.iso hdimage
subdir- := compressed
setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
@@ -115,47 +115,49 @@ $(obj)/compressed/vmlinux: FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
# Set this if you want to pass append arguments to the
-# bzdisk/fdimage/isoimage kernel
+# bzdisk/fdimage/hdimage/isoimage kernel
FDARGS =
-# Set this if you want an initrd included with the
-# bzdisk/fdimage/isoimage kernel
+# Set this if you want one or more initrds included in the image
FDINITRD =
-image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
+imgdeps = $(obj)/bzImage $(obj)/mtools.conf $(src)/genimage.sh
$(obj)/mtools.conf: $(src)/mtools.conf.in
sed -e 's|@OBJ@|$(obj)|g' < $< > $@
+targets += mtools.conf
+
+# genimage.sh requires bash, but it also has a bunch of other
+# external dependencies.
quiet_cmd_genimage = GENIMAGE $3
-cmd_genimage = sh $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
- $(obj)/mtools.conf '$(image_cmdline)' $(FDINITRD)
+cmd_genimage = $(BASH) $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
+ $(obj)/mtools.conf '$(FDARGS)' $(FDINITRD)
-PHONY += bzdisk fdimage fdimage144 fdimage288 isoimage bzlilo install
+PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage install
# This requires write access to /dev/fd0
-bzdisk: $(obj)/bzImage $(obj)/mtools.conf
+# All images require syslinux to be installed; hdimage also requires
+# EDK2/OVMF if the kernel is compiled with the EFI stub.
+bzdisk: $(imgdeps)
$(call cmd,genimage,bzdisk,/dev/fd0)
-# These require being root or having syslinux 2.02 or higher installed
-fdimage fdimage144: $(obj)/bzImage $(obj)/mtools.conf
+fdimage fdimage144: $(imgdeps)
$(call cmd,genimage,fdimage144,$(obj)/fdimage)
@$(kecho) 'Kernel: $(obj)/fdimage is ready'
-fdimage288: $(obj)/bzImage $(obj)/mtools.conf
+fdimage288: $(imgdeps)
$(call cmd,genimage,fdimage288,$(obj)/fdimage)
@$(kecho) 'Kernel: $(obj)/fdimage is ready'
-isoimage: $(obj)/bzImage
+hdimage: $(imgdeps)
+ $(call cmd,genimage,hdimage,$(obj)/hdimage)
+ @$(kecho) 'Kernel: $(obj)/hdimage is ready'
+
+isoimage: $(imgdeps)
$(call cmd,genimage,isoimage,$(obj)/image.iso)
@$(kecho) 'Kernel: $(obj)/image.iso is ready'
-bzlilo:
- if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
- if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
- cat $(obj)/bzImage > $(INSTALL_PATH)/vmlinuz
- cp System.map $(INSTALL_PATH)/
- if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
-
install:
- sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh \
+ $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 6e5522aebbbd..431bf7f846c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -30,6 +30,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
KBUILD_CFLAGS := -m$(BITS) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+KBUILD_CFLAGS += -Wundef
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone
@@ -48,10 +49,10 @@ KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
KBUILD_CFLAGS += $(CLANG_FLAGS)
-# sev-es.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly inludes inat-table.h which is generated during
# compilation and stored in $(objtree). Add the directory to the includes so
# that the compiler finds it even with out-of-tree builds (make O=/some/path).
-CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/
+CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -93,7 +94,7 @@ ifdef CONFIG_X86_64
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-y += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
- vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
+ vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index dde042f64cca..743f13ea25c1 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -172,7 +172,7 @@ void __puthex(unsigned long value)
}
}
-#if CONFIG_X86_NEED_RELOCS
+#ifdef CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len,
unsigned long virt_addr)
{
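
The #if -> #ifdef conversions here go hand in hand with the -Wundef flag added to the
compressed-boot CFLAGS above: "#if CONFIG_FOO" evaluates the macro (and now warns when it
is undefined), while "#ifdef CONFIG_FOO" only tests whether it is defined, which matches
how Kconfig emits bool options (defined to 1 or absent). A minimal stand-alone sketch,
using a hypothetical CONFIG_EXAMPLE_FEATURE rather than any real kernel symbol:

	/* Kconfig-style bool: either "#define CONFIG_EXAMPLE_FEATURE 1" or nothing at all. */

	#ifdef CONFIG_EXAMPLE_FEATURE		/* definedness test: silent under -Wundef */
	static void example_init(void) { }
	#endif

	#if CONFIG_EXAMPLE_FEATURE		/* value test: warns under -Wundef when the
						   symbol is undefined (it evaluates to 0) */
	static void example_init_alt(void) { }
	#endif

Both forms select the same code for a y/n bool option, but only the #ifdef form stays
warning-free once -Wundef is enabled, which is why misc.c and misc.h are switched.
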
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index e5612f035498..31139256859f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -79,7 +79,7 @@ struct mem_vector {
u64 size;
};
-#if CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_RANDOMIZE_BASE
/* kaslr.c */
void choose_random_location(unsigned long input,
unsigned long input_size,
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev.c
index 82041bd380e5..670e998fe930 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -13,7 +13,7 @@
#include "misc.h"
#include <asm/pgtable_types.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
@@ -117,7 +117,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
#include "../../lib/insn.c"
/* Include code for early handlers */
-#include "../../kernel/sev-es-shared.c"
+#include "../../kernel/sev-shared.c"
static bool early_setup_sev_es(void)
{
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
index 6a10d52a4145..0673fdfc1a11 100644
--- a/arch/x86/boot/genimage.sh
+++ b/arch/x86/boot/genimage.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
@@ -8,15 +8,24 @@
#
# Adapted from code in arch/x86/boot/Makefile by H. Peter Anvin and others
#
-# "make fdimage/fdimage144/fdimage288/isoimage" script for x86 architecture
+# "make fdimage/fdimage144/fdimage288/hdimage/isoimage"
+# script for x86 architecture
#
# Arguments:
-# $1 - fdimage format
-# $2 - target image file
-# $3 - kernel bzImage file
-# $4 - mtool configuration file
-# $5 - kernel cmdline
-# $6 - inird image file
+# $1 - fdimage format
+# $2 - target image file
+# $3 - kernel bzImage file
+# $4 - mtools configuration file
+# $5 - kernel cmdline
+# $6+ - initrd image file(s)
+#
+# This script requires:
+# bash
+# syslinux
+# mtools (for fdimage* and hdimage)
+# edk2/OVMF (for hdimage)
+#
+# Otherwise try to stick to POSIX shell commands...
#
# Use "make V=1" to debug this script
@@ -26,105 +35,237 @@ case "${KBUILD_VERBOSE}" in
;;
esac
-verify () {
- if [ ! -f "$1" ]; then
- echo "" 1>&2
- echo " *** Missing file: $1" 1>&2
- echo "" 1>&2
- exit 1
+# Exit the top-level shell with an error
+topshell=$$
+trap 'exit 1' USR1
+die() {
+ echo "" 1>&2
+ echo " *** $*" 1>&2
+ echo "" 1>&2
+ kill -USR1 $topshell
+}
+
+# Verify the existence and readability of a file
+verify() {
+ if [ ! -f "$1" -o ! -r "$1" ]; then
+ die "Missing file: $1"
fi
}
+diskfmt="$1"
+FIMAGE="$2"
+FBZIMAGE="$3"
+MTOOLSRC="$4"
+KCMDLINE="$5"
+shift 5 # Remaining arguments = initrd files
+
+export MTOOLSRC
-export MTOOLSRC=$4
-FIMAGE=$2
-FBZIMAGE=$3
-KCMDLINE=$5
-FDINITRD=$6
+# common options for dd
+dd='dd iflag=fullblock'
# Make sure the files actually exist
verify "$FBZIMAGE"
-genbzdisk() {
- verify "$MTOOLSRC"
- mformat a:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - a:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" a:initrd.img
+declare -a FDINITRDS
+irdpfx=' initrd='
+initrdopts_syslinux=''
+initrdopts_efi=''
+for f in "$@"; do
+ if [ -f "$f" -a -r "$f" ]; then
+ FDINITRDS=("${FDINITRDS[@]}" "$f")
+ fname="$(basename "$f")"
+ initrdopts_syslinux="${initrdopts_syslinux}${irdpfx}${fname}"
+ irdpfx=,
+ initrdopts_efi="${initrdopts_efi} initrd=${fname}"
fi
- mcopy $FBZIMAGE a:linux
+done
+
+# Read a $3-byte littleendian unsigned value at offset $2 from file $1
+le() {
+ local n=0
+ local m=1
+ for b in $(od -A n -v -j $2 -N $3 -t u1 "$1"); do
+ n=$((n + b*m))
+ m=$((m * 256))
+ done
+ echo $n
}
-genfdimage144() {
- verify "$MTOOLSRC"
- dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
- mformat v:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - v:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" v:initrd.img
- fi
- mcopy $FBZIMAGE v:linux
+# Get the EFI architecture name such that boot{name}.efi is the default
+# boot file name. Returns false with no output if the file is not an
+# EFI image or otherwise unknown.
+efiarch() {
+ [ -f "$1" ] || return
+ [ $(le "$1" 0 2) -eq 23117 ] || return # MZ magic
+ peoffs=$(le "$1" 60 4) # PE header offset
+ [ $peoffs -ge 64 ] || return
+ [ $(le "$1" $peoffs 4) -eq 17744 ] || return # PE magic
+ case $(le "$1" $((peoffs+4+20)) 2) in # PE type
+ 267) ;; # PE32
+ 523) ;; # PE32+
+ *) return 1 ;; # Invalid
+ esac
+ [ $(le "$1" $((peoffs+4+20+68)) 2) -eq 10 ] || return # EFI app
+ case $(le "$1" $((peoffs+4)) 2) in # Machine type
+ 332) echo i386 ;;
+ 450) echo arm ;;
+ 512) echo ia64 ;;
+ 20530) echo riscv32 ;;
+ 20580) echo riscv64 ;;
+ 20776) echo riscv128 ;;
+ 34404) echo x64 ;;
+ 43620) echo aa64 ;;
+ esac
}
-genfdimage288() {
- verify "$MTOOLSRC"
- dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
- mformat w:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - W:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" w:initrd.img
- fi
- mcopy $FBZIMAGE w:linux
+# Get the combined sizes in bytes of the files given, counting sparse
+# files as full length, and padding each file to a 4K block size
+filesizes() {
+ local t=0
+ local s
+ for s in $(ls -lnL "$@" 2>/dev/null | awk '/^-/{ print $5; }'); do
+ t=$((t + ((s+4095)/4096)*4096))
+ done
+ echo $t
}
-geniso() {
- tmp_dir=`dirname $FIMAGE`/isoimage
- rm -rf $tmp_dir
- mkdir $tmp_dir
- for i in lib lib64 share ; do
- for j in syslinux ISOLINUX ; do
- if [ -f /usr/$i/$j/isolinux.bin ] ; then
- isolinux=/usr/$i/$j/isolinux.bin
- fi
+# Expand directory names which should be in /usr/share into a list
+# of possible alternatives
+sharedirs() {
+ local dir file
+ for dir in /usr/share /usr/lib64 /usr/lib; do
+ for file; do
+ echo "$dir/$file"
+ echo "$dir/${file^^}"
done
- for j in syslinux syslinux/modules/bios ; do
- if [ -f /usr/$i/$j/ldlinux.c32 ]; then
- ldlinux=/usr/$i/$j/ldlinux.c32
- fi
+ done
+}
+efidirs() {
+ local dir file
+ for dir in /usr/share /boot /usr/lib64 /usr/lib; do
+ for file; do
+ echo "$dir/$file"
+ echo "$dir/${file^^}"
done
- if [ -n "$isolinux" -a -n "$ldlinux" ] ; then
- break
+ done
+}
+
+findsyslinux() {
+ local f="$(find -L $(sharedirs syslinux isolinux) \
+ -name "$1" -readable -type f -print -quit 2>/dev/null)"
+ if [ ! -f "$f" ]; then
+ die "Need a $1 file, please install syslinux/isolinux."
+ fi
+ echo "$f"
+ return 0
+}
+
+findovmf() {
+ local arch="$1"
+ shift
+ local -a names=(-false)
+ local name f
+ for name; do
+ names=("${names[@]}" -or -iname "$name")
+ done
+ for f in $(find -L $(efidirs edk2 ovmf) \
+ \( "${names[@]}" \) -readable -type f \
+ -print 2>/dev/null); do
+ if [ "$(efiarch "$f")" = "$arch" ]; then
+ echo "$f"
+ return 0
fi
done
- if [ -z "$isolinux" ] ; then
- echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
- exit 1
+ die "Need a $1 file for $arch, please install EDK2/OVMF."
+}
+
+do_mcopy() {
+ if [ ${#FDINITRDS[@]} -gt 0 ]; then
+ mcopy "${FDINITRDS[@]}" "$1"
+ fi
+ if [ -n "$efishell" ]; then
+ mmd "$1"EFI "$1"EFI/Boot
+ mcopy "$efishell" "$1"EFI/Boot/boot${kefiarch}.efi
fi
- if [ -z "$ldlinux" ] ; then
- echo 'Need an ldlinux.c32 file, please install syslinux/isolinux.'
- exit 1
+ if [ -n "$kefiarch" ]; then
+ echo linux "$KCMDLINE$initrdopts_efi" | \
+ mcopy - "$1"startup.nsh
fi
- cp $isolinux $tmp_dir
- cp $ldlinux $tmp_dir
- cp $FBZIMAGE $tmp_dir/linux
- echo "$KCMDLINE" > $tmp_dir/isolinux.cfg
- if [ -f "$FDINITRD" ] ; then
- cp "$FDINITRD" $tmp_dir/initrd.img
+ echo default linux "$KCMDLINE$initrdopts_syslinux" | \
+ mcopy - "$1"syslinux.cfg
+ mcopy "$FBZIMAGE" "$1"linux
+}
+
+genbzdisk() {
+ verify "$MTOOLSRC"
+ mformat -v 'LINUX_BOOT' a:
+ syslinux "$FIMAGE"
+ do_mcopy a:
+}
+
+genfdimage144() {
+ verify "$MTOOLSRC"
+ $dd if=/dev/zero of="$FIMAGE" bs=1024 count=1440 2>/dev/null
+ mformat -v 'LINUX_BOOT' v:
+ syslinux "$FIMAGE"
+ do_mcopy v:
+}
+
+genfdimage288() {
+ verify "$MTOOLSRC"
+ $dd if=/dev/zero of="$FIMAGE" bs=1024 count=2880 2>/dev/null
+ mformat -v 'LINUX_BOOT' w:
+ syslinux "$FIMAGE"
+ do_mcopy w:
+}
+
+genhdimage() {
+ verify "$MTOOLSRC"
+ mbr="$(findsyslinux mbr.bin)"
+ kefiarch="$(efiarch "$FBZIMAGE")"
+ if [ -n "$kefiarch" ]; then
+ # The efishell provides command line handling
+ efishell="$(findovmf $kefiarch shell.efi shell${kefiarch}.efi)"
+ ptype='-T 0xef' # EFI system partition, no GPT
fi
- genisoimage -J -r -input-charset=utf-8 -quiet -o $FIMAGE \
- -b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 \
- -boot-info-table $tmp_dir
- isohybrid $FIMAGE 2>/dev/null || true
- rm -rf $tmp_dir
+ sizes=$(filesizes "$FBZIMAGE" "${FDINITRDS[@]}" "$efishell")
+ # Allow 1% + 1 MiB for filesystem and partition table overhead,
+ # syslinux, and config files
+ megs=$(((sizes + sizes/100 + 2*1024*1024 - 1)/(1024*1024)))
+ $dd if=/dev/zero of="$FIMAGE" bs=$((1024*1024)) count=$megs 2>/dev/null
+ mpartition -I -c -s 32 -h 64 -t $megs $ptype -b 512 -a h:
+ $dd if="$mbr" of="$FIMAGE" bs=440 count=1 conv=notrunc 2>/dev/null
+ mformat -v 'LINUX_BOOT' -s 32 -h 64 -t $megs h:
+ syslinux --offset $((512*512)) "$FIMAGE"
+ do_mcopy h:
+}
+
+geniso() {
+ tmp_dir="$(dirname "$FIMAGE")/isoimage"
+ rm -rf "$tmp_dir"
+ mkdir "$tmp_dir"
+ isolinux=$(findsyslinux isolinux.bin)
+ ldlinux=$(findsyslinux ldlinux.c32)
+ cp "$isolinux" "$ldlinux" "$tmp_dir"
+ cp "$FBZIMAGE" "$tmp_dir"/linux
+ echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
+ cp "${FDINITRDS[@]}" "$tmp_dir"/
+ genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
+ -quiet -o "$FIMAGE" -b isolinux.bin \
+ -c boot.cat -no-emul-boot -boot-load-size 4 \
+ -boot-info-table "$tmp_dir"
+ isohybrid "$FIMAGE" 2>/dev/null || true
+ rm -rf "$tmp_dir"
}
-case $1 in
+rm -f "$FIMAGE"
+
+case "$diskfmt" in
bzdisk) genbzdisk;;
fdimage144) genfdimage144;;
fdimage288) genfdimage288;;
+ hdimage) genhdimage;;
isoimage) geniso;;
- *) echo 'Unknown image format'; exit 1;
+ *) die "Unknown image format: $diskfmt";;
esac
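
The efiarch() helper above is the least obvious part of the new script: purely from the
bzImage bytes it decides whether the kernel carries an EFI stub and for which machine
type, so genhdimage() knows whether to add an OVMF shell and an EFI system partition.
The decimal constants are just little-endian reads of the standard MZ/PE headers. Below
is a hedged C rendering of the same walk; the helper names and the hex spellings of the
constants are illustrative, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	/* Little-endian read of 'len' bytes at 'off'; mirrors the shell le() helper. */
	static uint64_t le(FILE *f, long off, int len)
	{
		unsigned char b[8] = { 0 };
		uint64_t v = 0;

		if (fseek(f, off, SEEK_SET) || fread(b, 1, len, f) != (size_t)len)
			return 0;
		for (int i = len - 1; i >= 0; i--)
			v = (v << 8) | b[i];
		return v;
	}

	/* Return the EFI boot-file suffix ("x64", "aa64", ...) or NULL, like efiarch(). */
	static const char *efiarch(FILE *f)
	{
		long pe;

		if (le(f, 0, 2) != 0x5a4d)			/* 23117: "MZ" DOS magic */
			return NULL;
		pe = (long)le(f, 60, 4);			/* e_lfanew: PE header offset */
		if (pe < 64 || le(f, pe, 4) != 0x4550)		/* 17744: "PE\0\0" */
			return NULL;
		switch (le(f, pe + 4 + 20, 2)) {		/* optional-header magic */
		case 0x10b: case 0x20b: break;			/* 267 PE32 / 523 PE32+ */
		default: return NULL;
		}
		if (le(f, pe + 4 + 20 + 68, 2) != 10)		/* subsystem: EFI application */
			return NULL;
		switch (le(f, pe + 4, 2)) {			/* COFF machine type */
		case 0x014c: return "i386";			/* 332 */
		case 0x8664: return "x64";			/* 34404 */
		case 0xaa64: return "aa64";			/* 43620 */
		default: return NULL;
		}
	}

When efiarch() reports an architecture, the script copies the OVMF shell to
EFI/Boot/boot<arch>.efi and writes a startup.nsh, so firmware boots get the same kernel
command line that syslinux.cfg provides for BIOS boots.
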
diff --git a/arch/x86/boot/mtools.conf.in b/arch/x86/boot/mtools.conf.in
index efd6d2490c1d..9e2662d01364 100644
--- a/arch/x86/boot/mtools.conf.in
+++ b/arch/x86/boot/mtools.conf.in
@@ -14,4 +14,7 @@ drive v:
drive w:
file="@OBJ@/fdimage" cylinders=80 heads=2 sectors=36 filter
+# Hard disk
+drive h:
+ file="@OBJ@/hdimage" partition=1 mformat_only
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index 6706b6cb1d0f..38caf61cd5b7 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -1500,7 +1500,7 @@ static int __init curve25519_mod_init(void)
static void __exit curve25519_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
- (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
+ static_branch_likely(&curve25519_use_bmi2_adx))
crypto_unregister_kpp(&curve25519_alg);
}
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 08bf95dbc911..7fec5dcf6438 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -8,18 +8,8 @@ UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_x32.o = $(CC_FLAGS_FTRACE)
CFLAGS_common.o += -fno-stack-protector
-CFLAGS_syscall_64.o += -fno-stack-protector
-CFLAGS_syscall_32.o += -fno-stack-protector
-CFLAGS_syscall_x32.o += -fno-stack-protector
-
-CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 07a9331d55e7..a4c061fb7c6e 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,6 +6,7 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/ptrace-abi.h>
/*
@@ -62,42 +63,7 @@ For 32-bit we have the following conventions - kernel is built with
* for assembly code:
*/
-/* The layout forms the "struct pt_regs" on the stack: */
-/*
- * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
- * unless syscall needs a complete, fully filled "struct pt_regs".
- */
-#define R15 0*8
-#define R14 1*8
-#define R13 2*8
-#define R12 3*8
-#define RBP 4*8
-#define RBX 5*8
-/* These regs are callee-clobbered. Always saved on kernel entry. */
-#define R11 6*8
-#define R10 7*8
-#define R9 8*8
-#define R8 9*8
-#define RAX 10*8
-#define RCX 11*8
-#define RDX 12*8
-#define RSI 13*8
-#define RDI 14*8
-/*
- * On syscall entry, this is syscall#. On CPU exception, this is error code.
- * On hw interrupt, it's IRQ number:
- */
-#define ORIG_RAX 15*8
-/* Return frame for iretq */
-#define RIP 16*8
-#define CS 17*8
-#define EFLAGS 18*8
-#define RSP 19*8
-#define SS 20*8
-
-#define SIZEOF_PTREGS 21*8
-
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
+.macro PUSH_REGS rdx=%rdx rax=%rax save_ret=0
.if \save_ret
pushq %rsi /* pt_regs->si */
movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
@@ -124,7 +90,9 @@ For 32-bit we have the following conventions - kernel is built with
.if \save_ret
pushq %rsi /* return address on top of stack */
.endif
+.endm
+.macro CLEAR_REGS
/*
* Sanitize registers of values that a speculation attack might
* otherwise want to exploit. The lower registers are likely clobbered
@@ -146,6 +114,11 @@ For 32-bit we have the following conventions - kernel is built with
.endm
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
+ PUSH_REGS rdx=\rdx, rax=\rax, save_ret=\save_ret
+ CLEAR_REGS
+.endm
+
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
popq %r15
popq %r14
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 7b2542b13ebd..6c2826417b33 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -36,61 +36,97 @@
#include <asm/irq_stack.h>
#ifdef CONFIG_X86_64
-__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+
+static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
+{
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < NR_syscalls)) {
+ unr = array_index_nospec(unr, NR_syscalls);
+ regs->ax = sys_call_table[unr](regs);
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
+{
+ /*
+ * Adjust the starting offset of the table, and convert numbers
+ * < __X32_SYSCALL_BIT to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int xnr = nr - __X32_SYSCALL_BIT;
+
+ if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
+ xnr = array_index_nospec(xnr, X32_NR_syscalls);
+ regs->ax = x32_sys_call_table[xnr](regs);
+ return true;
+ }
+ return false;
+}
+
+__visible noinstr void do_syscall_64(struct pt_regs *regs, int nr)
{
add_random_kstack_offset();
nr = syscall_enter_from_user_mode(regs, nr);
instrumentation_begin();
- if (likely(nr < NR_syscalls)) {
- nr = array_index_nospec(nr, NR_syscalls);
- regs->ax = sys_call_table[nr](regs);
-#ifdef CONFIG_X86_X32_ABI
- } else if (likely((nr & __X32_SYSCALL_BIT) &&
- (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
- nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
- X32_NR_syscalls);
- regs->ax = x32_sys_call_table[nr](regs);
-#endif
+
+ if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
+ /* Invalid system call, but still a system call. */
+ regs->ax = __x64_sys_ni_syscall(regs);
}
+
instrumentation_end();
syscall_exit_to_user_mode(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
-static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
+static __always_inline int syscall_32_enter(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- return (unsigned int)regs->orig_ax;
+ return (int)regs->orig_ax;
}
/*
* Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
*/
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
- unsigned int nr)
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
{
- if (likely(nr < IA32_NR_syscalls)) {
- nr = array_index_nospec(nr, IA32_NR_syscalls);
- regs->ax = ia32_sys_call_table[nr](regs);
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < IA32_NR_syscalls)) {
+ unr = array_index_nospec(unr, IA32_NR_syscalls);
+ regs->ax = ia32_sys_call_table[unr](regs);
+ } else if (nr != -1) {
+ regs->ax = __ia32_sys_ni_syscall(regs);
}
}
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ int nr = syscall_32_enter(regs);
add_random_kstack_offset();
/*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
+ * Subtlety here: if ptrace pokes something larger than 2^31-1 into
+ * orig_ax, the int return value truncates it. This matches
+ * the semantics of syscall_get_nr().
*/
- nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+ nr = syscall_enter_from_user_mode(regs, nr);
instrumentation_begin();
do_syscall_32_irqs_on(regs, nr);
@@ -101,7 +137,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ int nr = syscall_32_enter(regs);
int res;
add_random_kstack_offset();
@@ -130,14 +166,13 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
/* User code screwed up. */
regs->ax = -EFAULT;
- instrumentation_end();
local_irq_disable();
+ instrumentation_end();
irqentry_exit_to_user_mode(regs);
return false;
}
- /* The case truncates any ptrace induced syscall nr > 2^32 -1 */
- nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+ nr = syscall_enter_from_user_mode_work(regs, nr);
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
@@ -269,15 +304,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
+ instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- instrumentation_begin();
irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
+ instrumentation_end();
irqentry_exit(regs, state);
}
}
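
The "convert negative numbers to very high" comments in do_syscall_x64() and
do_syscall_32_irqs_on() lean on ordinary C conversion rules: storing a negative int into
an unsigned int wraps it modulo 2^32, so -1 becomes 0xffffffff and a single unsigned
comparison rejects both negative and out-of-range numbers, while nr itself stays a signed
int so the "nr != -1" test can still recognise a deliberately skipped syscall. A small
stand-alone sketch (the table size is illustrative, not the real count):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_SYSCALLS 449			/* illustrative table size */

	static bool nr_in_table(int nr)
	{
		unsigned int unr = nr;		/* -1 -> 0xffffffff, far above NR_SYSCALLS */

		return unr < NR_SYSCALLS;
	}

	int main(void)
	{
		/* prints "1 0 0": one unsigned compare handles both bad cases */
		printf("%d %d %d\n", nr_in_table(0), nr_in_table(-1), nr_in_table(100000));
		return 0;
	}
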
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a16a5294d55f..e38a4cf795d9 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -107,8 +107,9 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
/* IRQs are off. */
- movq %rax, %rdi
- movq %rsp, %rsi
+ movq %rsp, %rdi
+ /* Sign extend the lower 32bit as syscall numbers are treated as int */
+ movslq %eax, %rsi
call do_syscall_64 /* returns with IRQs disabled */
/*
@@ -506,7 +507,7 @@ SYM_CODE_START(\asmsym)
movq %rsp, %rdi /* pt_regs pointer */
- call \cfunc
+ call kernel_\cfunc
/*
* No need to switch back to the IST stack. The current stack is either
@@ -517,7 +518,7 @@ SYM_CODE_START(\asmsym)
/* Switch to the regular task stack */
.Lfrom_usermode_switch_stack_\@:
- idtentry_body safe_stack_\cfunc, has_error_code=1
+ idtentry_body user_\cfunc, has_error_code=1
_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 86eb0d89d46f..8cfc9bc73e7f 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -5,21 +5,21 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-#define __SYSCALL_I386(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
+#ifdef CONFIG_IA32_EMULATION
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+#else
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#endif
+
+#define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
+#undef __SYSCALL
-#define __SYSCALL_I386(nr, sym) [nr] = __ia32_##sym,
+#define __SYSCALL(nr, sym) __ia32_##sym,
-__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_ia32_syscall_max] = &__ia32_sys_ni_syscall,
+__visible const sys_call_ptr_t ia32_sys_call_table[] = {
#include <asm/syscalls_32.h>
};
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 1594ec72bcbb..be120eec1fc9 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -5,23 +5,14 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-#define __SYSCALL_X32(nr, sym)
-#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
-
-#define __SYSCALL_64(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
-#undef __SYSCALL_64
+#undef __SYSCALL
-#define __SYSCALL_64(nr, sym) [nr] = __x64_##sym,
+#define __SYSCALL(nr, sym) __x64_##sym,
-asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
+asmlinkage const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
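
Both tables above now use the same two-pass trick: the generated asm/syscalls_*.h header
carries roughly one __SYSCALL(nr, sym) line per syscall, in numeric order, and is included
twice with different definitions of __SYSCALL — once to declare the handlers, once to emit
the function pointers — so the old [nr] = ... designators and the "compiler bug"
workaround are no longer needed. A hedged miniature of the pattern follows; the
three-entry list stands in for the generated header, and stubs replace the kernel's
extern declarations only to keep the sketch linkable:

	#include <stdio.h>

	struct pt_regs { long ax; };
	typedef long (*sys_call_ptr_t)(const struct pt_regs *);

	/* Stand-in for asm/syscalls_64.h: one __SYSCALL() line per syscall number. */
	#define SYSCALL_LIST		\
		__SYSCALL(0, sys_read)	\
		__SYSCALL(1, sys_write)	\
		__SYSCALL(2, sys_open)

	/* Pass 1: declare (here: define) the handlers. */
	#define __SYSCALL(nr, sym) \
		static long __x64_##sym(const struct pt_regs *regs) { (void)regs; return nr; }
	SYSCALL_LIST
	#undef __SYSCALL

	/* Pass 2: emit the pointers in numeric order; no [nr] = designators needed. */
	#define __SYSCALL(nr, sym) __x64_##sym,
	static const sys_call_ptr_t sys_call_table[] = { SYSCALL_LIST };
	#undef __SYSCALL

	int main(void)
	{
		struct pt_regs regs = { 0 };

		printf("%ld\n", sys_call_table[2](&regs));	/* prints 2 */
		return 0;
	}

The arrays are now sized by their initializers, which is why the explicit
__NR_*_syscall_max bounds disappear from the declarations.
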
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index f2fe0a33bcfd..bdd0e03a1265 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -5,37 +5,14 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-/*
- * Reuse the 64-bit entry points for the x32 versions that occupy different
- * slots in the syscall table.
- */
-#define __x32_sys_readv __x64_sys_readv
-#define __x32_sys_writev __x64_sys_writev
-#define __x32_sys_getsockopt __x64_sys_getsockopt
-#define __x32_sys_setsockopt __x64_sys_setsockopt
-#define __x32_sys_vmsplice __x64_sys_vmsplice
-#define __x32_sys_process_vm_readv __x64_sys_process_vm_readv
-#define __x32_sys_process_vm_writev __x64_sys_process_vm_writev
+#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#include <asm/syscalls_x32.h>
+#undef __SYSCALL
-#define __SYSCALL_64(nr, sym)
+#define __SYSCALL(nr, sym) __x64_##sym,
-#define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *);
-#define __SYSCALL_COMMON(nr, sym) extern long __x64_##sym(const struct pt_regs *);
-#include <asm/syscalls_64.h>
-#undef __SYSCALL_X32
-#undef __SYSCALL_COMMON
-
-#define __SYSCALL_X32(nr, sym) [nr] = __x32_##sym,
-#define __SYSCALL_COMMON(nr, sym) [nr] = __x64_##sym,
-
-asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_x32_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_x32_syscall_max] = &__x64_sys_ni_syscall,
-#include <asm/syscalls_64.h>
+asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
+#include <asm/syscalls_x32.h>
};
diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile
index d8c4f6c9eadc..5b3efed0e4e8 100644
--- a/arch/x86/entry/syscalls/Makefile
+++ b/arch/x86/entry/syscalls/Makefile
@@ -9,47 +9,54 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
syscall32 := $(src)/syscall_32.tbl
syscall64 := $(src)/syscall_64.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+offset :=
+prefix :=
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abi_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) --emit-nr \
+ $(if $(offset),--offset $(offset)) \
+ $(if $(prefix),--prefix $(prefix)) \
+ $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
quiet_cmd_hypercalls = HYPERCALLS $@
cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<, $(real-prereqs))
-syshdr_abi_unistd_32 := i386
+$(uapi)/unistd_32.h: abis := i386
$(uapi)/unistd_32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_32_ia32 := i386
-syshdr_pfx_unistd_32_ia32 := ia32_
+$(out)/unistd_32_ia32.h: abis := i386
+$(out)/unistd_32_ia32.h: prefix := ia32_
$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_x32 := common,x32
-syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
+$(uapi)/unistd_x32.h: abis := common,x32
+$(uapi)/unistd_x32.h: offset := __X32_SYSCALL_BIT
$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_64 := common,64
+$(uapi)/unistd_64.h: abis := common,64
$(uapi)/unistd_64.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_64_x32 := x32
-syshdr_pfx_unistd_64_x32 := x32_
+$(out)/unistd_64_x32.h: abis := x32
+$(out)/unistd_64_x32.h: prefix := x32_
$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
+$(out)/syscalls_32.h: abis := i386
$(out)/syscalls_32.h: $(syscall32) $(systbl) FORCE
$(call if_changed,systbl)
+$(out)/syscalls_64.h: abis := common,64
$(out)/syscalls_64.h: $(syscall64) $(systbl) FORCE
$(call if_changed,systbl)
+$(out)/syscalls_x32.h: abis := common,x32
+$(out)/syscalls_x32.h: $(syscall64) $(systbl) FORCE
+ $(call if_changed,systbl)
$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh FORCE
$(call if_changed,hypercalls)
@@ -60,6 +67,7 @@ uapisyshdr-y += unistd_32.h unistd_64.h unistd_x32.h
syshdr-y += syscalls_32.h
syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
syshdr-$(CONFIG_X86_64) += syscalls_64.h
+syshdr-$(CONFIG_X86_X32) += syscalls_x32.h
syshdr-$(CONFIG_XEN) += xen-hypercalls.h
uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 28a1423ce32e..fba2f615119a 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -447,7 +447,7 @@
440 i386 process_madvise sys_process_madvise
441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 i386 mount_setattr sys_mount_setattr
-443 i386 quotactl_path sys_quotactl_path
+443 i386 quotactl_fd sys_quotactl_fd
444 i386 landlock_create_ruleset sys_landlock_create_ruleset
445 i386 landlock_add_rule sys_landlock_add_rule
446 i386 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index ecd551b08d05..af973e400053 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,7 +364,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/entry/syscalls/syscallhdr.sh b/arch/x86/entry/syscalls/syscallhdr.sh
deleted file mode 100644
index cc1e63857427..000000000000
--- a/arch/x86/entry/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_ASM_X86_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- echo "#ifndef ${fileguard}"
- echo "#define ${fileguard} 1"
- echo ""
-
- max=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- echo "#define __NR_${prefix}${name} $nr"
- else
- echo "#define __NR_${prefix}${name} ($offset + $nr)"
- fi
-
- max=$nr
- done
-
- echo ""
- echo "#ifdef __KERNEL__"
- echo "#define __NR_${prefix}syscall_max $max"
- echo "#endif"
- echo ""
- echo "#endif /* ${fileguard} */"
-) > "$out"
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
deleted file mode 100644
index 929bde120d6b..000000000000
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-
-syscall_macro() {
- local abi="$1"
- local nr="$2"
- local entry="$3"
-
- echo "__SYSCALL_${abi}($nr, $entry)"
-}
-
-emit() {
- local abi="$1"
- local nr="$2"
- local entry="$3"
- local compat="$4"
-
- if [ "$abi" != "I386" -a -n "$compat" ]; then
- echo "a compat entry ($abi: $compat) for a 64-bit syscall makes no sense" >&2
- exit 1
- fi
-
- if [ -z "$compat" ]; then
- if [ -n "$entry" ]; then
- syscall_macro "$abi" "$nr" "$entry"
- fi
- else
- echo "#ifdef CONFIG_X86_32"
- if [ -n "$entry" ]; then
- syscall_macro "$abi" "$nr" "$entry"
- fi
- echo "#else"
- syscall_macro "$abi" "$nr" "$compat"
- echo "#endif"
- fi
-}
-
-grep '^[0-9]' "$in" | sort -n | (
- while read nr abi name entry compat; do
- abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
- emit "$abi" "$nr" "$entry" "$compat"
- done
-) > "$out"
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 8e509325c2c3..1eb45139fcc6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -396,10 +396,12 @@ int x86_reserve_hardware(void)
if (!atomic_inc_not_zero(&pmc_refcount)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&pmc_refcount) == 0) {
- if (!reserve_pmc_hardware())
+ if (!reserve_pmc_hardware()) {
err = -EBUSY;
- else
+ } else {
reserve_ds_buffers();
+ reserve_lbr_buffers();
+ }
}
if (!err)
atomic_inc(&pmc_refcount);
@@ -1624,6 +1626,8 @@ static void x86_pmu_del(struct perf_event *event, int flags)
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
goto do_del;
+ __set_bit(event->hw.idx, cpuc->dirty);
+
/*
* Not a TXN, therefore cleanup properly.
*/
@@ -2472,6 +2476,31 @@ static int x86_pmu_event_init(struct perf_event *event)
return err;
}
+void perf_clear_dirty_counters(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int i;
+
+ /* Don't need to clear the assigned counter. */
+ for (i = 0; i < cpuc->n_events; i++)
+ __clear_bit(cpuc->assign[i], cpuc->dirty);
+
+ if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
+ return;
+
+ for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
+ /* Metrics and fake events don't have corresponding HW counters. */
+ if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
+ continue;
+ else if (i >= INTEL_PMC_IDX_FIXED)
+ wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+ else
+ wrmsrl(x86_pmu_event_addr(i), 0);
+ }
+
+ bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
+}
+
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2495,7 +2524,6 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
-
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2521d03de5e0..fca7a6e2242f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -280,6 +280,8 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
+ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
EVENT_EXTRA_END
};
@@ -4030,8 +4032,10 @@ spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
* The :ppp indicates the Precise Distribution (PDist) facility, which
* is only supported on the GP counter 0. If a :ppp event which is not
* available on the GP counter 0, error out.
+ * Exception: Instruction PDIR is only available on the fixed counter 0.
*/
- if (event->attr.precise_ip == 3) {
+ if ((event->attr.precise_ip == 3) &&
+ !constraint_match(&fixed0_constraint, event->hw.config)) {
if (c->idxmsk64 & BIT_ULL(0))
return &counter0_constraint;
@@ -6015,7 +6019,13 @@ __init int intel_pmu_init(void)
tsx_attr = hsw_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(pmem);
- if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ /*
+ * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
+ * TSX force abort hooks are not required on these systems. Only deploy
+ * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
+ */
+ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
+ !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
x86_pmu.flags |= PMU_FL_TFA;
x86_pmu.get_event_constraints = tfa_get_event_constraints;
x86_pmu.enable_all = intel_tfa_pmu_enable_all;
@@ -6157,8 +6167,13 @@ __init int intel_pmu_init(void)
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
pmu->name = "cpu_core";
pmu->cpu_type = hybrid_big;
- pmu->num_counters = x86_pmu.num_counters + 2;
- pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
+ pmu->num_counters = x86_pmu.num_counters + 2;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
+ } else {
+ pmu->num_counters = x86_pmu.num_counters;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+ }
pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
pmu->unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
@@ -6253,7 +6268,7 @@ __init int intel_pmu_init(void)
* Check all LBT MSR here.
* Disable LBR access if any LBR MSRs can not be accessed.
*/
- if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 1ec8fd311f38..8647713276a7 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1187,6 +1187,9 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
+ u64 value = ds->pebs_event_reset[hwc->idx];
+ u32 base = MSR_RELOAD_PMC0;
+ unsigned int idx = hwc->idx;
if (!is_pebs_pt(event))
return;
@@ -1196,7 +1199,12 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
- wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
+ if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
+ base = MSR_RELOAD_FIXED_CTR0;
+ idx = hwc->idx - INTEL_PMC_IDX_FIXED;
+ value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
+ }
+ wrmsrl(base + idx, value);
}
void intel_pmu_pebs_enable(struct perf_event *event)
@@ -1204,6 +1212,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
+ unsigned int idx = hwc->idx;
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -1222,19 +1231,18 @@ void intel_pmu_pebs_enable(struct perf_event *event)
}
}
+ if (idx >= INTEL_PMC_IDX_FIXED)
+ idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+
/*
* Use auto-reload if possible to save a MSR write in the PMI.
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
*/
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
- unsigned int idx = hwc->idx;
-
- if (idx >= INTEL_PMC_IDX_FIXED)
- idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
ds->pebs_event_reset[idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
- ds->pebs_event_reset[hwc->idx] = 0;
+ ds->pebs_event_reset[idx] = 0;
}
intel_pmu_pebs_via_pt_enable(event);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 76dbab6ac9fb..e8453de7a964 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
- struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
@@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
-
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
- kmem_cache && !cpuc->lbr_xsave &&
- (cpuc->lbr_users != cpuc->lbr_pebs_users))
- cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
}
void release_lbr_buffers(void)
@@ -722,6 +716,27 @@ void release_lbr_buffers(void)
}
}
+void reserve_lbr_buffers(void)
+{
+ struct kmem_cache *kmem_cache;
+ struct cpu_hw_events *cpuc;
+ int cpu;
+
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+ kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+ if (!kmem_cache || cpuc->lbr_xsave)
+ continue;
+
+ cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+ GFP_KERNEL | __GFP_ZERO,
+ cpu_to_node(cpu));
+ }
+}
+
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index df7b07d7fdcb..9bf4dbbc26e2 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -801,8 +801,6 @@ static void uncore_pmu_enable(struct pmu *pmu)
struct intel_uncore_box *box;
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
- if (!uncore_pmu)
- return;
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
if (!box)
@@ -818,8 +816,6 @@ static void uncore_pmu_disable(struct pmu *pmu)
struct intel_uncore_box *box;
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
- if (!uncore_pmu)
- return;
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
if (!box)
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 291791002997..187d7287039c 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -92,6 +92,7 @@ struct intel_uncore_type {
/*
* Optional callbacks for managing mapping of Uncore units to PMONs
*/
+ int (*get_topology)(struct intel_uncore_type *type);
int (*set_mapping)(struct intel_uncore_type *type);
void (*cleanup_mapping)(struct intel_uncore_type *type);
};
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 63f097289a84..bb6eb1e5569c 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -348,6 +348,13 @@
#define SKX_M2M_PCI_PMON_CTR0 0x200
#define SKX_M2M_PCI_PMON_BOX_CTL 0x258
+/* Memory Map registers device ID */
+#define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
+#define SNR_ICX_SAD_CONTROL_CFG 0x3f4
+
+/* Getting I/O stack id in SAD_CONTROL_CFG notation */
+#define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)
+
/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0 0x1f98
#define SNR_U_MSR_PMON_CTL0 0x1f91
@@ -1406,6 +1413,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
die_id = i;
else
die_id = topology_phys_to_logical_pkg(i);
+ if (die_id < 0)
+ die_id = -ENODEV;
map->pbus_to_dieid[bus] = die_id;
break;
}
@@ -1452,14 +1461,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
i = -1;
if (reverse) {
for (bus = 255; bus >= 0; bus--) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
}
} else {
for (bus = 0; bus <= 255; bus++) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
@@ -3680,12 +3689,19 @@ static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
}
static umode_t
-skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
+ int die, int zero_bus_pmu)
{
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
- /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
- return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
+ return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
+}
+
+static umode_t
+skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 0. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
static ssize_t skx_iio_mapping_show(struct device *dev,
@@ -3770,7 +3786,8 @@ static const struct attribute_group *skx_iio_attr_update[] = {
NULL,
};
-static int skx_iio_set_mapping(struct intel_uncore_type *type)
+static int
+pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
char buf[64];
int ret;
@@ -3778,7 +3795,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
struct attribute **attrs = NULL;
struct dev_ext_attribute *eas = NULL;
- ret = skx_iio_get_topology(type);
+ ret = type->get_topology(type);
if (ret < 0)
goto clear_attr_update;
@@ -3805,7 +3822,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
eas[die].var = (void *)die;
attrs[die] = &eas[die].attr.attr;
}
- skx_iio_mapping_group.attrs = attrs;
+ ag->attrs = attrs;
return 0;
err:
@@ -3819,6 +3836,11 @@ clear_attr_update:
return ret;
}
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
+}
+
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
struct attribute **attr = skx_iio_mapping_group.attrs;
@@ -3849,6 +3871,7 @@ static struct intel_uncore_type skx_uncore_iio = {
.ops = &skx_uncore_iio_ops,
.format_group = &skx_uncore_iio_format_group,
.attr_update = skx_iio_attr_update,
+ .get_topology = skx_iio_get_topology,
.set_mapping = skx_iio_set_mapping,
.cleanup_mapping = skx_iio_cleanup_mapping,
};
@@ -4391,6 +4414,91 @@ static const struct attribute_group snr_uncore_iio_format_group = {
.attrs = snr_uncore_iio_formats_attr,
};
+static umode_t
+snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 1. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 1);
+}
+
+static struct attribute_group snr_iio_mapping_group = {
+ .is_visible = snr_iio_mapping_visible,
+};
+
+static const struct attribute_group *snr_iio_attr_update[] = {
+ &snr_iio_mapping_group,
+ NULL,
+};
+
+static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
+{
+ u32 sad_cfg;
+ int die, stack_id, ret = -EPERM;
+ struct pci_dev *dev = NULL;
+
+ type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
+ GFP_KERNEL);
+ if (!type->topology)
+ return -ENOMEM;
+
+ while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
+ ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
+ break;
+ }
+
+ die = uncore_pcibus_to_dieid(dev->bus);
+ stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
+ if (die < 0 || stack_id >= type->num_boxes) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* Convert stack id from SAD_CONTROL to PMON notation. */
+ stack_id = sad_pmon_mapping[stack_id];
+
+ ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
+ type->topology[die].segment = pci_domain_nr(dev->bus);
+ }
+
+ if (ret) {
+ kfree(type->topology);
+ type->topology = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
+ */
+enum {
+ SNR_QAT_PMON_ID,
+ SNR_CBDMA_DMI_PMON_ID,
+ SNR_NIS_PMON_ID,
+ SNR_DLB_PMON_ID,
+ SNR_PCIE_GEN3_PMON_ID
+};
+
+static u8 snr_sad_pmon_mapping[] = {
+ SNR_CBDMA_DMI_PMON_ID,
+ SNR_PCIE_GEN3_PMON_ID,
+ SNR_DLB_PMON_ID,
+ SNR_NIS_PMON_ID,
+ SNR_QAT_PMON_ID
+};
+
+static int snr_iio_get_topology(struct intel_uncore_type *type)
+{
+ return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
+}
+
+static int snr_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
+}
+
static struct intel_uncore_type snr_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4404,6 +4512,10 @@ static struct intel_uncore_type snr_uncore_iio = {
.msr_offset = SNR_IIO_MSR_OFFSET,
.ops = &ivbep_uncore_msr_ops,
.format_group = &snr_uncore_iio_format_group,
+ .attr_update = snr_iio_attr_update,
+ .get_topology = snr_iio_get_topology,
+ .set_mapping = snr_iio_set_mapping,
+ .cleanup_mapping = skx_iio_cleanup_mapping,
};
static struct intel_uncore_type snr_uncore_irp = {
@@ -4931,6 +5043,53 @@ static struct event_constraint icx_uncore_iio_constraints[] = {
EVENT_CONSTRAINT_END
};
+static umode_t
+icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 5. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 5);
+}
+
+static struct attribute_group icx_iio_mapping_group = {
+ .is_visible = icx_iio_mapping_visible,
+};
+
+static const struct attribute_group *icx_iio_attr_update[] = {
+ &icx_iio_mapping_group,
+ NULL,
+};
+
+/*
+ * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
+ */
+enum {
+ ICX_PCIE1_PMON_ID,
+ ICX_PCIE2_PMON_ID,
+ ICX_PCIE3_PMON_ID,
+ ICX_PCIE4_PMON_ID,
+ ICX_PCIE5_PMON_ID,
+ ICX_CBDMA_DMI_PMON_ID
+};
+
+static u8 icx_sad_pmon_mapping[] = {
+ ICX_CBDMA_DMI_PMON_ID,
+ ICX_PCIE1_PMON_ID,
+ ICX_PCIE2_PMON_ID,
+ ICX_PCIE3_PMON_ID,
+ ICX_PCIE4_PMON_ID,
+ ICX_PCIE5_PMON_ID,
+};
+
+static int icx_iio_get_topology(struct intel_uncore_type *type)
+{
+ return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
+}
+
+static int icx_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
+}
+
static struct intel_uncore_type icx_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4945,6 +5104,10 @@ static struct intel_uncore_type icx_uncore_iio = {
.constraints = icx_uncore_iio_constraints,
.ops = &skx_uncore_iio_ops,
.format_group = &snr_uncore_iio_format_group,
+ .attr_update = icx_iio_attr_update,
+ .get_topology = icx_iio_get_topology,
+ .set_mapping = icx_iio_set_mapping,
+ .cleanup_mapping = skx_iio_cleanup_mapping,
};
static struct intel_uncore_type icx_uncore_irp = {
@@ -5097,9 +5260,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
.event_ctl = SNR_M2M_PCI_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
.ops = &snr_m2m_uncore_pci_ops,
- .format_group = &skx_uncore_format_group,
+ .format_group = &snr_m2m_uncore_format_group,
};
static struct attribute *icx_upi_uncore_formats_attr[] = {
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 27fa85e7d4fd..2bf1c7ea2758 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -229,6 +229,7 @@ struct cpu_hw_events {
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events; /* the # of events in the below arrays */
@@ -1244,6 +1245,8 @@ void reserve_ds_buffers(void);
void release_lbr_buffers(void);
+void reserve_lbr_buffers(void);
+
extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;
@@ -1393,6 +1396,10 @@ static inline void release_lbr_buffers(void)
{
}
+static inline void reserve_lbr_buffers(void)
+{
+}
+
static inline int intel_pmu_init(void)
{
return 0;
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 84a1042c3b01..85feafacc445 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -764,13 +764,14 @@ static struct rapl_model model_spr = {
.rapl_msrs = intel_rapl_spr_msrs,
};
-static struct rapl_model model_amd_fam17h = {
+static struct rapl_model model_amd_hygon = {
.events = BIT(PERF_RAPL_PKG),
.msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
.rapl_msrs = amd_rapl_msrs,
};
static const struct x86_cpu_id rapl_model_match[] __initconst = {
+ X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &model_snb),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &model_snbep),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &model_snb),
@@ -803,9 +804,6 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
- X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(AMD, 0x19, &model_amd_fam17h),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index bb0ae4b5c00f..6952e219cba3 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -614,50 +614,3 @@ bool hv_is_isolation_supported(void)
return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
-
-/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
-bool hv_query_ext_cap(u64 cap_query)
-{
- /*
- * The address of the 'hv_extended_cap' variable will be used as an
- * output parameter to the hypercall below and so it should be
- * compatible with 'virt_to_phys'. Which means, it's address should be
- * directly mapped. Use 'static' to keep it compatible; stack variables
- * can be virtually mapped, making them imcompatible with
- * 'virt_to_phys'.
- * Hypercall input/output addresses should also be 8-byte aligned.
- */
- static u64 hv_extended_cap __aligned(8);
- static bool hv_extended_cap_queried;
- u64 status;
-
- /*
- * Querying extended capabilities is an extended hypercall. Check if the
- * partition supports extended hypercall, first.
- */
- if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
- return false;
-
- /* Extended capabilities do not change at runtime. */
- if (hv_extended_cap_queried)
- return hv_extended_cap & cap_query;
-
- status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
- &hv_extended_cap);
-
- /*
- * The query extended capabilities hypercall should not fail under
- * any normal circumstances. Avoid repeatedly making the hypercall, on
- * error.
- */
- hv_extended_cap_queried = true;
- status &= HV_HYPERCALL_RESULT_MASK;
- if (status != HV_STATUS_SUCCESS) {
- pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
- status);
- return false;
- }
-
- return hv_extended_cap & cap_query;
-}
-EXPORT_SYMBOL_GPL(hv_query_ext_cap);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a09fc37ead9d..5e5b9fc2747f 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -203,7 +203,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
- MAP_EXECUTABLE | MAP_32BIT,
+ MAP_32BIT,
fd_offset);
if (error != N_TXTADDR(ex))
@@ -212,7 +212,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
- MAP_EXECUTABLE | MAP_32BIT,
+ MAP_32BIT,
fd_offset + ex.a_text);
if (error != N_DATADDR(ex))
return error;
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b19ec8282d50..1e51650b79d7 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -3,6 +3,7 @@
generated-y += syscalls_32.h
generated-y += syscalls_64.h
+generated-y += syscalls_x32.h
generated-y += unistd_32_ia32.h
generated-y += unistd_64_x32.h
generated-y += xen-hypercalls.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 412b51e059c8..48067af94678 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
extern void lapic_assign_system_vectors(void);
extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
extern void lapic_online(void);
extern void lapic_offline(void);
extern bool apic_needs_pit(void);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 0603c7423aca..3ad3da9a7d97 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,25 +3,26 @@
#define _ASM_X86_ASM_H
#ifdef __ASSEMBLY__
-# define __ASM_FORM(x) x
-# define __ASM_FORM_RAW(x) x
-# define __ASM_FORM_COMMA(x) x,
+# define __ASM_FORM(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
#else
#include <linux/stringify.h>
-
-# define __ASM_FORM(x) " " __stringify(x) " "
-# define __ASM_FORM_RAW(x) __stringify(x)
-# define __ASM_FORM_COMMA(x) " " __stringify(x) ","
+# define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
+# define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
+# define __ASM_FORM_COMMA(x, ...) " " __stringify(x,##__VA_ARGS__) ","
#endif
+#define _ASM_BYTES(x, ...) __ASM_FORM(.byte x,##__VA_ARGS__ ;)
+
#ifndef __x86_64__
/* 32 bit */
-# define __ASM_SEL(a,b) __ASM_FORM(a)
-# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
/* 64 bit */
-# define __ASM_SEL(a,b) __ASM_FORM(b)
-# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
@@ -119,6 +120,8 @@
# define CC_OUT(c) [_cc_ ## c] "=qm"
#endif
+#ifdef __KERNEL__
+
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
@@ -185,4 +188,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
#endif /* _ASM_X86_ASM_H */
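
The __ASM_FORM() macros become variadic so that argument lists containing commas — most
importantly the byte lists fed to the new _ASM_BYTES() helper — survive stringification
on the C side and plain pasting on the assembler side. A hedged user-space illustration
of the C-side expansion; the macro copies are simplified, the bytes are just a 3-byte NOP
chosen for the example, and x86 with GNU inline asm is assumed:

	#define __stringify_1(x...)	#x
	#define __stringify(x...)	__stringify_1(x)

	/* Simplified C-side __ASM_FORM()/_ASM_BYTES(), commas and all. */
	#define __ASM_FORM(x, ...)	" " __stringify(x,##__VA_ARGS__) " "
	#define _ASM_BYTES(x, ...)	__ASM_FORM(.byte x,##__VA_ARGS__ ;)

	static inline void three_byte_nop(void)
	{
		/* Expands to roughly: asm volatile(" .byte 0x0f, 0x1f, 0x00 ; "); */
		asm volatile(_ASM_BYTES(0x0f, 0x1f, 0x00));
	}

	int main(void)
	{
		three_byte_nop();
		return 0;
	}

With the old single-argument __ASM_FORM(), the extra commas would be treated as
additional macro arguments and the expansion would fail to compile.
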
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index f732741ad7c7..5e754e895767 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -269,6 +269,4 @@ static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#define ARCH_ATOMIC
-
#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 4819d5e5a335..3ba772a69cc8 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -54,11 +54,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define dma_rmb() barrier()
#define dma_wmb() barrier()
-#ifdef CONFIG_X86_32
-#define __smp_mb() asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
-#else
-#define __smp_mb() asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
-#endif
+#define __smp_mb() asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
+
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index ac37830ae941..d0ce5cfd3ac1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -108,7 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-/* free ( 3*32+29) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -378,6 +378,7 @@
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index f58de66091e5..8b6bd63530dc 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -9,10 +9,4 @@ int crash_setup_memmap_entries(struct kimage *image,
struct boot_params *params);
void crash_smp_send_stop(void);
-#ifdef CONFIG_KEXEC_CORE
-void __init crash_reserve_low_1M(void);
-#else
-static inline void __init crash_reserve_low_1M(void) { }
-#endif
-
#endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 476082a83d1c..ab97b22ac04a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -9,6 +9,7 @@
#include <asm/irq_vectors.h>
#include <asm/cpu_entry_area.h>
+#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/percpu.h>
@@ -224,6 +225,26 @@ static inline void store_idt(struct desc_ptr *dtr)
asm volatile("sidt %0":"=m" (*dtr));
}
+static inline void native_gdt_invalidate(void)
+{
+ const struct desc_ptr invalid_gdt = {
+ .address = 0,
+ .size = 0
+ };
+
+ native_load_gdt(&invalid_gdt);
+}
+
+static inline void native_idt_invalidate(void)
+{
+ const struct desc_ptr invalid_idt = {
+ .address = 0,
+ .size = 0
+ };
+
+ native_load_idt(&invalid_idt);
+}
+
/*
* The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
* a read-only remapping. To prevent a page fault, the GDT is switched to the
@@ -421,12 +442,10 @@ extern bool idt_is_f00f_address(unsigned long address);
#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
-extern void idt_setup_ist_traps(void);
#else
static inline void idt_setup_early_pf(void) { }
-static inline void idt_setup_ist_traps(void) { }
#endif
-extern void idt_invalidate(void *addr);
+extern void idt_invalidate(void);
#endif /* _ASM_X86_DESC_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index b7dd944dc867..8f28fafa98b3 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,11 +56,8 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#ifdef CONFIG_X86_SGX
# define DISABLE_SGX 0
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index ed33a14188f6..23bef08a8388 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
*/
#define PASID_DISABLED 0
-#ifdef CONFIG_IOMMU_SUPPORT
-/* Update current's PASID MSR/state by mm's PASID. */
-void update_pasid(void);
-#else
static inline void update_pasid(void) { }
-#endif
+
#endif /* _ASM_X86_FPU_API_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8d33ad80704f..16bf4d4a8159 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}
+static inline void fxsave(struct fxregs_state *fx)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
+ else
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
+}
+
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
@@ -272,28 +280,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
*/
-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
-{
- u64 mask = xfeatures_mask_all;
- u32 lmask = mask;
- u32 hmask = mask >> 32;
- int err;
-
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
- else
- XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
-
- /* We should never fault when copying to a kernel buffer: */
- WARN_ON_FPU(err);
-}
-
-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
u64 mask = -1;
@@ -578,19 +564,19 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
* PKRU state is switched eagerly because it needs to be valid before we
* return to userland e.g. for a copy_to_user() operation.
*/
- if (current->mm) {
+ if (!(current->flags & PF_KTHREAD)) {
+ /*
+ * If the PKRU bit in xsave.header.xfeatures is not set,
+ * then the PKRU component was in init state, which means
+ * XRSTOR will set PKRU to 0. If the bit is not set then
+ * get_xsave_addr() will return NULL because the PKRU value
+ * in memory is not valid. This means pkru_val has to be
+ * set to 0 and not to init_pkru_value.
+ */
pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
- if (pk)
- pkru_val = pk->pkru;
+ pkru_val = pk ? pk->pkru : 0;
}
__write_pkru(pkru_val);
-
- /*
- * Expensive PASID MSR write will be avoided in update_pasid() because
- * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
- * unless it's different from mm->pasid to reduce overhead.
- */
- update_pasid();
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
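The switch_fpu_finish() hunk changes the guard from current->mm to PF_KTHREAD and makes the "PKRU component in init state" case explicit. A small model of the resulting selection rule (types and names below are hypothetical, not the kernel's):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct demo_pkru_state {
		uint32_t pkru;	/* saved PKRU component from the xsave buffer */
	};

	/*
	 * pk is NULL when the PKRU component was in init state, in which case
	 * XRSTOR will load 0 -- so the cached value must also be 0, not the
	 * default init value.
	 */
	static uint32_t demo_pick_pkru(bool is_kthread,
				       const struct demo_pkru_state *pk,
				       uint32_t init_pkru_value)
	{
		if (is_kthread)
			return init_pkru_value;

		return pk ? pk->pkru : 0;
	}

	int main(void)
	{
		struct demo_pkru_state saved = { .pkru = 0x55555554 };

		printf("user task, saved PKRU : %#x\n",
		       (unsigned)demo_pick_pkru(false, &saved, 0x55555554));
		printf("user task, init state : %#x\n",
		       (unsigned)demo_pick_pkru(false, NULL, 0x55555554));
		printf("kernel thread         : %#x\n",
		       (unsigned)demo_pick_pkru(true, NULL, 0x55555554));
		return 0;
	}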
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 606f5cc579b2..f1366ce609e3 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -52,7 +52,7 @@
* Support for passing hypercall input parameter block via XMM
* registers is available
*/
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
/* Support for a virtual guest idle state is available */
#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
/* Frequency MSRs available */
@@ -61,6 +61,11 @@
#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
/* Support for debug MSRs available */
#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
+/*
+ * Support for returning hypercall output block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15)
/* stimer Direct Mode is available */
#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
@@ -133,6 +138,15 @@
#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+/*
+ * This is specific to AMD and specifies that enlightened TLB flush is
+ * supported. If guest opts in to this feature, ASID invalidations only
+ * flush gva -> hpa mapping entries. To flush the TLB entries derived

+ * from NPT, hypercalls should be used (HvFlushGuestPhysicalAddressSpace
+ * or HvFlushGuestPhysicalAddressList).
+ */
+#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22)
+
/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
#define HV_PARAVISOR_PRESENT BIT(0)
@@ -314,6 +328,9 @@ struct hv_tsc_emulation_status {
#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
+/* Number of XMM registers used in hypercall input/output */
+#define HV_HYPERCALL_MAX_XMM_REGISTERS 6
+
struct hv_nested_enlightenments_control {
struct {
__u32 directhypercall:1;
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 73d45b0dfff2..1345088e9902 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -312,8 +312,8 @@ static __always_inline void __##func(struct pt_regs *regs)
*/
#define DECLARE_IDTENTRY_VC(vector, func) \
DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
- __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
- __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
+ __visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code); \
+ __visible noinstr void user_##func(struct pt_regs *regs, unsigned long error_code)
/**
* DEFINE_IDTENTRY_IST - Emit code for IST entry points
@@ -355,33 +355,24 @@ static __always_inline void __##func(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW_ERRORCODE(func)
/**
- * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
- which runs on a safe stack.
+ * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
+ when raised from kernel mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
+#define DEFINE_IDTENTRY_VC_KERNEL(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
/**
- * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
- which runs on the VC fall-back stack
+ * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
+ when raised from user mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_IST(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
-
-/**
- * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
- * @func: Function name of the entry point
- *
- * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
- */
-#define DEFINE_IDTENTRY_VC(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+#define DEFINE_IDTENTRY_VC_USER(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
#else /* CONFIG_X86_64 */
@@ -504,7 +495,7 @@ __visible noinstr void func(struct pt_regs *regs, \
.align 8
SYM_CODE_START(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
- .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ .rept NR_EXTERNAL_VECTORS
UNWIND_HINT_IRET_REGS
0 :
.byte 0x6a, vector
@@ -520,7 +511,7 @@ SYM_CODE_END(irq_entries_start)
.align 8
SYM_CODE_START(spurious_entries_start)
vector=FIRST_SYSTEM_VECTOR
- .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+ .rept NR_SYSTEM_VECTORS
UNWIND_HINT_IRET_REGS
0 :
.byte 0x6a, vector
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 955b06d6325a..27158436f322 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -102,7 +102,8 @@
#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */
-#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Willow Cove */
+
+#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 889f8b1b5b7f..43dcb9284208 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -26,8 +26,8 @@
* This file enumerates the exact layout of them:
*/
+/* This is used as an interrupt vector when programming the APIC. */
#define NMI_VECTOR 0x02
-#define MCE_VECTOR 0x12
/*
* IDT vectors usable for external interrupt sources start at 0x20.
@@ -84,7 +84,7 @@
*/
#define IRQ_WORK_VECTOR 0xf6
-#define UV_BAU_MESSAGE 0xf5
+/* 0xf5 - unused, was UV_BAU_MESSAGE */
#define DEFERRED_ERROR_VECTOR 0xf4
/* Vector on which hypervisor callbacks will be delivered */
@@ -114,6 +114,9 @@
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
+#define NR_EXTERNAL_VECTORS (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+#define NR_SYSTEM_VECTORS (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+
/*
* Size the maximum number of interrupts.
*
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 610a05374c02..0449b125d27f 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -4,8 +4,6 @@
#define HAVE_JUMP_LABEL_BATCH
-#define JUMP_LABEL_NOP_SIZE 5
-
#include <asm/asm.h>
#include <asm/nops.h>
@@ -14,15 +12,35 @@
#include <linux/stringify.h>
#include <linux/types.h>
+#define JUMP_TABLE_ENTRY \
+ ".pushsection __jump_table, \"aw\" \n\t" \
+ _ASM_ALIGN "\n\t" \
+ ".long 1b - . \n\t" \
+ ".long %l[l_yes] - . \n\t" \
+ _ASM_PTR "%c0 + %c1 - .\n\t" \
+ ".popsection \n\t"
+
+#ifdef CONFIG_STACK_VALIDATION
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:"
+ "jmp %l[l_yes] # objtool NOPs this \n\t"
+ JUMP_TABLE_ENTRY
+ : : "i" (key), "i" (2 | branch) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#else
+
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -30,16 +48,13 @@ l_yes:
return true;
}
+#endif /* STACK_VALIDATION */
+
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
- ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
- "2:\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ "jmp %l[l_yes]\n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -47,41 +62,7 @@ l_yes:
return true;
}
-#else /* __ASSEMBLY__ */
-
-.macro STATIC_JUMP_IF_TRUE target, key, def
-.Lstatic_jump_\@:
- .if \def
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .else
- .byte BYTES_NOP5
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key - .
- .popsection
-.endm
-
-.macro STATIC_JUMP_IF_FALSE target, key, def
-.Lstatic_jump_\@:
- .if \def
- .byte BYTES_NOP5
- .else
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key + 1 - .
- .popsection
-.endm
+extern int arch_jump_entry_size(struct jump_entry *entry);
#endif /* __ASSEMBLY__ */
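The jump_label.h rework hinges on the asm-goto pattern: a patchable site that falls through by default, with the site address, jump target and key recorded in __jump_table. A stripped-down, stand-alone illustration of the construct (no section emission or runtime patching here, so this only shows the shape of the code, not a working static key):

	#include <stdbool.h>
	#include <stdio.h>

	static inline bool demo_static_branch(void)
	{
		asm goto("1:\n\t"
			 ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n\t"	/* 5-byte NOP */
			 /* a real implementation also emits a __jump_table entry
			  * pairing label 1b with %l[l_yes] and the key */
			 : : : : l_yes);
		return false;
	l_yes:
		return true;
	}

	int main(void)
	{
		printf("branch taken: %d\n", demo_static_branch());
		return 0;
	}

With CONFIG_STACK_VALIDATION the kernel now emits a real "jmp %l[l_yes]" and lets objtool turn it into a NOP of the appropriate size, which is why the fixed JUMP_LABEL_NOP_SIZE define and the assembly STATIC_JUMP_IF_* macros could go away.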
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 323641097f63..a12a4987154e 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -87,7 +87,10 @@ KVM_X86_OP(set_identity_map_addr)
KVM_X86_OP(get_mt_mask)
KVM_X86_OP(load_mmu_pgd)
KVM_X86_OP_NULL(has_wbinvd_exit)
-KVM_X86_OP(write_l1_tsc_offset)
+KVM_X86_OP(get_l2_tsc_offset)
+KVM_X86_OP(get_l2_tsc_multiplier)
+KVM_X86_OP(write_tsc_offset)
+KVM_X86_OP(write_tsc_multiplier)
KVM_X86_OP(get_exit_info)
KVM_X86_OP(check_intercept)
KVM_X86_OP(handle_exit_irqoff)
@@ -99,14 +102,15 @@ KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
KVM_X86_OP_NULL(vcpu_unblocking)
KVM_X86_OP_NULL(update_pi_irte)
+KVM_X86_OP_NULL(start_assignment)
KVM_X86_OP_NULL(apicv_post_state_restore)
KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
KVM_X86_OP_NULL(set_hv_timer)
KVM_X86_OP_NULL(cancel_hv_timer)
KVM_X86_OP(setup_mce)
KVM_X86_OP(smi_allowed)
-KVM_X86_OP(pre_enter_smm)
-KVM_X86_OP(pre_leave_smm)
+KVM_X86_OP(enter_smm)
+KVM_X86_OP(leave_smm)
KVM_X86_OP(enable_smi_window)
KVM_X86_OP_NULL(mem_enc_op)
KVM_X86_OP_NULL(mem_enc_reg_region)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cbbcee0a84f9..974cbfb1eefe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -85,7 +85,7 @@
#define KVM_REQ_APICV_UPDATE \
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
-#define KVM_REQ_HV_TLB_FLUSH \
+#define KVM_REQ_TLB_FLUSH_GUEST \
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
@@ -113,6 +113,7 @@
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
#define UNMAPPED_GVA (~(gpa_t)0)
+#define INVALID_GPA (~(gpa_t)0)
/* KVM Hugepage definitions for x86 */
#define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
@@ -199,6 +200,7 @@ enum x86_intercept_stage;
#define KVM_NR_DB_REGS 4
+#define DR6_BUS_LOCK (1 << 11)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
@@ -212,7 +214,7 @@ enum x86_intercept_stage;
* DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
*/
#define DR6_ACTIVE_LOW 0xffff0ff0
-#define DR6_VOLATILE 0x0001e00f
+#define DR6_VOLATILE 0x0001e80f
#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
#define DR7_BP_EN_MASK 0x000000ff
@@ -267,12 +269,36 @@ enum x86_intercept_stage;
struct kvm_kernel_irq_routing_entry;
/*
- * the pages used as guest page table on soft mmu are tracked by
- * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
- * by indirect shadow page can not be more than 15 bits.
+ * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
+ * also includes TDP pages) to determine whether or not a page can be used in
+ * the given MMU context. This is a subset of the overall kvm_mmu_role to
+ * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating
+ * 2 bytes per gfn instead of 4 bytes per gfn.
*
- * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
- * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
+ * Indirect upper-level shadow pages are tracked for write-protection via
+ * gfn_track. As above, gfn_track is a 16 bit counter, so KVM must not create
+ * more than 2^16-1 upper-level shadow pages at a single gfn, otherwise
+ * gfn_track will overflow and explosions will ensue.
+ *
+ * A unique shadow page (SP) for a gfn is created if and only if an existing SP
+ * cannot be reused. The ability to reuse a SP is tracked by its role, which
+ * incorporates various mode bits and properties of the SP. Roughly speaking,
+ * the number of unique SPs that can theoretically be created is 2^n, where n
+ * is the number of bits that are used to compute the role.
+ *
+ * But, even though there are 18 bits in the mask below, not all combinations
+ * of modes and flags are possible. The maximum number of possible upper-level
+ * shadow pages for a single gfn is in the neighborhood of 2^13.
+ *
+ * - invalid shadow pages are not accounted.
+ * - level is effectively limited to four combinations, not 16 as the number
+ * of bits would imply, as 4k SPs are not tracked (allowed to go unsync).
+ * - level is effectively unused for non-PAE paging because there is exactly
+ * one upper level (see 4k SP exception above).
+ * - quadrant is used only for non-PAE paging and is exclusive with
+ * gpte_is_8_bytes.
+ * - execonly and ad_disabled are used only for nested EPT, which makes it
+ * exclusive with quadrant.
*/
union kvm_mmu_page_role {
u32 word;
@@ -283,7 +309,7 @@ union kvm_mmu_page_role {
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
- unsigned nxe:1;
+ unsigned efer_nx:1;
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
unsigned smap_andnot_wp:1;
@@ -301,13 +327,26 @@ union kvm_mmu_page_role {
};
};
-union kvm_mmu_extended_role {
/*
- * This structure complements kvm_mmu_page_role caching everything needed for
- * MMU configuration. If nothing in both these structures changed, MMU
- * re-configuration can be skipped. @valid bit is set on first usage so we don't
- * treat all-zero structure as valid data.
+ * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
+ * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
+ * including on nested transitions, if nothing in the full role changes then
+ * MMU re-configuration can be skipped. @valid bit is set on first usage so we
+ * don't treat all-zero structure as valid data.
+ *
+ * The properties that are tracked in the extended role but not the page role
+ * are for things that either (a) do not affect the validity of the shadow page
+ * or (b) are indirectly reflected in the shadow page's role. For example,
+ * CR4.PKE only affects permission checks for software walks of the guest page
+ * tables (because KVM doesn't support Protection Keys with shadow paging), and
+ * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
+ *
+ * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
+ * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
+ * SMAP, but the MMU's permission checks for software walks need to be SMEP and
+ * SMAP aware regardless of CR0.WP.
*/
+union kvm_mmu_extended_role {
u32 word;
struct {
unsigned int valid:1;
@@ -318,7 +357,7 @@ union kvm_mmu_extended_role {
unsigned int cr4_pke:1;
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
- unsigned int maxphyaddr:6;
+ unsigned int cr4_la57:1;
};
};
@@ -407,7 +446,7 @@ struct kvm_mmu {
u32 pkru_mask;
u64 *pae_root;
- u64 *lm_root;
+ u64 *pml4_root;
/*
* check zero bits on shadow page table entries, these
@@ -418,11 +457,6 @@ struct kvm_mmu {
struct rsvd_bits_validate guest_rsvd_check;
- /* Can have large pages at levels 2..last_nonleaf_level-1. */
- u8 last_nonleaf_level;
-
- bool nx;
-
u64 pdptrs[4]; /* pae */
};
@@ -541,6 +575,15 @@ struct kvm_vcpu_hv {
struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
cpumask_t tlb_flush;
+ bool enforce_cpuid;
+ struct {
+ u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
+ u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
+ u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
+ u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
+ u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
+ u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
+ } cpuid_cache;
};
/* Xen HVM per vcpu emulation context */
@@ -705,7 +748,7 @@ struct kvm_vcpu_arch {
} st;
u64 l1_tsc_offset;
- u64 tsc_offset;
+ u64 tsc_offset; /* current tsc offset */
u64 last_guest_tsc;
u64 last_host_tsc;
u64 tsc_offset_adjustment;
@@ -719,7 +762,8 @@ struct kvm_vcpu_arch {
u32 virtual_tsc_khz;
s64 ia32_tsc_adjust_msr;
u64 msr_ia32_power_ctl;
- u64 tsc_scaling_ratio;
+ u64 l1_tsc_scaling_ratio;
+ u64 tsc_scaling_ratio; /* current scaling ratio */
atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -827,7 +871,7 @@ struct kvm_vcpu_arch {
bool l1tf_flush_l1d;
/* Host CPU on which VM-entry was most recently attempted */
- unsigned int last_vmentry_cpu;
+ int last_vmentry_cpu;
/* AMD MSRC001_0015 Hardware Configuration */
u64 msr_hwcr;
@@ -849,6 +893,16 @@ struct kvm_vcpu_arch {
/* Protected Guests */
bool guest_state_protected;
+
+ /*
+ * Set when PDPTS were loaded directly by the userspace without
+ * reading the guest memory
+ */
+ bool pdptrs_from_userspace;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ hpa_t hv_root_tdp;
+#endif
};
struct kvm_lpage_info {
@@ -1000,7 +1054,7 @@ struct kvm_arch {
struct kvm_apic_map __rcu *apic_map;
atomic_t apic_map_dirty;
- bool apic_access_page_done;
+ bool apic_access_memslot_enabled;
unsigned long apicv_inhibit_reasons;
gpa_t wall_clock;
@@ -1060,11 +1114,19 @@ struct kvm_arch {
bool exception_payload_enabled;
bool bus_lock_detection_enabled;
+ /*
+ * If exit_on_emulation_error is set, and the in-kernel instruction
+ * emulator fails to emulate an instruction, allow userspace
+ * the opportunity to look at it.
+ */
+ bool exit_on_emulation_error;
/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
u32 user_space_msr_mask;
struct kvm_x86_msr_filter __rcu *msr_filter;
+ u32 hypercall_exit_enabled;
+
/* Guest can access the SGX PROVISIONKEY. */
bool sgx_provisioning_allowed;
@@ -1122,23 +1184,35 @@ struct kvm_arch {
*/
spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */
+
+ /*
+ * If set, rmaps have been allocated for all memslots and should be
+ * allocated for any newly created or modified memslots.
+ */
+ bool memslots_have_rmaps;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ hpa_t hv_root_tdp;
+ spinlock_t hv_root_tdp_lock;
+#endif
};
struct kvm_vm_stat {
- ulong mmu_shadow_zapped;
- ulong mmu_pte_write;
- ulong mmu_pde_zapped;
- ulong mmu_flooded;
- ulong mmu_recycled;
- ulong mmu_cache_miss;
- ulong mmu_unsync;
- ulong remote_tlb_flush;
- ulong lpages;
- ulong nx_lpage_splits;
- ulong max_mmu_page_hash_collisions;
+ struct kvm_vm_stat_generic generic;
+ u64 mmu_shadow_zapped;
+ u64 mmu_pte_write;
+ u64 mmu_pde_zapped;
+ u64 mmu_flooded;
+ u64 mmu_recycled;
+ u64 mmu_cache_miss;
+ u64 mmu_unsync;
+ u64 lpages;
+ u64 nx_lpage_splits;
+ u64 max_mmu_page_hash_collisions;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 pf_fixed;
u64 pf_guest;
u64 tlb_flush;
@@ -1152,10 +1226,6 @@ struct kvm_vcpu_stat {
u64 nmi_window_exits;
u64 l1d_flush;
u64 halt_exits;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 request_irq_exits;
u64 irq_exits;
u64 host_state_reload;
@@ -1166,11 +1236,10 @@ struct kvm_vcpu_stat {
u64 irq_injections;
u64 nmi_injections;
u64 req_event;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 nested_run;
u64 directed_yield_attempted;
u64 directed_yield_successful;
+ u64 guest_mode;
};
struct x86_instruction_info;
@@ -1302,8 +1371,10 @@ struct kvm_x86_ops {
bool (*has_wbinvd_exit)(void);
- /* Returns actual tsc_offset set in active VMCS */
- u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
+ u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
+ void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);
/*
* Retrieve somewhat arbitrary exit information. Intended to be used
@@ -1350,6 +1421,7 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+ void (*start_assignment)(struct kvm *kvm);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
@@ -1360,8 +1432,8 @@ struct kvm_x86_ops {
void (*setup_mce)(struct kvm_vcpu *vcpu);
int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
- int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
- int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
+ int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+ int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
void (*enable_smi_window)(struct kvm_vcpu *vcpu);
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1417,8 +1489,10 @@ struct kvm_arch_async_pf {
bool direct_map;
};
+extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __read_mostly enable_apicv;
extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP(func) \
@@ -1459,6 +1533,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
+void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -1473,7 +1548,6 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
-bool pdptrs_changed(struct kvm_vcpu *vcpu);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
@@ -1646,6 +1720,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
ulong roots_to_free);
+void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
@@ -1658,7 +1733,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void kvm_request_apicv_update(struct kvm *kvm, bool activate,
unsigned long bit);
@@ -1671,8 +1745,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gva_t gva, hpa_t root_hpa);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
- bool skip_mmu_sync);
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
int tdp_huge_page_level);
@@ -1775,11 +1848,19 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
unsigned long ipi_bitmap_high, u32 min,
unsigned long icr, int op_64_bit);
-void kvm_define_user_return_msr(unsigned index, u32 msr);
+int kvm_add_user_return_msr(u32 msr);
+int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+static inline bool kvm_is_supported_user_return_msr(u32 msr)
+{
+ return kvm_find_user_return_msr(msr) >= 0;
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
+u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
+u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
@@ -1853,4 +1934,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
int kvm_cpu_dirty_log_size(void);
+int alloc_all_memslots_rmaps(struct kvm *kvm);
+
#endif /* _ASM_X86_KVM_HOST_H */
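A small illustration of the "role packed into one word" idea that the expanded kvm_mmu_page_role comment leans on: because every property lives in a bitfield of a single 32-bit union, deciding whether an existing shadow page can be reused is one integer compare. The layout below is illustrative only, not the real field list:

	#include <stdint.h>
	#include <stdio.h>

	union demo_page_role {
		uint32_t word;
		struct {
			uint32_t level:4;
			uint32_t direct:1;
			uint32_t access:3;
			uint32_t efer_nx:1;
			uint32_t cr0_wp:1;
		};
	};

	int main(void)
	{
		union demo_page_role want = { .word = 0 }, have = { .word = 0 };

		want.level = 4; want.efer_nx = 1; want.cr0_wp = 1;
		have.level = 4; have.efer_nx = 1; have.cr0_wp = 1;

		printf("roles match, page reusable: %s\n",
		       want.word == have.word ? "yes" : "no");
		return 0;
	}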
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 338119852512..69299878b200 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -7,8 +7,6 @@
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>
-extern void kvmclock_init(void);
-
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
}
#ifdef CONFIG_KVM_GUEST
+void kvmclock_init(void);
+void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
-void kvm_disable_steal_time(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
return 0;
}
-static inline void kvm_disable_steal_time(void)
-{
- return;
-}
-
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
return false;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ddfb3cad8dff..0607ec4f5091 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -305,7 +305,7 @@ extern void apei_mce_report_mem_error(int corrected,
/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
SMCA_LS = 0, /* Load Store */
- SMCA_LS_V2, /* Load Store */
+ SMCA_LS_V2,
SMCA_IF, /* Instruction Fetch */
SMCA_L2_CACHE, /* L2 Cache */
SMCA_DE, /* Decoder Unit */
@@ -314,17 +314,22 @@ enum smca_bank_types {
SMCA_FP, /* Floating Point */
SMCA_L3_CACHE, /* L3 Cache */
SMCA_CS, /* Coherent Slave */
- SMCA_CS_V2, /* Coherent Slave */
+ SMCA_CS_V2,
SMCA_PIE, /* Power, Interrupts, etc. */
SMCA_UMC, /* Unified Memory Controller */
+ SMCA_UMC_V2,
SMCA_PB, /* Parameter Block */
SMCA_PSP, /* Platform Security Processor */
- SMCA_PSP_V2, /* Platform Security Processor */
+ SMCA_PSP_V2,
SMCA_SMU, /* System Management Unit */
- SMCA_SMU_V2, /* System Management Unit */
+ SMCA_SMU_V2,
SMCA_MP5, /* Microprocessor 5 Unit */
SMCA_NBIO, /* Northbridge IO Unit */
SMCA_PCIE, /* PCI Express Unit */
+ SMCA_PCIE_V2,
+ SMCA_XGMI_PCS, /* xGMI PCS Unit */
+ SMCA_XGMI_PHY, /* xGMI PHY Unit */
+ SMCA_WAFL_PHY, /* WAFL PHY Unit */
N_SMCA_BANK_TYPES
};
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 742d89a00721..a7c413432b33 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -537,9 +537,9 @@
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
-#define MSR_K8_SYSCFG 0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG 0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
@@ -772,6 +772,10 @@
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+#define MSR_TFA_TSX_CPUID_CLEAR_BIT 1
+#define MSR_TFA_TSX_CPUID_CLEAR BIT_ULL(MSR_TFA_TSX_CPUID_CLEAR_BIT)
+#define MSR_TFA_SDV_ENABLE_RTM_BIT 2
+#define MSR_TFA_SDV_ENABLE_RTM BIT_ULL(MSR_TFA_SDV_ENABLE_RTM_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index c1e5e818ba16..c5573eaa5bb9 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_NOPS_H
#define _ASM_X86_NOPS_H
+#include <asm/asm.h>
+
/*
* Define nops for use with alternative() and for tracing.
*/
@@ -57,20 +59,14 @@
#endif /* CONFIG_64BIT */
-#ifdef __ASSEMBLY__
-#define _ASM_MK_NOP(x) .byte x
-#else
-#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
-#endif
-
-#define ASM_NOP1 _ASM_MK_NOP(BYTES_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(BYTES_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(BYTES_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(BYTES_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(BYTES_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(BYTES_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(BYTES_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(BYTES_NOP8)
+#define ASM_NOP1 _ASM_BYTES(BYTES_NOP1)
+#define ASM_NOP2 _ASM_BYTES(BYTES_NOP2)
+#define ASM_NOP3 _ASM_BYTES(BYTES_NOP3)
+#define ASM_NOP4 _ASM_BYTES(BYTES_NOP4)
+#define ASM_NOP5 _ASM_BYTES(BYTES_NOP5)
+#define ASM_NOP6 _ASM_BYTES(BYTES_NOP6)
+#define ASM_NOP7 _ASM_BYTES(BYTES_NOP7)
+#define ASM_NOP8 _ASM_BYTES(BYTES_NOP8)
#define ASM_NOP_MAX 8
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 7555b48803a8..4d5810c8fab7 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -34,9 +34,9 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
copy_page(to, from);
}
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#ifndef __pa
#define __pa(x) __phys_addr((unsigned long)(x))
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index ca840fec7776..4bde0dc66100 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -75,7 +75,7 @@ void copy_page(void *to, void *from);
*
* With page table isolation enabled, we map the LDT in ... [stay tuned]
*/
-static inline unsigned long task_size_max(void)
+static __always_inline unsigned long task_size_max(void)
{
unsigned long ret;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 544f41a179fb..8fc1b5003713 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -478,6 +478,7 @@ struct x86_pmu_lbr {
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
+extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 62ad61d6fefc..c7ec5bb88334 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -84,8 +84,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f24d7ef8fffa..40497a9020c6 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -7,8 +7,6 @@
#include <asm/page_types.h>
-#define FIRST_USER_ADDRESS 0UL
-
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index f8cb8af4de5c..fe5efbcba824 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+ per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 154321d29050..364d0e42e280 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -663,6 +663,7 @@ extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
+extern void cpu_init_secondary(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);
@@ -787,8 +788,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
+extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
+static inline u32 amd_get_highest_perf(void) { return 0; }
#endif
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
new file mode 100644
index 000000000000..2cef6c5a52c2
--- /dev/null
+++ b/arch/x86/include/asm/sev-common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD SEV header common between the guest and the hypervisor.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __ASM_X86_SEV_COMMON_H
+#define __ASM_X86_SEV_COMMON_H
+
+#define GHCB_MSR_INFO_POS 0
+#define GHCB_DATA_LOW 12
+#define GHCB_MSR_INFO_MASK (BIT_ULL(GHCB_DATA_LOW) - 1)
+
+#define GHCB_DATA(v) \
+ (((unsigned long)(v) & ~GHCB_MSR_INFO_MASK) >> GHCB_DATA_LOW)
+
+/* SEV Information Request/Response */
+#define GHCB_MSR_SEV_INFO_RESP 0x001
+#define GHCB_MSR_SEV_INFO_REQ 0x002
+#define GHCB_MSR_VER_MAX_POS 48
+#define GHCB_MSR_VER_MAX_MASK 0xffff
+#define GHCB_MSR_VER_MIN_POS 32
+#define GHCB_MSR_VER_MIN_MASK 0xffff
+#define GHCB_MSR_CBIT_POS 24
+#define GHCB_MSR_CBIT_MASK 0xff
+#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
+ ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
+ (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
+ (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
+ GHCB_MSR_SEV_INFO_RESP)
+#define GHCB_MSR_INFO(v) ((v) & 0xfffUL)
+#define GHCB_MSR_PROTO_MAX(v) (((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
+#define GHCB_MSR_PROTO_MIN(v) (((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
+
+/* CPUID Request/Response */
+#define GHCB_MSR_CPUID_REQ 0x004
+#define GHCB_MSR_CPUID_RESP 0x005
+#define GHCB_MSR_CPUID_FUNC_POS 32
+#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
+#define GHCB_MSR_CPUID_VALUE_POS 32
+#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
+#define GHCB_MSR_CPUID_REG_POS 30
+#define GHCB_MSR_CPUID_REG_MASK 0x3
+#define GHCB_CPUID_REQ_EAX 0
+#define GHCB_CPUID_REQ_EBX 1
+#define GHCB_CPUID_REQ_ECX 2
+#define GHCB_CPUID_REQ_EDX 3
+#define GHCB_CPUID_REQ(fn, reg) \
+ (GHCB_MSR_CPUID_REQ | \
+ (((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
+ (((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
+
+/* AP Reset Hold */
+#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
+#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
+
+/* GHCB Hypervisor Feature Request/Response */
+#define GHCB_MSR_HV_FT_REQ 0x080
+#define GHCB_MSR_HV_FT_RESP 0x081
+
+#define GHCB_MSR_TERM_REQ 0x100
+#define GHCB_MSR_TERM_REASON_SET_POS 12
+#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
+#define GHCB_MSR_TERM_REASON_POS 16
+#define GHCB_MSR_TERM_REASON_MASK 0xff
+#define GHCB_SEV_TERM_REASON(reason_set, reason_val) \
+ (((((u64)reason_set) & GHCB_MSR_TERM_REASON_SET_MASK) << GHCB_MSR_TERM_REASON_SET_POS) | \
+ ((((u64)reason_val) & GHCB_MSR_TERM_REASON_MASK) << GHCB_MSR_TERM_REASON_POS))
+
+#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
+#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
+
+#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
+
+#endif
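The new sev-common.h encodes the GHCB MSR protocol as positions and masks inside one 64-bit MSR value. A stand-alone sketch of encoding a CPUID request and decoding it again, using the same field layout (constant names are local stand-ins; the leaf/register values are just an example):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_MSR_CPUID_REQ	0x004ULL
	#define DEMO_MSR_INFO_MASK	((1ULL << 12) - 1)
	#define DEMO_CPUID_FUNC_POS	32
	#define DEMO_CPUID_REG_POS	30
	#define DEMO_CPUID_REG_MASK	0x3ULL

	#define DEMO_CPUID_REQ(fn, reg)						\
		(DEMO_MSR_CPUID_REQ |						\
		 (((uint64_t)(reg) & DEMO_CPUID_REG_MASK) << DEMO_CPUID_REG_POS) | \
		 ((uint64_t)(fn) << DEMO_CPUID_FUNC_POS))

	int main(void)
	{
		/* Ask the hypervisor for ECX of CPUID leaf 0x8000001f. */
		uint64_t msr = DEMO_CPUID_REQ(0x8000001f, 2);

		printf("GHCB MSR value : %#" PRIx64 "\n", msr);
		printf("info field     : %#" PRIx64 "\n", msr & DEMO_MSR_INFO_MASK);
		printf("register index : %" PRIu64 "\n",
		       (msr >> DEMO_CPUID_REG_POS) & DEMO_CPUID_REG_MASK);
		printf("cpuid function : %#" PRIx64 "\n", msr >> DEMO_CPUID_FUNC_POS);
		return 0;
	}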
diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev.h
index cf1d957c7091..fa5cd05d3b5b 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev.h
@@ -10,34 +10,12 @@
#include <linux/types.h>
#include <asm/insn.h>
+#include <asm/sev-common.h>
-#define GHCB_SEV_INFO 0x001UL
-#define GHCB_SEV_INFO_REQ 0x002UL
-#define GHCB_INFO(v) ((v) & 0xfffUL)
-#define GHCB_PROTO_MAX(v) (((v) >> 48) & 0xffffUL)
-#define GHCB_PROTO_MIN(v) (((v) >> 32) & 0xffffUL)
-#define GHCB_PROTO_OUR 0x0001UL
-#define GHCB_SEV_CPUID_REQ 0x004UL
-#define GHCB_CPUID_REQ_EAX 0
-#define GHCB_CPUID_REQ_EBX 1
-#define GHCB_CPUID_REQ_ECX 2
-#define GHCB_CPUID_REQ_EDX 3
-#define GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
- (((unsigned long)reg & 3) << 30) | \
- (((unsigned long)fn) << 32))
+#define GHCB_PROTO_OUR 0x0001UL
+#define GHCB_PROTOCOL_MAX 1ULL
+#define GHCB_DEFAULT_USAGE 0ULL
-#define GHCB_PROTOCOL_MAX 0x0001UL
-#define GHCB_DEFAULT_USAGE 0x0000UL
-
-#define GHCB_SEV_CPUID_RESP 0x005UL
-#define GHCB_SEV_TERMINATE 0x100UL
-#define GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \
- (((((u64)reason_set) & 0x7) << 12) | \
- ((((u64)reason_val) & 0xff) << 16))
-#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
-#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
-
-#define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff)
#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
enum es_result {
diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
index 9c31e0ebc55b..05f3e21f01a7 100644
--- a/arch/x86/include/asm/sgx.h
+++ b/arch/x86/include/asm/sgx.h
@@ -13,7 +13,7 @@
/*
* This file contains both data structures defined by SGX architecture and Linux
* defined software data structures and functions. The two should not be mixed
- * together for better readibility. The architectural definitions come first.
+ * together for better readability. The architectural definitions come first.
*/
/* The SGX specific CPUID function. */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index b6ffe58c70fa..24a8d6c4fb18 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -11,7 +11,7 @@
* The same segment is shared by percpu area and stack canary. On
* x86_64, percpu symbols are zero based and %gs (64-bit) points to the
* base of percpu area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at the approproate
+ * fixed_percpu_data which contains stack_canary at the appropriate
* offset. On x86_32, the stack canary is just a regular percpu
* variable.
*
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 772e60efe243..e322676039f4 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -156,6 +156,12 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 avic_physical_id; /* Offset 0xf8 */
u8 reserved_7[8];
u64 vmsa_pa; /* Used for an SEV-ES guest */
+ u8 reserved_8[720];
+ /*
+ * Offset 0x3e0, 32 bytes reserved
+ * for use by hypervisor/software.
+ */
+ u8 reserved_sw[32];
};
@@ -314,7 +320,7 @@ struct ghcb {
#define EXPECTED_VMCB_SAVE_AREA_SIZE 1032
-#define EXPECTED_VMCB_CONTROL_AREA_SIZE 272
+#define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024
#define EXPECTED_GHCB_SIZE PAGE_SIZE
static inline void __unused_size_checks(void)
@@ -326,7 +332,6 @@ static inline void __unused_size_checks(void)
struct vmcb {
struct vmcb_control_area control;
- u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
struct vmcb_save_area save;
} __packed;
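Growing vmcb_control_area to a fixed 1024 bytes (and bumping EXPECTED_VMCB_CONTROL_AREA_SIZE to match) relies on a compile-time size check to catch layout drift. A stand-alone sketch of that pattern, with made-up numbers and struct names:

	struct demo_control_area {
		unsigned char fixed_fields[992];
		unsigned char reserved_sw[32];	/* scratch area for hypervisor/software use */
	};

	#define DEMO_EXPECTED_CONTROL_AREA_SIZE	1024
	/* Negative array size => compile error when the condition is true. */
	#define DEMO_BUILD_BUG_ON(cond)		((void)sizeof(char[1 - 2 * !!(cond)]))

	static inline void demo_size_checks(void)
	{
		DEMO_BUILD_BUG_ON(sizeof(struct demo_control_area) !=
				  DEMO_EXPECTED_CONTROL_AREA_SIZE);
	}

	int main(void)
	{
		demo_size_checks();
		return 0;
	}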
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 7cbf733d11af..f7e2d82d24fb 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -21,13 +21,12 @@ extern const sys_call_ptr_t sys_call_table[];
#if defined(CONFIG_X86_32)
#define ia32_sys_call_table sys_call_table
-#endif
-
-#if defined(CONFIG_IA32_EMULATION)
+#else
+/*
+ * These may not exist, but still put the prototypes in so we
+ * can use IS_ENABLED().
+ */
extern const sys_call_ptr_t ia32_sys_call_table[];
-#endif
-
-#ifdef CONFIG_X86_X32_ABI
extern const sys_call_ptr_t x32_sys_call_table[];
#endif
@@ -160,7 +159,7 @@ static inline int syscall_get_arch(struct task_struct *task)
? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
-void do_syscall_64(unsigned long nr, struct pt_regs *regs);
+void do_syscall_64(struct pt_regs *regs, int nr);
void do_int80_syscall_32(struct pt_regs *regs);
long do_fast_syscall_32(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index 80c08c7d5e72..6a2827d0681f 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -17,7 +17,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* __x64_sys_*() - 64-bit native syscall
* __ia32_sys_*() - 32-bit native syscall or common compat syscall
* __ia32_compat_sys_*() - 32-bit compat syscall
- * __x32_compat_sys_*() - 64-bit X32 compat syscall
+ * __x64_compat_sys_*() - 64-bit X32 compat syscall
*
* The registers are decoded according to the ABI:
* 64-bit: RDI, RSI, RDX, R10, R8, R9
@@ -166,17 +166,17 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* with x86_64 obviously do not need such care.
*/
#define __X32_COMPAT_SYS_STUB0(name) \
- __SYS_STUB0(x32, compat_sys_##name)
+ __SYS_STUB0(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...) \
- __SYS_STUBx(x32, compat_sys##name, \
+ __SYS_STUBx(x64, compat_sys##name, \
SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__))
#define __X32_COMPAT_COND_SYSCALL(name) \
- __COND_SYSCALL(x32, compat_sys_##name)
+ __COND_SYSCALL(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_NI(name) \
- __SYS_NI(x32, compat_sys_##name)
+ __SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32 */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h
index ddbdefd5b94f..91a7b6687c3b 100644
--- a/arch/x86/include/asm/thermal.h
+++ b/arch/x86/include/asm/thermal.h
@@ -3,11 +3,13 @@
#define _ASM_X86_THERMAL_H
#ifdef CONFIG_X86_THERMAL_VECTOR
+void therm_lvt_init(void);
void intel_init_thermal(struct cpuinfo_x86 *c);
bool x86_thermal_enabled(void);
void intel_thermal_interrupt(void);
#else
-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+static inline void therm_lvt_init(void) { }
+static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
#endif
#endif /* _ASM_X86_THERMAL_H */
diff --git a/arch/x86/include/asm/unaligned.h b/arch/x86/include/asm/unaligned.h
deleted file mode 100644
index 9c754a7447aa..000000000000
--- a/arch/x86/include/asm/unaligned.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_UNALIGNED_H
-#define _ASM_X86_UNALIGNED_H
-
-/*
- * The x86 can do unaligned accesses itself.
- */
-
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_X86_UNALIGNED_H */
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index c1c3d31b15c0..80e9d5206a71 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -13,7 +13,7 @@
# define __ARCH_WANT_SYS_OLD_MMAP
# define __ARCH_WANT_SYS_OLD_SELECT
-# define __NR_ia32_syscall_max __NR_syscall_max
+# define IA32_NR_syscalls (__NR_syscalls)
# else
@@ -26,12 +26,12 @@
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+# define X32_NR_syscalls (__NR_x32_syscalls)
+# define IA32_NR_syscalls (__NR_ia32_syscalls)
# endif
-# define NR_syscalls (__NR_syscall_max + 1)
-# define X32_NR_syscalls (__NR_x32_syscall_max + 1)
-# define IA32_NR_syscalls (__NR_ia32_syscall_max + 1)
+# define NR_syscalls (__NR_syscalls)
# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/vdso/clocksource.h b/arch/x86/include/asm/vdso/clocksource.h
index 119ac8612d89..136e5e57cfe1 100644
--- a/arch/x86/include/asm/vdso/clocksource.h
+++ b/arch/x86/include/asm/vdso/clocksource.h
@@ -7,4 +7,6 @@
VDSO_CLOCKMODE_PVCLOCK, \
VDSO_CLOCKMODE_HVCLOCK
+#define HAVE_VDSO_CLOCKMODE_HVCLOCK
+
#endif /* __ASM_VDSO_CLOCKSOURCE_H */
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
index 5fdfcb47000f..054604aba9f0 100644
--- a/arch/x86/include/uapi/asm/hwcap2.h
+++ b/arch/x86/include/uapi/asm/hwcap2.h
@@ -2,10 +2,12 @@
#ifndef _ASM_X86_HWCAP2_H
#define _ASM_X86_HWCAP2_H
+#include <linux/const.h>
+
/* MONITOR/MWAIT enabled in Ring 3 */
-#define HWCAP2_RING3MWAIT (1 << 0)
+#define HWCAP2_RING3MWAIT _BITUL(0)
/* Kernel allows FSGSBASE instructions available in Ring 3 */
-#define HWCAP2_FSGSBASE BIT(1)
+#define HWCAP2_FSGSBASE _BITUL(1)
#endif
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 5a3022c8af82..a6c327f8ad9e 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -159,6 +159,19 @@ struct kvm_sregs {
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
+struct kvm_sregs2 {
+ /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 flags;
+ __u64 pdptrs[4];
+};
+#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1
+
/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
__u8 fpr[8][16];
@@ -437,6 +450,8 @@ struct kvm_vmx_nested_state_hdr {
__u16 flags;
} smm;
+ __u16 pad;
+
__u32 flags;
__u64 preemption_timer_deadline;
};
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 950afebfba88..5146bbab84d4 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -33,6 +33,8 @@
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
#define KVM_FEATURE_MSI_EXT_DEST_ID 15
+#define KVM_FEATURE_HC_MAP_GPA_RANGE 16
+#define KVM_FEATURE_MIGRATION_CONTROL 17
#define KVM_HINTS_REALTIME 0
@@ -54,6 +56,7 @@
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
+#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08
struct kvm_steal_time {
__u64 steal;
@@ -90,6 +93,16 @@ struct kvm_clock_pairing {
/* MSR_KVM_ASYNC_PF_INT */
#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)
+/* MSR_KVM_MIGRATION_CONTROL */
+#define KVM_MIGRATION_READY (1 << 0)
+
+/* KVM_HC_MAP_GPA_RANGE */
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K 0
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_2M (1 << 0)
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_1G (1 << 1)
+#define KVM_MAP_GPA_RANGE_ENC_STAT(n) (n << 4)
+#define KVM_MAP_GPA_RANGE_ENCRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(1)
+#define KVM_MAP_GPA_RANGE_DECRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(0)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 554f75fe013c..efa969325ede 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -110,6 +110,9 @@
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+/* Exit code reserved for hypervisor/software use */
+#define SVM_EXIT_SW 0xf0000000
+
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0704c2a94272..3e625c61f008 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_head64.o = -pg
-CFLAGS_REMOVE_sev-es.o = -pg
+CFLAGS_REMOVE_sev.o = -pg
endif
KASAN_SANITIZE_head$(BITS).o := n
@@ -28,7 +28,7 @@ KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
-KASAN_SANITIZE_sev-es.o := n
+KASAN_SANITIZE_sev.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
@@ -102,6 +102,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
+obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
@@ -148,7 +149,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
-obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e90310cbe73a..e55e0c1fad8c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -5,6 +5,7 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
#include <linux/acpi.h>
@@ -42,8 +43,6 @@ EXPORT_SYMBOL(acpi_disabled);
# include <asm/proto.h>
#endif /* X86 */
-#define PREFIX "ACPI: "
-
int acpi_noirq; /* skip ACPI IRQ initialization */
static int acpi_nobgrt; /* skip ACPI BGRT */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
@@ -130,15 +129,14 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt = (struct acpi_table_madt *)table;
if (!madt) {
- printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+ pr_warn("Unable to map MADT\n");
return -ENODEV;
}
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;
- printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
- madt->address);
+ pr_debug("Local APIC address 0x%08x\n", madt->address);
}
default_acpi_madt_oem_check(madt->header.oem_id,
@@ -161,7 +159,7 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
int cpu;
if (id >= MAX_LOCAL_APIC) {
- printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
+ pr_info("skipped apicid that is too big\n");
return -EINVAL;
}
@@ -213,13 +211,13 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
*/
if (!apic->apic_id_valid(apic_id)) {
if (enabled)
- pr_warn(PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
return 0;
}
acpi_register_lapic(apic_id, processor->uid, enabled);
#else
- printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
#endif
return 0;
@@ -306,7 +304,7 @@ acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
acpi_table_print_madt_entry(&header->common);
if (x2apic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -324,7 +322,7 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long e
acpi_table_print_madt_entry(&header->common);
if (lapic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -514,14 +512,14 @@ acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 override ignored.\n");
+ pr_warn("BIOS IRQ0 override ignored.\n");
return 0;
}
if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
- printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
@@ -597,7 +595,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
if (old == new)
return;
- printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
+ pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
outb(new, 0x4d0);
outb(new >> 8, 0x4d1);
}
@@ -754,7 +752,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
if (cpu < 0) {
- pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ pr_info("Unable to map lapic to logical cpu number\n");
return cpu;
}
@@ -870,8 +868,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
- printk(KERN_WARNING PREFIX "HPET timers must be located in "
- "memory.\n");
+ pr_warn("HPET timers must be located in memory.\n");
return -1;
}
@@ -883,9 +880,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* want to allocate a resource there.
*/
if (!hpet_address) {
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: %#lx is invalid\n",
- hpet_tbl->id, hpet_address);
+ pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address);
return 0;
}
#ifdef CONFIG_X86_64
@@ -896,21 +891,17 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
*/
if (hpet_address == 0xfed0000000000000UL) {
if (!hpet_force_user) {
- printk(KERN_WARNING PREFIX "HPET id: %#x "
- "base: 0xfed0000000000000 is bogus\n "
- "try hpet=force on the kernel command line to "
- "fix it up to 0xfed00000.\n", hpet_tbl->id);
+ pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n",
+ hpet_tbl->id);
hpet_address = 0;
return 0;
}
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: 0xfed0000000000000 fixed up "
- "to 0xfed00000.\n", hpet_tbl->id);
+ pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
+ hpet_tbl->id);
hpet_address >>= 32;
}
#endif
- printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
- hpet_tbl->id, hpet_address);
+ pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address);
/*
* Allocate and initialize the HPET firmware resource for adding into
@@ -955,24 +946,24 @@ late_initcall(hpet_insert_resource);
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
- pr_debug("ACPI: no legacy devices present\n");
+ pr_debug("no legacy devices present\n");
x86_platform.legacy.devices.pnpbios = 0;
}
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
- pr_debug("ACPI: i8042 controller is absent\n");
+ pr_debug("i8042 controller is absent\n");
x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
- pr_debug("ACPI: not registering RTC platform device\n");
+ pr_debug("not registering RTC platform device\n");
x86_platform.legacy.rtc = 0;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
- pr_debug("ACPI: probing for VGA not safe\n");
+ pr_debug("probing for VGA not safe\n");
x86_platform.legacy.no_vga = 1;
}
@@ -997,8 +988,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
- printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
- pmtmr_ioport);
+ pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport);
#endif
return 0;
}
@@ -1024,8 +1014,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
+ pr_err("Error parsing LAPIC address override entry\n");
return count;
}
@@ -1057,8 +1046,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
sizeof(struct acpi_table_madt),
madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
if (ret < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC/X2APIC entries\n");
+ pr_err("Error parsing LAPIC/X2APIC entries\n");
return ret;
}
@@ -1066,11 +1054,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
x2count = madt_proc[1].count;
}
if (!count && !x2count) {
- printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ pr_err("No LAPIC entries present\n");
/* TBD: Cleanup to allow fallback to MPS */
return -ENODEV;
} else if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ pr_err("Error parsing LAPIC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1080,7 +1068,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
acpi_parse_lapic_nmi, 0);
if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ pr_err("Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1139,7 +1127,7 @@ static void __init mp_config_acpi_legacy_irqs(void)
}
if (idx != mp_irq_entries) {
- printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
+ pr_debug("ACPI: IRQ%d used by override.\n", i);
continue; /* IRQ already used */
}
@@ -1179,26 +1167,24 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* if "noapic" boot option, don't look for IO-APICs
*/
if (skip_ioapic_setup) {
- printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
- "due to 'noapic' option.\n");
+ pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
return -ENODEV;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
- printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ pr_err("No IOAPIC entries present\n");
return -ENODEV;
} else if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ pr_err("Error parsing IOAPIC entry\n");
return count;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
acpi_parse_int_src_ovr, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
+ pr_err("Error parsing interrupt source overrides entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1218,7 +1204,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
acpi_parse_nmi_src, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ pr_err("Error parsing NMI SRC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1251,8 +1237,7 @@ static void __init early_acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
}
@@ -1289,8 +1274,7 @@ static void __init acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
} else {
@@ -1300,8 +1284,7 @@ static void __init acpi_process_madt(void)
* Boot with "acpi=off" to use MPS on such a system.
*/
if (smp_found_config) {
- printk(KERN_WARNING PREFIX
- "No APIC-table, disabling MPS\n");
+ pr_warn("No APIC-table, disabling MPS\n");
smp_found_config = 0;
}
}
@@ -1311,11 +1294,9 @@ static void __init acpi_process_madt(void)
* processors, where MPS only supports physical.
*/
if (acpi_lapic && acpi_ioapic)
- printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
- "information\n");
+ pr_info("Using ACPI (MADT) for SMP configuration information\n");
else if (acpi_lapic)
- printk(KERN_INFO "Using ACPI for processor (LAPIC) "
- "configuration information\n");
+ pr_info("Using ACPI for processor (LAPIC) configuration information\n");
#endif
return;
}
@@ -1323,8 +1304,7 @@ static void __init acpi_process_madt(void)
static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
- d->ident);
+ pr_notice("%s detected: force use of acpi=noirq\n", d->ident);
acpi_noirq_set();
}
return 0;
@@ -1333,8 +1313,7 @@ static int __init disable_acpi_irq(const struct dmi_system_id *d)
static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
- d->ident);
+ pr_notice("%s detected: force use of pci=noacpi\n", d->ident);
acpi_disable_pci();
}
return 0;
@@ -1343,11 +1322,10 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d)
static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
+ pr_notice("%s detected: acpi off\n", d->ident);
disable_acpi();
} else {
- printk(KERN_NOTICE
- "Warning: DMI blacklist says broken, but acpi forced\n");
+ pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
}
return 0;
}
@@ -1574,9 +1552,9 @@ int __init early_acpi_boot_init(void)
*/
if (acpi_blacklisted()) {
if (acpi_force) {
- printk(KERN_WARNING PREFIX "acpi=force override\n");
+ pr_warn("acpi=force override\n");
} else {
- printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+ pr_warn("Disabling ACPI support\n");
disable_acpi();
return 1;
}
@@ -1692,9 +1670,7 @@ int __init acpi_mps_check(void)
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
if (acpi_disabled || acpi_noirq) {
- printk(KERN_WARNING "MPS support code is not built-in.\n"
- "Using acpi=off or acpi=noirq or pci=noacpi "
- "may have problem\n");
+ pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
}
#endif
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 49ae4e1ac9cd..7de599eba7f0 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -197,7 +197,8 @@ static int __init ffh_cstate_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_INTEL &&
- c->x86_vendor != X86_VENDOR_AMD)
+ c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON)
return -1;
cpu_cstate_entry = alloc_percpu(struct cstate_entry);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6974b5174495..e9da3dc71254 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -75,7 +75,7 @@ do { \
} \
} while (0)
-const unsigned char x86nops[] =
+static const unsigned char x86nops[] =
{
BYTES_NOP1,
BYTES_NOP2,
@@ -183,41 +183,69 @@ done:
}
/*
+ * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
+ *
+ * @instr: instruction byte stream
+ * @instrlen: length of the above
+ * @off: offset within @instr where the first NOP has been detected
+ *
+ * Return: number of NOPs found (and replaced).
+ */
+static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
+{
+ unsigned long flags;
+ int i = off, nnops;
+
+ while (i < instrlen) {
+ if (instr[i] != 0x90)
+ break;
+
+ i++;
+ }
+
+ nnops = i - off;
+
+ if (nnops <= 1)
+ return nnops;
+
+ local_irq_save(flags);
+ add_nops(instr + off, nnops);
+ local_irq_restore(flags);
+
+ DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
+
+ return nnops;
+}
+
+/*
* "noinline" to cause control flow change and thus invalidate I$ and
* cause refetch after modification.
*/
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
- unsigned long flags;
struct insn insn;
- int nop, i = 0;
+ int i = 0;
/*
- * Jump over the non-NOP insns, the remaining bytes must be single-byte
- * NOPs, optimize them.
+ * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
+ * ones.
*/
for (;;) {
if (insn_decode_kernel(&insn, &instr[i]))
return;
+ /*
+ * See if this and any potentially following NOPs can be
+ * optimized.
+ */
if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
- break;
-
- if ((i += insn.length) >= a->instrlen)
- return;
- }
+ i += optimize_nops_range(instr, a->instrlen, i);
+ else
+ i += insn.length;
- for (nop = i; i < a->instrlen; i++) {
- if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
+ if (i >= a->instrlen)
return;
}
-
- local_irq_save(flags);
- add_nops(instr + nop, i - nop);
- local_irq_restore(flags);
-
- DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
- instr, nop, a->instrlen);
}
/*
@@ -273,8 +301,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
instr, instr, a->instrlen,
replacement, a->replacementlen);
- DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
- DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
+ DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
+ DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
memcpy(insn_buff, replacement, a->replacementlen);
insn_buff_sz = a->replacementlen;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 09083094eb57..23dda362dc0f 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -25,6 +25,7 @@
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -57,6 +58,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{}
};
@@ -72,6 +74,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4a39fb429f15..d262811ce14b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
end_local_APIC_setup();
irq_remap_enable_fault_handling();
setup_IO_APIC();
+ lapic_update_legacy_vectors();
}
#ifdef CONFIG_UP_LATE_INIT
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6dbdc7c22bb7..fb67ed5e7e6a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -738,6 +738,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}
+void __init lapic_update_legacy_vectors(void)
+{
+ unsigned int i;
+
+ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+ return;
+
+ /*
+ * If the IO/APIC is disabled via config, kernel command line or
+ * lack of enumeration then all legacy interrupts are routed
+ * through the PIC. Make sure that they are marked as legacy
+ * vectors. PIC_CASCADE_IRQ has already been marked in
+ * lapic_assign_system_vectors().
+ */
+ for (i = 0; i < nr_legacy_irqs(); i++) {
+ if (i != PIC_CASCADE_IR)
+ lapic_assign_legacy_vector(i, true);
+ }
+}
+
void __init lapic_assign_system_vectors(void)
{
unsigned int i, vector = 0;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 2d11384dc9ab..b7c003013d41 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -593,8 +593,8 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
*/
if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
/* Check if memory encryption is enabled */
- rdmsrl(MSR_K8_SYSCFG, msr);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ rdmsrl(MSR_AMD64_SYSCFG, msr);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
goto clear_all;
/*
@@ -646,6 +646,10 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
@@ -1165,3 +1169,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
break;
}
}
+
+u32 amd_get_highest_perf(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
+ (c->x86_model >= 0x70 && c->x86_model < 0x80)))
+ return 166;
+
+ if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
+ (c->x86_model >= 0x40 && c->x86_model < 0x70)))
+ return 166;
+
+ return 255;
+}
+EXPORT_SYMBOL_GPL(amd_get_highest_perf);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a1b756c49a93..a99d00393206 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1773,10 +1773,16 @@ void syscall_init(void)
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif
- /* Flags to clear on syscall */
+ /*
+ * Flags to clear on syscall; clear as much as possible
+ * to minimize user space-kernel interference.
+ */
wrmsrl(MSR_SYSCALL_MASK,
- X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
- X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
+ X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
+ X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
+ X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
+ X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
+ X86_EFLAGS_AC|X86_EFLAGS_ID);
}
#else /* CONFIG_X86_64 */
@@ -1938,13 +1944,12 @@ void cpu_init_exception_handling(void)
/*
* cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
+ * initialized (naturally) in the bootstrap process, such as the GDT. We
+ * reload it nevertheless, this function acts as a 'CPU state barrier',
+ * nothing should get across.
*/
void cpu_init(void)
{
- struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
struct task_struct *cur = current;
int cpu = raw_smp_processor_id();
@@ -1957,8 +1962,6 @@ void cpu_init(void)
early_cpu_to_node(cpu) != NUMA_NO_NODE)
set_numa_node(early_cpu_to_node(cpu));
#endif
- setup_getcpu(cpu);
-
pr_debug("Initializing CPU#%d\n", cpu);
if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
@@ -1970,7 +1973,6 @@ void cpu_init(void)
* and set up the GDT descriptor:
*/
switch_to_new_gdt(cpu);
- load_current_idt();
if (IS_ENABLED(CONFIG_X86_64)) {
loadsegment(fs, 0);
@@ -1990,12 +1992,6 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, cur);
- /* Initialize the TSS. */
- tss_setup_ist(tss);
- tss_setup_io_bitmap(tss);
- set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
-
- load_TR_desc();
/*
* sp0 points to the entry trampoline stack regardless of what task
* is running.
@@ -2017,6 +2013,18 @@ void cpu_init(void)
load_fixmap_gdt(cpu);
}
+#ifdef CONFIG_SMP
+void cpu_init_secondary(void)
+{
+ /*
+ * Relies on the BP having set-up the IDT tables, which are loaded
+ * on this CPU in cpu_init_exception_handling().
+ */
+ cpu_init_exception_handling();
+ cpu_init();
+}
+#endif
+
/*
* The microcode loader calls this upon late microcode load to recheck features,
* only when microcode has been updated. Caller holds microcode_mutex and CPU
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 67944128876d..95521302630d 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -48,6 +48,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
enum tsx_ctrl_states {
TSX_CTRL_ENABLE,
TSX_CTRL_DISABLE,
+ TSX_CTRL_RTM_ALWAYS_ABORT,
TSX_CTRL_NOT_SUPPORTED,
};
@@ -56,6 +57,7 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
extern void tsx_enable(void);
extern void tsx_disable(void);
+extern void tsx_clear_cpuid(void);
#else
static inline void tsx_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 0bd6c74e3ba1..6d50136f7ab9 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -260,6 +260,10 @@ static void early_init_hygon(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8adffc17fa8b..8321c43554a1 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -10,6 +10,7 @@
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
@@ -41,6 +42,7 @@ enum split_lock_detect_state {
sld_off = 0,
sld_warn,
sld_fatal,
+ sld_ratelimit,
};
/*
@@ -717,8 +719,10 @@ static void init_intel(struct cpuinfo_x86 *c)
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
tsx_enable();
- if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
tsx_disable();
+ else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+ tsx_clear_cpuid();
split_lock_init();
bus_lock_init();
@@ -997,13 +1001,30 @@ static const struct {
{ "off", sld_off },
{ "warn", sld_warn },
{ "fatal", sld_fatal },
+ { "ratelimit:", sld_ratelimit },
};
+static struct ratelimit_state bld_ratelimit;
+
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
- int len = strlen(opt);
+ int len = strlen(opt), ratelimit;
+
+ if (strncmp(arg, opt, len))
+ return false;
+
+ /*
+ * Min ratelimit is 1 bus lock/sec.
+ * Max ratelimit is 1000 bus locks/sec.
+ */
+ if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
+ ratelimit > 0 && ratelimit <= 1000) {
+ ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
+ ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
+ return true;
+ }
- return len == arglen && !strncmp(arg, opt, len);
+ return len == arglen;
}
static bool split_lock_verify_msr(bool on)
@@ -1082,6 +1103,15 @@ static void sld_update_msr(bool on)
static void split_lock_init(void)
{
+ /*
+ * #DB for bus lock handles ratelimit and #AC for split lock is
+ * disabled.
+ */
+ if (sld_state == sld_ratelimit) {
+ split_lock_verify_msr(false);
+ return;
+ }
+
if (cpu_model_supports_sld)
split_lock_verify_msr(sld_state != sld_off);
}
@@ -1154,6 +1184,12 @@ void handle_bus_lock(struct pt_regs *regs)
switch (sld_state) {
case sld_off:
break;
+ case sld_ratelimit:
+ /* Enforce no more than bld_ratelimit bus locks/sec. */
+ while (!__ratelimit(&bld_ratelimit))
+ msleep(20);
+ /* Warn on the bus lock. */
+ fallthrough;
case sld_warn:
pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
current->comm, current->pid, regs->ip);
@@ -1259,6 +1295,10 @@ static void sld_state_show(void)
" from non-WB" : "");
}
break;
+ case sld_ratelimit:
+ if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+ pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
+ break;
}
}
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index e486f96b3cb3..08831acc1d03 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -77,27 +77,29 @@ struct smca_bank_name {
};
static struct smca_bank_name smca_names[] = {
- [SMCA_LS] = { "load_store", "Load Store Unit" },
- [SMCA_LS_V2] = { "load_store", "Load Store Unit" },
- [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
- [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
- [SMCA_DE] = { "decode_unit", "Decode Unit" },
- [SMCA_RESERVED] = { "reserved", "Reserved" },
- [SMCA_EX] = { "execution_unit", "Execution Unit" },
- [SMCA_FP] = { "floating_point", "Floating Point Unit" },
- [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
- [SMCA_CS] = { "coherent_slave", "Coherent Slave" },
- [SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
- [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
- [SMCA_UMC] = { "umc", "Unified Memory Controller" },
- [SMCA_PB] = { "param_block", "Parameter Block" },
- [SMCA_PSP] = { "psp", "Platform Security Processor" },
- [SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
- [SMCA_SMU] = { "smu", "System Management Unit" },
- [SMCA_SMU_V2] = { "smu", "System Management Unit" },
- [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
- [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
- [SMCA_PCIE] = { "pcie", "PCI Express Unit" },
+ [SMCA_LS ... SMCA_LS_V2] = { "load_store", "Load Store Unit" },
+ [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
+ [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
+ [SMCA_DE] = { "decode_unit", "Decode Unit" },
+ [SMCA_RESERVED] = { "reserved", "Reserved" },
+ [SMCA_EX] = { "execution_unit", "Execution Unit" },
+ [SMCA_FP] = { "floating_point", "Floating Point Unit" },
+ [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
+ [SMCA_CS ... SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
+ [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
+
+ /* UMC v2 is separate because both of them can exist in a single system. */
+ [SMCA_UMC] = { "umc", "Unified Memory Controller" },
+ [SMCA_UMC_V2] = { "umc_v2", "Unified Memory Controller v2" },
+ [SMCA_PB] = { "param_block", "Parameter Block" },
+ [SMCA_PSP ... SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
+ [SMCA_SMU ... SMCA_SMU_V2] = { "smu", "System Management Unit" },
+ [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
+ [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
+ [SMCA_PCIE ... SMCA_PCIE_V2] = { "pcie", "PCI Express Unit" },
+ [SMCA_XGMI_PCS] = { "xgmi_pcs", "Ext Global Memory Interconnect PCS Unit" },
+ [SMCA_XGMI_PHY] = { "xgmi_phy", "Ext Global Memory Interconnect PHY Unit" },
+ [SMCA_WAFL_PHY] = { "wafl_phy", "WAFL PHY Unit" },
};
static const char *smca_get_name(enum smca_bank_types t)
@@ -155,6 +157,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* Unified Memory Controller MCA type */
{ SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
+ { SMCA_UMC_V2, HWID_MCATYPE(0x96, 0x1) },
/* Parameter Block MCA type */
{ SMCA_PB, HWID_MCATYPE(0x05, 0x0) },
@@ -175,6 +178,16 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* PCI Express Unit MCA type */
{ SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) },
+ { SMCA_PCIE_V2, HWID_MCATYPE(0x46, 0x1) },
+
+ /* xGMI PCS MCA type */
+ { SMCA_XGMI_PCS, HWID_MCATYPE(0x50, 0x0) },
+
+ /* xGMI PHY MCA type */
+ { SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) },
+
+ /* WAFL PHY MCA type */
+ { SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) },
};
struct smca_bank smca_banks[MAX_NR_BANKS];
diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c
index b58b85380ddb..0e3ae64d3b76 100644
--- a/arch/x86/kernel/cpu/mce/apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -36,7 +36,8 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
mce_setup(&m);
m.bank = -1;
/* Fake a memory read error with unknown channel */
- m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
+ m.misc = (MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT;
if (severity >= GHES_SEV_RECOVERABLE)
m.status |= MCI_STATUS_UC;
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index bf7fe87a7e88..22791aadc085 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1257,19 +1257,28 @@ static void kill_me_maybe(struct callback_head *cb)
{
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
int flags = MF_ACTION_REQUIRED;
+ int ret;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
flags |= MF_MUST_KILL;
- if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
- !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
+ ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
+ if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
sync_core();
return;
}
+ /*
+ * -EHWPOISON from memory_failure() means that it already sent SIGBUS
+ * to the current process with the proper error info, so no need to
+ * send SIGBUS here again.
+ */
+ if (ret == -EHWPOISON)
+ return;
+
if (p->mce_vaddr != (void __user *)-1l) {
force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
} else {
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 22f13343b5da..cc8f1773deca 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/i8253.h>
+#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
@@ -236,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
for_each_present_cpu(i) {
if (i == 0)
continue;
- ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
+ ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
BUG_ON(ret);
}
@@ -252,6 +253,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
static void __init ms_hyperv_init_platform(void)
{
+ int hv_max_functions_eax;
int hv_host_info_eax;
int hv_host_info_ebx;
int hv_host_info_ecx;
@@ -269,6 +271,8 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
+ hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);
+
pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
ms_hyperv.misc_features);
@@ -298,8 +302,7 @@ static void __init ms_hyperv_init_platform(void)
/*
* Extract host information.
*/
- if (cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS) >=
- HYPERV_CPUID_VERSION) {
+ if (hv_max_functions_eax >= HYPERV_CPUID_VERSION) {
hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION);
hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION);
hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION);
@@ -325,9 +328,11 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
}
- if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {
+ if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
ms_hyperv.nested_features =
cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
+ pr_info("Hyper-V: Nested features: 0x%x\n",
+ ms_hyperv.nested_features);
}
/*
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 0c3b372318b7..b5f43049fa5f 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -836,7 +836,7 @@ int __init amd_special_default_mtrr(void)
if (boot_cpu_data.x86 < 0xf)
return 0;
/* In case some hypervisor doesn't pass SYSCFG through: */
- if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
+ if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
return 0;
/*
* Memory between 4GB and top of mem is forced WB by this magic bit.
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b90f3f437765..558108296f3c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -53,13 +53,13 @@ static inline void k8_check_syscfg_dram_mod_en(void)
(boot_cpu_data.x86 >= 0x0f)))
return;
- rdmsr(MSR_K8_SYSCFG, lo, hi);
+ rdmsr(MSR_AMD64_SYSCFG, lo, hi);
if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
" not cleared by BIOS, clearing this bit\n",
smp_processor_id());
lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
- mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
+ mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
}
}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 3ef5868ac588..7aecb2fc3186 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BPU_PERFCTR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BSU_ESCR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_EVENTSEL0;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index c4d320d02fd5..6a5f60a37219 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -70,6 +70,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* struct mon_evt - Entry in the event list of a resource
* @evtid: event id
* @name: name of the event
+ * @list: entry in &rdt_resource->evt_list
*/
struct mon_evt {
u32 evtid;
@@ -78,10 +79,13 @@ struct mon_evt {
};
/**
- * struct mon_data_bits - Monitoring details for each event file
- * @rid: Resource id associated with the event file.
+ * union mon_data_bits - Monitoring details for each event file
+ * @priv: Used to store monitoring event data in @u
+ * as kernfs private data
+ * @rid: Resource id associated with the event file
* @evtid: Event id associated with the event file
* @domid: The domain to which the event file belongs
+ * @u: Name of the bit fields struct
*/
union mon_data_bits {
void *priv;
@@ -119,6 +123,7 @@ enum rdt_group_type {
* @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
* @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
* allowed AND the allocations are Cache Pseudo-Locked
+ * @RDT_NUM_MODES: Total number of modes
*
* The mode of a resource group enables control over the allowed overlap
* between allocations associated with different resource groups (classes
@@ -142,7 +147,7 @@ enum rdtgrp_mode {
/**
* struct mongroup - store mon group's data in resctrl fs.
- * @mon_data_kn kernlfs node for the mon_data directory
+ * @mon_data_kn: kernfs node for the mon_data directory
* @parent: parent rdtgrp
* @crdtgrp_list: child rdtgroup node list
* @rmid: rmid for this rdtgroup
@@ -282,11 +287,11 @@ struct rftype {
/**
* struct mbm_state - status for each MBM counter in each domain
* @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
- * @prev_msr Value of IA32_QM_CTR for this RMID last time we read it
+ * @prev_msr: Value of IA32_QM_CTR for this RMID last time we read it
* @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
- * @prev_bw The most recent bandwidth in MBps
- * @delta_bw Difference between the current and previous bandwidth
- * @delta_comp Indicates whether to compute the delta_bw
+ * @prev_bw: The most recent bandwidth in MBps
+ * @delta_bw: Difference between the current and previous bandwidth
+ * @delta_comp: Indicates whether to compute the delta_bw
*/
struct mbm_state {
u64 chunks;
@@ -456,11 +461,13 @@ struct rdt_parse_data {
* @data_width: Character width of data when displaying
* @domains: All domains for this resource
* @cache: Cache allocation related data
+ * @membw: If the component has bandwidth controls, their properties.
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
* @evt_list: List of monitoring events
* @num_rmid: Number of RMIDs available
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
+ * @mbm_width: Monitor width, to detect and correct for overflow.
* @fflags: flags to choose base and info files
*/
struct rdt_resource {
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 05a89e33fde2..2207916cae65 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -49,6 +49,7 @@ static struct class *pseudo_lock_class;
/**
* get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * @void: It takes no parameters.
*
* Capture the list of platforms that have been validated to support
* pseudo-locking. This includes testing to ensure pseudo-locked regions
@@ -162,7 +163,7 @@ static struct rdtgroup *region_find_by_minor(unsigned int minor)
}
/**
- * pseudo_lock_pm_req - A power management QoS request list entry
+ * struct pseudo_lock_pm_req - A power management QoS request list entry
* @list: Entry within the @pm_reqs list for a pseudo-locked region
* @req: PM QoS request
*/
@@ -184,6 +185,7 @@ static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
/**
* pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ * @plr: Pseudo-locked region
*
* To prevent the cache from being affected by power management entering
* C6 has to be avoided. This is accomplished by requesting a latency
@@ -196,6 +198,8 @@ static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
* the ACPI latencies need to be considered while keeping in mind that C2
* may be set to map to deeper sleep states. In this case the latency
* requirement needs to prevent entering C2 also.
+ *
+ * Return: 0 on success, <0 on failure
*/
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
{
@@ -520,7 +524,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
/**
* rdtgroup_monitor_in_progress - Test if monitoring in progress
- * @r: resource group being queried
+ * @rdtgrp: resource group being queried
*
* Return: 1 if monitor groups have been created for this resource
* group, 0 otherwise.
@@ -1140,6 +1144,8 @@ out:
/**
* pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
+ * @rdtgrp: Resource group to which the pseudo-locked region belongs.
+ * @sel: Selector of which measurement to perform on a pseudo-locked region.
*
* The measurement of latency to access a pseudo-locked region should be
* done from a cpu that is associated with that pseudo-locked region.
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 3be203297988..001808e3901c 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -383,7 +383,7 @@ const struct vm_operations_struct sgx_vm_ops = {
/**
* sgx_encl_release - Destroy an enclave instance
- * @kref: address of a kref inside &sgx_encl
+ * @ref: address of a kref inside &sgx_encl
*
* Used together with kref_put(). Frees all the resources associated with the
* enclave and the instance itself.
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 6e74f85b6264..fec43ca65065 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -91,8 +91,8 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
{
struct vm_area_struct *result;
- result = find_vma(mm, addr);
- if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
+ result = vma_lookup(mm, addr);
+ if (!result || result->vm_ops != &sgx_vm_ops)
return -EINVAL;
*vma = result;
diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
index 6ad165a5c0cc..64511c4a5200 100644
--- a/arch/x86/kernel/cpu/sgx/virt.c
+++ b/arch/x86/kernel/cpu/sgx/virt.c
@@ -212,6 +212,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
list_splice_tail(&secs_pages, &zombie_secs_pages);
mutex_unlock(&zombie_secs_pages_lock);
+ xa_destroy(&vepc->page_array);
kfree(vepc);
return 0;
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index e2ad30e474f8..9c7a5f049292 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -2,7 +2,7 @@
/*
* Intel Transactional Synchronization Extensions (TSX) control.
*
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*
* Author:
* Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
@@ -84,13 +84,46 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
return TSX_CTRL_ENABLE;
}
+void tsx_clear_cpuid(void)
+{
+ u64 msr;
+
+ /*
+ * MSR_TFA_TSX_CPUID_CLEAR bit is only present when both CPUID
+ * bits RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are present.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+ msr |= MSR_TFA_TSX_CPUID_CLEAR;
+ wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+ }
+}
+
void __init tsx_init(void)
{
char arg[5] = {};
int ret;
- if (!tsx_ctrl_is_supported())
+ /*
+ * Hardware will always abort a TSX transaction if both CPUID bits
+ * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
+ * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
+ * here.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
+ tsx_clear_cpuid();
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
+ setup_clear_cpu_cap(X86_FEATURE_HLE);
return;
+ }
+
+ if (!tsx_ctrl_is_supported()) {
+ tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
+ return;
+ }
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
if (ret >= 0) {
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 54ce999ed321..e8326a8d1c5d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -70,19 +70,6 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
rcu_read_unlock();
}
-/*
- * When the crashkernel option is specified, only use the low
- * 1M for the real mode trampoline.
- */
-void __init crash_reserve_low_1M(void)
-{
- if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
- return;
-
- memblock_reserve(0, 1<<20);
- pr_info("Reserving the low 1M of memory for crashkernel\n");
-}
-
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6edd1e2ee8af..38837dad46e6 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -549,9 +549,11 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_CNL_IDS(&gen9_early_ops),
INTEL_ICL_11_IDS(&gen11_early_ops),
INTEL_EHL_IDS(&gen11_early_ops),
+ INTEL_JSL_IDS(&gen11_early_ops),
INTEL_TGL_12_IDS(&gen11_early_ops),
INTEL_RKL_IDS(&gen11_early_ops),
INTEL_ADLS_IDS(&gen11_early_ops),
+ INTEL_ADLP_IDS(&gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index a4ec65317a7f..b7b92cdf3add 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -221,28 +221,18 @@ sanitize_restored_user_xstate(union fpregs_state *state,
if (use_xsave()) {
/*
- * Note: we don't need to zero the reserved bits in the
- * xstate_header here because we either didn't copy them at all,
- * or we checked earlier that they aren't set.
+ * Clear all feature bits which are not set in
+ * user_xfeatures and clear all extended features
+ * for fx_only mode.
*/
+ u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;
/*
- * 'user_xfeatures' might have bits clear which are
- * set in header->xfeatures. This represents features that
- * were in init state prior to a signal delivery, and need
- * to be reset back to the init state. Clear any user
- * feature bits which are set in the kernel buffer to get
- * them back to the init state.
- *
- * Supervisor state is unchanged by input from userspace.
- * Ensure supervisor state bits stay set and supervisor
- * state is not modified.
+ * Supervisor state has to be preserved. The sigframe
+ * restore can only modify user features, i.e. @mask
+ * cannot contain them.
*/
- if (fx_only)
- header->xfeatures = XFEATURE_MASK_FPSSE;
- else
- header->xfeatures &= user_xfeatures |
- xfeatures_mask_supervisor();
+ header->xfeatures &= mask | xfeatures_mask_supervisor();
}
if (use_fxsr()) {
@@ -307,13 +297,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
return 0;
}
- if (!access_ok(buf, size))
- return -EACCES;
+ if (!access_ok(buf, size)) {
+ ret = -EACCES;
+ goto out;
+ }
- if (!static_cpu_has(X86_FEATURE_FPU))
- return fpregs_soft_set(current, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
- NULL, buf) != 0;
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
+ ret = fpregs_soft_set(current, NULL, 0,
+ sizeof(struct user_i387_ia32_struct),
+ NULL, buf);
+ goto out;
+ }
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
@@ -369,6 +363,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_unlock();
return 0;
}
+
+ /*
+ * The above did an FPU restore operation, restricted to
+ * the user portion of the registers, and failed, but the
+ * microcode might have modified the FPU registers
+ * nevertheless.
+ *
+ * If the FPU registers do not belong to current, then
+ * invalidate the FPU register state otherwise the task might
+ * preempt current and return to user space with corrupted
+ * FPU registers.
+ *
+ * In case current owns the FPU registers then no further
+ * action is required. The fixup below will handle it
+ * correctly.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ __cpu_invalidate_fpregs_state();
+
fpregs_unlock();
} else {
/*
@@ -377,7 +390,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
*/
ret = __copy_from_user(&env, buf, sizeof(env));
if (ret)
- goto err_out;
+ goto out;
envp = &env;
}
@@ -405,16 +418,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (use_xsave() && !fx_only) {
u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
- if (using_compacted_format()) {
- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
- } else {
- ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-
- if (!ret && state_size > offsetof(struct xregs_state, header))
- ret = validate_user_xstate_header(&fpu->state.xsave.header);
- }
+ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
if (ret)
- goto err_out;
+ goto out;
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
fx_only);
@@ -434,7 +440,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
if (ret) {
ret = -EFAULT;
- goto err_out;
+ goto out;
}
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
@@ -452,7 +458,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
} else {
ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
if (ret)
- goto err_out;
+ goto out;
fpregs_lock();
ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
@@ -463,7 +469,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_deactivate(fpu);
fpregs_unlock();
-err_out:
+out:
if (ret)
fpu__clear_user_states(fpu);
return ret;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index a85c64000218..1cadb2faf740 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -441,12 +441,35 @@ static void __init print_xstate_offset_size(void)
}
/*
+ * All supported features have either init state all zeros or are
+ * handled in setup_init_fpu() individually. This is an explicit
+ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
+ * newly added supported features at build time and make people
+ * actually look at the init state for the new feature.
+ */
+#define XFEATURES_INIT_FPSTATE_HANDLED \
+ (XFEATURE_MASK_FP | \
+ XFEATURE_MASK_SSE | \
+ XFEATURE_MASK_YMM | \
+ XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_ZMM_Hi256 | \
+ XFEATURE_MASK_Hi16_ZMM | \
+ XFEATURE_MASK_PKRU | \
+ XFEATURE_MASK_BNDREGS | \
+ XFEATURE_MASK_BNDCSR | \
+ XFEATURE_MASK_PASID)
+
+/*
* setup the xstate image representing the init state
*/
static void __init setup_init_fpu_buf(void)
{
static int on_boot_cpu __initdata = 1;
+ BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
+ XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
+ XFEATURES_INIT_FPSTATE_HANDLED);
+
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
@@ -466,10 +489,22 @@ static void __init setup_init_fpu_buf(void)
copy_kernel_to_xregs_booting(&init_fpstate.xsave);
/*
- * Dump the init state again. This is to identify the init state
- * of any feature which is not represented by all zero's.
+ * All components are now in init state. Read the state back so
+ * that init_fpstate contains all non-zero init state. This only
+ * works with XSAVE, but not with XSAVEOPT and XSAVES because
+ * those use the init optimization which skips writing data for
+ * components in init state.
+ *
+ * XSAVE could be used, but that would require to reshuffle the
+ * data when XSAVES is available because XSAVES uses xstate
+ * compaction. But doing so is a pointless exercise because most
+ * components have an all zeros init state except for the legacy
+ * ones (FP and SSE). Those can be saved with FXSAVE into the
+ * legacy area. Adding new features requires to ensure that init
+ * state is all zeroes or if not to add the necessary handling
+ * here.
*/
- copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+ fxsave(&init_fpstate.fxsave);
}
static int xfeature_uncompacted_offset(int xfeature_nr)
@@ -1402,60 +1437,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
-
-#ifdef CONFIG_IOMMU_SUPPORT
-void update_pasid(void)
-{
- u64 pasid_state;
- u32 pasid;
-
- if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
- return;
-
- if (!current->mm)
- return;
-
- pasid = READ_ONCE(current->mm->pasid);
- /* Set the valid bit in the PASID MSR/state only for valid pasid. */
- pasid_state = pasid == PASID_DISABLED ?
- pasid : pasid | MSR_IA32_PASID_VALID;
-
- /*
- * No need to hold fregs_lock() since the task's fpstate won't
- * be changed by others (e.g. ptrace) while the task is being
- * switched to or is in IPI.
- */
- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
- /* The MSR is active and can be directly updated. */
- wrmsrl(MSR_IA32_PASID, pasid_state);
- } else {
- struct fpu *fpu = &current->thread.fpu;
- struct ia32_pasid_state *ppasid_state;
- struct xregs_state *xsave;
-
- /*
- * The CPU's xstate registers are not currently active. Just
- * update the PASID state in the memory buffer here. The
- * PASID MSR will be loaded when returning to user mode.
- */
- xsave = &fpu->state.xsave;
- xsave->header.xfeatures |= XFEATURE_MASK_PASID;
- ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
- /*
- * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
- * won't be NULL and no need to check its value.
- *
- * Only update the task's PASID state when it's different
- * from the mm's pasid.
- */
- if (ppasid_state->pasid != pasid_state) {
- /*
- * Invalid fpregs so that state restoring will pick up
- * the PASID state.
- */
- __fpu_invalidate_fpregs_state(fpu);
- ppasid_state->pasid = pasid_state;
- }
- }
-}
-#endif /* CONFIG_IOMMU_SUPPORT */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 18be44163a50..de01903c3735 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -39,7 +39,7 @@
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
/*
* Manage page tables very early on.
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 04bddaaba8e2..d8b3ebd2bb85 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -62,7 +62,7 @@ SYM_CODE_START_NOALIGN(startup_64)
*/
/* Set up the stack for verify_cpu(), similar to initial_stack below */
- leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp
+ leaq (__end_init_task - FRAME_SIZE)(%rip), %rsp
leaq _text(%rip), %rdi
pushq %rsi
@@ -343,10 +343,10 @@ SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
#endif
/*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
+ * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
* reliably detect the end of the stack.
*/
-SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
+SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
__FINITDATA
__INIT
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index d552f177eca0..df0fa695bb09 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -35,12 +35,16 @@
#define SYSG(_vector, _addr) \
G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)
+#ifdef CONFIG_X86_64
/*
* Interrupt gate with interrupt stack. The _ist index is the index in
* the tss.ist[] array, but for the descriptor it needs to start at 1.
*/
#define ISTG(_vector, _addr, _ist) \
G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS)
+#else
+#define ISTG(_vector, _addr, _ist) INTG(_vector, _addr)
+#endif
/* Task gate */
#define TSKG(_vector, _gdt) \
@@ -74,7 +78,7 @@ static const __initconst struct idt_data early_idts[] = {
*/
static const __initconst struct idt_data def_idts[] = {
INTG(X86_TRAP_DE, asm_exc_divide_error),
- INTG(X86_TRAP_NMI, asm_exc_nmi),
+ ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
INTG(X86_TRAP_BR, asm_exc_bounds),
INTG(X86_TRAP_UD, asm_exc_invalid_op),
INTG(X86_TRAP_NM, asm_exc_device_not_available),
@@ -91,12 +95,16 @@ static const __initconst struct idt_data def_idts[] = {
#ifdef CONFIG_X86_32
TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS),
#else
- INTG(X86_TRAP_DF, asm_exc_double_fault),
+ ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
#endif
- INTG(X86_TRAP_DB, asm_exc_debug),
+ ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
#ifdef CONFIG_X86_MCE
- INTG(X86_TRAP_MC, asm_exc_machine_check),
+ ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
+#endif
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC),
#endif
SYSG(X86_TRAP_OF, asm_exc_overflow),
@@ -221,22 +229,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
INTG(X86_TRAP_PF, asm_exc_page_fault),
};
-/*
- * The exceptions which use Interrupt stacks. They are setup after
- * cpu_init() when the TSS has been initialized.
- */
-static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
- ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
- ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
-#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
-#endif
-#ifdef CONFIG_AMD_MEM_ENCRYPT
- ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC),
-#endif
-};
-
/**
* idt_setup_early_pf - Initialize the idt table with early pagefault handler
*
@@ -254,14 +246,6 @@ void __init idt_setup_early_pf(void)
idt_setup_from_table(idt_table, early_pf_idts,
ARRAY_SIZE(early_pf_idts), true);
}
-
-/**
- * idt_setup_ist_traps - Initialize the idt table with traps using IST
- */
-void __init idt_setup_ist_traps(void)
-{
- idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true);
-}
#endif
static void __init idt_map_in_cea(void)
@@ -331,11 +315,10 @@ void __init idt_setup_early_handler(void)
/**
* idt_invalidate - Invalidate interrupt descriptor table
- * @addr: The virtual address of the 'invalid' IDT
*/
-void idt_invalidate(void *addr)
+void idt_invalidate(void)
{
- struct desc_ptr idt = { .address = (unsigned long) addr, .size = 0 };
+ static const struct desc_ptr idt = { .address = 0, .size = 0 };
load_idt(&idt);
}
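
With the IST indices folded straight into def_idts[] above, the separate ist_idts[] table and idt_setup_ist_traps() go away, and on 32-bit ISTG() simply degrades to a plain interrupt gate. A rough, self-contained sketch of the table-driven setup this relies on follows; the struct and names are illustrative, not the kernel's:

#include <stdio.h>

/* Illustrative gate descriptor: vector, handler, optional IST index (0 = none). */
struct gate_sketch {
	int vector;
	void (*handler)(void);
	int ist;
};

static void handle_nmi(void) { puts("nmi"); }
static void handle_db(void)  { puts("db"); }

/* One const table walked once at init, as idt_setup_from_table() does. */
static const struct gate_sketch def_gates[] = {
	{ 2, handle_nmi, 4 },	/* NMI runs on its own stack */
	{ 1, handle_db,  3 },	/* #DB runs on its own stack */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(def_gates) / sizeof(def_gates[0]); i++) {
		printf("vector %d uses IST %d: ", def_gates[i].vector, def_gates[i].ist);
		def_gates[i].handler();
	}
	return 0;
}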
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 6a2eb62c85e6..674906fad43b 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -15,50 +15,75 @@
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
+#include <asm/insn.h>
-static void bug_at(const void *ip, int line)
+int arch_jump_entry_size(struct jump_entry *entry)
{
- /*
- * The location is not an op that we were expecting.
- * Something went wrong. Crash the box, as something could be
- * corrupting the kernel.
- */
- pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
- BUG();
+ struct insn insn = {};
+
+ insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
+ BUG_ON(insn.length != 2 && insn.length != 5);
+
+ return insn.length;
}
-static const void *
-__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
+struct jump_label_patch {
+ const void *code;
+ int size;
+};
+
+static struct jump_label_patch
+__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
- const void *expect, *code;
+ const void *expect, *code, *nop;
const void *addr, *dest;
- int line;
+ int size;
addr = (void *)jump_entry_code(entry);
dest = (void *)jump_entry_target(entry);
- code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ size = arch_jump_entry_size(entry);
+ switch (size) {
+ case JMP8_INSN_SIZE:
+ code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
- if (type == JUMP_LABEL_JMP) {
- expect = x86_nops[5]; line = __LINE__;
- } else {
- expect = code; line = __LINE__;
+ case JMP32_INSN_SIZE:
+ code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
+
+ default: BUG();
}
- if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
- bug_at(addr, line);
+ if (type == JUMP_LABEL_JMP)
+ expect = nop;
+ else
+ expect = code;
+
+ if (memcmp(addr, expect, size)) {
+ /*
+ * The location is not an op that we were expecting.
+ * Something went wrong. Crash the box, as something could be
+ * corrupting the kernel.
+ */
+ pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
+ addr, addr, addr, expect, size, type);
+ BUG();
+ }
if (type == JUMP_LABEL_NOP)
- code = x86_nops[5];
+ code = nop;
- return code;
+ return (struct jump_label_patch){.code = code, .size = size};
}
static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
- const void *opcode = __jump_label_set_jump_code(entry, type);
+ const struct jump_label_patch jlp = __jump_label_patch(entry, type);
/*
* As long as only a single processor is running and the code is still
@@ -72,12 +97,11 @@ static inline void __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*/
if (init || system_state == SYSTEM_BOOTING) {
- text_poke_early((void *)jump_entry_code(entry), opcode,
- JUMP_LABEL_NOP_SIZE);
+ text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
return;
}
- text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
static void __ref jump_label_transform(struct jump_entry *entry,
@@ -98,7 +122,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
- const void *opcode;
+ struct jump_label_patch jlp;
if (system_state == SYSTEM_BOOTING) {
/*
@@ -109,9 +133,8 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
}
mutex_lock(&text_mutex);
- opcode = __jump_label_set_jump_code(entry, type);
- text_poke_queue((void *)jump_entry_code(entry),
- opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ jlp = __jump_label_patch(entry, type);
+ text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
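
Since jump sites can now be either 2 or 5 bytes, the helper above returns both the replacement bytes and their length in one small struct. Below is a minimal userspace sketch of that return-by-value pattern; the byte values and names are illustrative only:

#include <stdio.h>
#include <string.h>

/* Carry the bytes to poke and their size together, as __jump_label_patch() does. */
struct patch_desc {
	const unsigned char *code;
	int size;
};

static const unsigned char nop2[] = { 0x66, 0x90 };	/* 2-byte NOP */
static const unsigned char jmp8[] = { 0xeb, 0x00 };	/* short JMP, zero displacement */

static struct patch_desc make_patch(int want_jump)
{
	struct patch_desc p = {
		.code = want_jump ? jmp8 : nop2,
		.size = 2,
	};
	return p;	/* returned by value; cheap for two fields */
}

int main(void)
{
	unsigned char site[2];
	struct patch_desc p = make_patch(1);

	memcpy(site, p.code, p.size);	/* stand-in for text_poke_bp() */
	printf("patched %d byte(s): %02x %02x\n", p.size, site[0], site[1]);
	return 0;
}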
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index d3d65545cb8b..b6e046e4b289 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -422,12 +422,6 @@ void *alloc_insn_page(void)
return page;
}
-/* Recover page to RW mode before releasing it */
-void free_insn_page(void *page)
-{
- module_memfree(page);
-}
-
/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */
static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
@@ -674,7 +668,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
break;
if (insn->addr_bytes != sizeof(unsigned long))
- return -EOPNOTSUPP; /* Don't support differnt size */
+ return -EOPNOTSUPP; /* Don't support different size */
if (X86_MODRM_MOD(opcode) != 3)
return -EOPNOTSUPP; /* TODO: support memory addressing */
@@ -1102,24 +1096,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
- } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
- kcb->kprobe_status == KPROBE_HIT_SSDONE) {
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
}
return 0;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d307c22e5c18..a26643dc6bd6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -26,6 +26,7 @@
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
+#include <linux/syscore_ops.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -37,6 +38,7 @@
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
+#include <asm/reboot.h>
#include <asm/svm.h>
DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -345,7 +347,7 @@ static void kvm_guest_cpu_init(void)
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(apf_reason.enabled, 1);
- pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
+ pr_info("setup async PF for cpu %d\n", smp_processor_id());
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
@@ -371,34 +373,17 @@ static void kvm_pv_disable_apf(void)
wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
__this_cpu_write(apf_reason.enabled, 0);
- pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+ pr_info("disable async PF for cpu %d\n", smp_processor_id());
}
-static void kvm_pv_guest_cpu_reboot(void *unused)
+static void kvm_disable_steal_time(void)
{
- /*
- * We disable PV EOI before we load a new kernel by kexec,
- * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
- * New kernel can re-enable when it boots.
- */
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- kvm_disable_steal_time();
-}
+ if (!has_steal_clock)
+ return;
-static int kvm_pv_reboot_notify(struct notifier_block *nb,
- unsigned long code, void *unused)
-{
- if (code == SYS_RESTART)
- on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
- return NOTIFY_DONE;
+ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
-static struct notifier_block kvm_pv_reboot_nb = {
- .notifier_call = kvm_pv_reboot_notify,
-};
-
static u64 kvm_steal_clock(int cpu)
{
u64 steal;
@@ -416,14 +401,6 @@ static u64 kvm_steal_clock(int cpu)
return steal;
}
-void kvm_disable_steal_time(void)
-{
- if (!has_steal_clock)
- return;
-
- wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
-}
-
static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
early_set_memory_decrypted((unsigned long) ptr, size);
@@ -451,6 +428,27 @@ static void __init sev_map_percpu_data(void)
}
}
+static void kvm_guest_cpu_offline(bool shutdown)
+{
+ kvm_disable_steal_time();
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ kvm_pv_disable_apf();
+ if (!shutdown)
+ apf_task_wake_all();
+ kvmclock_disable();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ kvm_guest_cpu_init();
+ local_irq_restore(flags);
+ return 0;
+}
+
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
@@ -635,31 +633,64 @@ static void __init kvm_smp_prepare_boot_cpu(void)
kvm_spinlock_init();
}
-static void kvm_guest_cpu_offline(void)
+static int kvm_cpu_down_prepare(unsigned int cpu)
{
- kvm_disable_steal_time();
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- apf_task_wake_all();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ kvm_guest_cpu_offline(false);
+ local_irq_restore(flags);
+ return 0;
}
-static int kvm_cpu_online(unsigned int cpu)
+#endif
+
+static int kvm_suspend(void)
{
- local_irq_disable();
- kvm_guest_cpu_init();
- local_irq_enable();
+ kvm_guest_cpu_offline(false);
+
return 0;
}
-static int kvm_cpu_down_prepare(unsigned int cpu)
+static void kvm_resume(void)
{
- local_irq_disable();
- kvm_guest_cpu_offline();
- local_irq_enable();
- return 0;
+ kvm_cpu_online(raw_smp_processor_id());
+}
+
+static struct syscore_ops kvm_syscore_ops = {
+ .suspend = kvm_suspend,
+ .resume = kvm_resume,
+};
+
+static void kvm_pv_guest_cpu_reboot(void *unused)
+{
+ kvm_guest_cpu_offline(true);
+}
+
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ if (code == SYS_RESTART)
+ on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
+ return NOTIFY_DONE;
}
+static struct notifier_block kvm_pv_reboot_nb = {
+ .notifier_call = kvm_pv_reboot_notify,
+};
+
+/*
+ * After a PV feature is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shutdown, this memory
+ * won't be valid. In cases like kexec, in which you install a new kernel, this
+ * means a random memory location will keep being written to.
+ */
+#ifdef CONFIG_KEXEC_CORE
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+ kvm_guest_cpu_offline(true);
+ native_machine_crash_shutdown(regs);
+}
#endif
static void __init kvm_guest_init(void)
@@ -704,6 +735,12 @@ static void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+
+ register_syscore_ops(&kvm_syscore_ops);
+
/*
* Hard lockup detection is enabled by default. Disable it, as guests
* can get false positives too easily, for example if the host is
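
The net effect of the kvm.c rework is a single teardown helper shared by the CPU-offline, reboot, crash and suspend paths, with suspend/resume wired up through syscore_ops. A minimal sketch of that registration pattern; only the syscore API itself is real, every demo_* name is made up:

#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <linux/types.h>

/* Illustrative teardown shared by every "this CPU is going away" path. */
static void demo_pv_teardown(bool shutdown)
{
	/* unregister paravirt features here; omitted in this sketch */
}

static int demo_suspend(void)
{
	demo_pv_teardown(false);
	return 0;
}

static void demo_resume(void)
{
	/* re-arm paravirt features on the resuming CPU */
}

static struct syscore_ops demo_syscore_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

static int __init demo_init(void)
{
	register_syscore_ops(&demo_syscore_ops);
	return 0;
}
early_initcall(demo_init);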
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d37ed4e1d033..ad273e5861c1 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -20,7 +20,6 @@
#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
#include <asm/x86_init.h>
-#include <asm/reboot.h>
#include <asm/kvmclock.h>
static int kvmclock __initdata = 1;
@@ -203,28 +202,9 @@ static void kvm_setup_secondary_clock(void)
}
#endif
-/*
- * After the clock is registered, the host will keep writing to the
- * registered memory location. If the guest happens to shutdown, this memory
- * won't be valid. In cases like kexec, in which you install a new kernel, this
- * means a random memory location will be kept being written. So before any
- * kind of shutdown from our side, we unregister the clock by writing anything
- * that does not have the 'enable' bit set in the msr
- */
-#ifdef CONFIG_KEXEC_CORE
-static void kvm_crash_shutdown(struct pt_regs *regs)
-{
- native_write_msr(msr_kvm_system_time, 0, 0);
- kvm_disable_steal_time();
- native_machine_crash_shutdown(regs);
-}
-#endif
-
-static void kvm_shutdown(void)
+void kvmclock_disable(void)
{
native_write_msr(msr_kvm_system_time, 0, 0);
- kvm_disable_steal_time();
- native_machine_shutdown();
}
static void __init kvmclock_init_mem(void)
@@ -351,10 +331,6 @@ void __init kvmclock_init(void)
#endif
x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
- machine_ops.shutdown = kvm_shutdown;
-#ifdef CONFIG_KEXEC_CORE
- machine_ops.crash_shutdown = kvm_crash_shutdown;
-#endif
kvm_get_preset_lpj();
/*
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 64b00b0d7fe8..1b373d79cedc 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -23,17 +23,6 @@
#include <asm/set_memory.h>
#include <asm/debugreg.h>
-static void set_gdt(void *newgdt, __u16 limit)
-{
- struct desc_ptr curgdt;
-
- /* ia32 supports unaligned loads & stores */
- curgdt.size = limit;
- curgdt.address = (unsigned long)newgdt;
-
- load_gdt(&curgdt);
-}
-
static void load_segments(void)
{
#define __STR(X) #X
@@ -232,8 +221,8 @@ void machine_kexec(struct kimage *image)
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
- idt_invalidate(phys_to_virt(0));
- set_gdt(phys_to_virt(0), 0);
+ native_idt_invalidate();
+ native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index c078b0d3ab0e..131f30fdcfbd 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -256,35 +256,6 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
return init_transition_pgtable(image, level4p);
}
-static void set_idt(void *newidt, u16 limit)
-{
- struct desc_ptr curidt;
-
- /* x86-64 supports unaligned loads & stores */
- curidt.size = limit;
- curidt.address = (unsigned long)newidt;
-
- __asm__ __volatile__ (
- "lidtq %0\n"
- : : "m" (curidt)
- );
-};
-
-
-static void set_gdt(void *newgdt, u16 limit)
-{
- struct desc_ptr curgdt;
-
- /* x86-64 supports unaligned loads & stores */
- curgdt.size = limit;
- curgdt.address = (unsigned long)newgdt;
-
- __asm__ __volatile__ (
- "lgdtq %0\n"
- : : "m" (curgdt)
- );
-};
-
static void load_segments(void)
{
__asm__ __volatile__ (
@@ -379,8 +350,8 @@ void machine_kexec(struct kimage *image)
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
- set_gdt(phys_to_virt(0), 0);
- set_idt(phys_to_virt(0), 0);
+ native_idt_invalidate();
+ native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel((unsigned long)image->head,
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index b5cb49e57df8..c94dec6a1834 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -95,7 +95,7 @@ static void get_fam10h_pci_mmconf_base(void)
return;
/* SYS_CFG */
- address = MSR_K8_SYSCFG;
+ address = MSR_AMD64_SYSCFG;
rdmsrl(address, val);
/* TOP_MEM2 is not enabled? */
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 2ef961cf4cfc..4bce802d25fb 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,7 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5e1f38179f49..e52b208b4641 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
- if (p == current || p->state == TASK_RUNNING)
+ if (p == current || task_is_running(p))
return 0;
if (!try_get_task_stack(p))
@@ -975,7 +975,7 @@ unsigned long get_wchan(struct task_struct *p)
goto out;
}
fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
- } while (count++ < 16 && p->state != TASK_RUNNING);
+ } while (count++ < 16 && !task_is_running(p));
out:
put_task_stack(p);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 87a4143aa7d7..4c208ea3bd9f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -911,7 +911,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value)
* syscall with TS_COMPAT still set.
*/
regs->orig_ax = value;
- if (syscall_get_nr(child, regs) >= 0)
+ if (syscall_get_nr(child, regs) != -1)
child->thread_info.status |= TS_I386_REGS_POKED;
break;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index b29657b76e3f..ebfb91108232 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -669,7 +669,7 @@ static void native_machine_emergency_restart(void)
break;
case BOOT_TRIPLE:
- idt_invalidate(NULL);
+ idt_invalidate();
__asm__ __volatile__("int3");
/* We're probably dead after this, but... */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 72920af0b3c0..9f1d9215a9fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -14,6 +14,7 @@
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
+#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
@@ -44,6 +45,7 @@
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
+#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
@@ -637,11 +639,11 @@ static void __init trim_snb_memory(void)
* them from accessing certain memory ranges, namely anything below
* 1M and in the pages listed in bad_pages[] above.
*
- * To avoid these pages being ever accessed by SNB gfx devices
- * reserve all memory below the 1 MB mark and bad_pages that have
- * not already been reserved at boot time.
+ * To avoid these pages ever being accessed by SNB gfx devices, reserve
+ * bad_pages that have not already been reserved at boot time.
+ * All memory below the 1 MB mark is anyway reserved later during
+ * setup_arch(), so there is no need to reserve it here.
*/
- memblock_reserve(0, 1<<20);
for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
if (memblock_reserve(bad_pages[i], PAGE_SIZE))
@@ -694,30 +696,6 @@ static void __init e820_add_kernel_range(void)
e820__range_add(start, size, E820_TYPE_RAM);
}
-static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
-
-static int __init parse_reservelow(char *p)
-{
- unsigned long long size;
-
- if (!p)
- return -EINVAL;
-
- size = memparse(p, &p);
-
- if (size < 4096)
- size = 4096;
-
- if (size > 640*1024)
- size = 640*1024;
-
- reserve_low = size;
-
- return 0;
-}
-
-early_param("reservelow", parse_reservelow);
-
static void __init early_reserve_memory(void)
{
/*
@@ -733,14 +711,14 @@ static void __init early_reserve_memory(void)
* The first 4Kb of memory is a BIOS owned area, but generally it is
* not listed as such in the E820 table.
*
- * Reserve the first memory page and typically some additional
- * memory (64KiB by default) since some BIOSes are known to corrupt
- * low memory. See the Kconfig help text for X86_RESERVE_LOW.
+ * Reserve the first 64K of memory since some BIOSes are known to
+ * corrupt low memory. After the real mode trampoline is allocated the
+ * rest of the memory below 640k is reserved.
*
* In addition, make sure page 0 is always reserved because on
* systems with L1TF its contents can be leaked to user processes.
*/
- memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+ memblock_reserve(0, SZ_64K);
early_reserve_initrd();
@@ -751,6 +729,7 @@ static void __init early_reserve_memory(void)
reserve_ibft_region();
reserve_bios_regions();
+ trim_snb_memory();
}
/*
@@ -1081,14 +1060,21 @@ void __init setup_arch(char **cmdline_p)
(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif
- reserve_real_mode();
-
/*
- * Reserving memory causing GPU hangs on Sandy Bridge integrated
- * graphics devices should be done after we allocated memory under
- * 1M for the real mode trampoline.
+ * Find free memory for the real mode trampoline and place it there. If
+ * there is not enough free memory under 1M, on EFI-enabled systems
+ * there will be additional attempt to reclaim the memory for the real
+ * mode trampoline at efi_free_boot_services().
+ *
+ * Unconditionally reserve the entire first 1M of RAM because BIOSes
+ * are known to corrupt low memory and several hundred kilobytes are not
+ * worth complex detection of what memory gets clobbered. Windows does the
+ * same thing for very similar reasons.
+ *
+ * Moreover, on machines with SandyBridge graphics or in setups that use
+ * crashkernel the entire 1M is reserved anyway.
*/
- trim_snb_memory();
+ reserve_real_mode();
init_mem_mapping();
@@ -1226,6 +1212,14 @@ void __init setup_arch(char **cmdline_p)
x86_init.timers.wallclock_init();
+ /*
+ * This needs to run before setup_local_APIC() which soft-disables the
+ * local APIC temporarily and that masks the thermal LVT interrupt,
+ * leading to softlockups on machines which have configured SMI
+ * interrupt delivery.
+ */
+ therm_lvt_init();
+
mcheck_init();
register_refined_jiffies(CLOCK_TICK_RATE);
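
For reference, the new early reservation in the hunks above boils down to one memblock call; the wrapper name below is made up, while memblock_reserve() and SZ_64K are the real kernel symbols:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Reserve page 0 plus the first 64K early; the rest of memory below 1M
 * is claimed later, when the real-mode trampoline is placed. */
static void __init demo_reserve_bios_low_mem(void)
{
	memblock_reserve(0, SZ_64K);
}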
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0941d2f44f2a..78a32b956e81 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
*/
static bool __init pcpu_need_numa(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
pg_data_t *last = NULL;
unsigned int cpu;
@@ -101,7 +101,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
unsigned long align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = early_cpu_to_node(cpu);
void *ptr;
@@ -140,7 +140,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-shared.c
index 0aa9f13efd57..9f90f460a28c 100644
--- a/arch/x86/kernel/sev-es-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -26,13 +26,13 @@ static bool __init sev_es_check_cpu_features(void)
static void __noreturn sev_es_terminate(unsigned int reason)
{
- u64 val = GHCB_SEV_TERMINATE;
+ u64 val = GHCB_MSR_TERM_REQ;
/*
* Tell the hypervisor what went wrong - only reason-set 0 is
* currently supported.
*/
- val |= GHCB_SEV_TERMINATE_REASON(0, reason);
+ val |= GHCB_SEV_TERM_REASON(0, reason);
/* Request Guest Termination from Hypervisor */
sev_es_wr_ghcb_msr(val);
@@ -47,15 +47,15 @@ static bool sev_es_negotiate_protocol(void)
u64 val;
/* Do the GHCB protocol version negotiation */
- sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
+ sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_INFO(val) != GHCB_SEV_INFO)
+ if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
return false;
- if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
- GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
+ if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTO_OUR ||
+ GHCB_MSR_PROTO_MIN(val) > GHCB_PROTO_OUR)
return false;
return true;
@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
+ ghcb->save.sw_exit_code = 0;
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
@@ -153,28 +154,28 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->ax = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->bx = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->cx = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->dx = val >> 32;
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev.c
index 73873b007838..a6895e440bc3 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev.c
@@ -7,12 +7,11 @@
* Author: Joerg Roedel <jroedel@suse.de>
*/
-#define pr_fmt(fmt) "SEV-ES: " fmt
+#define pr_fmt(fmt) "SEV: " fmt
#include <linux/sched/debug.h> /* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/mem_encrypt.h>
-#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
@@ -22,7 +21,7 @@
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/internal.h>
#include <asm/processor.h>
@@ -192,19 +191,39 @@ void noinstr __sev_es_ist_exit(void)
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
+ WARN_ON(!irqs_disabled());
+
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
if (unlikely(data->ghcb_active)) {
/* GHCB is already in use - save its contents */
- if (unlikely(data->backup_ghcb_active))
- return NULL;
+ if (unlikely(data->backup_ghcb_active)) {
+ /*
+ * Backup-GHCB is also already in use. There is no way
+ * to continue here so just kill the machine. To make
+ * panic() work, mark GHCBs inactive so that messages
+ * can be printed out.
+ */
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+
+ instrumentation_begin();
+ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ instrumentation_end();
+ }
/* Mark backup_ghcb active before writing to it */
data->backup_ghcb_active = true;
@@ -221,24 +240,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
return ghcb;
}
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
-{
- struct sev_es_runtime_data *data;
- struct ghcb *ghcb;
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
-
- if (state->ghcb) {
- /* Restore GHCB from Backup */
- *ghcb = *state->ghcb;
- data->backup_ghcb_active = false;
- state->ghcb = NULL;
- } else {
- data->ghcb_active = false;
- }
-}
-
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
@@ -266,17 +267,24 @@ static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
char buffer[MAX_INSN_SIZE];
- int res;
+ int insn_bytes;
- res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
- if (!res) {
+ insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+ if (insn_bytes == 0) {
+ /* Nothing could be copied */
ctxt->fi.vector = X86_TRAP_PF;
ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
ctxt->fi.cr2 = ctxt->regs->ip;
return ES_EXCEPTION;
+ } else if (insn_bytes == -EINVAL) {
+ /* Effective RIP could not be calculated */
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ ctxt->fi.cr2 = 0;
+ return ES_EXCEPTION;
}
- if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res))
+ if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
return ES_DECODE_FAILED;
if (ctxt->insn.immediate.got)
@@ -323,31 +331,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
- memcpy(dst, buf, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __put_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __put_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __put_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_to_user() here because
+ * vc_write_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
memcpy(&d1, buf, 1);
- if (put_user(d1, target))
+ if (__put_user(d1, target))
goto fault;
break;
case 2:
memcpy(&d2, buf, 2);
- if (put_user(d2, target))
+ if (__put_user(d2, target))
goto fault;
break;
case 4:
memcpy(&d4, buf, 4);
- if (put_user(d4, target))
+ if (__put_user(d4, target))
goto fault;
break;
case 8:
memcpy(&d8, buf, 8);
- if (put_user(d8, target))
+ if (__put_user(d8, target))
goto fault;
break;
default:
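
A tiny sketch of the fixed-size, non-string copy the comment above calls for; the wrapper name is made up, while __put_user() is the real kernel macro that skips access_ok() and, on x86, compiles to a single MOV rather than a REP MOVS:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Copy one naturally sized value without string instructions;
 * returns 0 on success or -EFAULT if the access faulted. */
static int demo_emul_write_u32(u32 val, u32 __user *target)
{
	if (__put_user(val, target))
		return -EFAULT;

	return 0;
}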
@@ -378,30 +399,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
- memcpy(buf, src, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __get_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __get_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __get_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_from_user() here because
+ * vc_read_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
- if (get_user(d1, s))
+ if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
- if (get_user(d2, s))
+ if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
- if (get_user(d4, s))
+ if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
- if (get_user(d8, s))
+ if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
@@ -459,14 +493,39 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
}
/* Include code shared with pre-decompression boot stage */
-#include "sev-es-shared.c"
+#include "sev-shared.c"
+
+static noinstr void __sev_put_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ WARN_ON(!irqs_disabled());
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (state->ghcb) {
+ /* Restore GHCB from Backup */
+ *ghcb = *state->ghcb;
+ data->backup_ghcb_active = false;
+ state->ghcb = NULL;
+ } else {
+ /*
+ * Invalidate the GHCB so a VMGEXIT instruction issued
+ * from userspace won't appear to be valid.
+ */
+ vc_ghcb_invalidate(ghcb);
+ data->ghcb_active = false;
+ }
+}
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
@@ -476,7 +535,7 @@ void noinstr __sev_es_nmi_complete(void)
sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
VMGEXIT();
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
static u64 get_jump_table_addr(void)
@@ -488,7 +547,7 @@ static u64 get_jump_table_addr(void)
local_irq_save(flags);
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
@@ -502,7 +561,7 @@ static u64 get_jump_table_addr(void)
ghcb_sw_exit_info_2_is_valid(ghcb))
ret = ghcb->save.sw_exit_info_2;
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
local_irq_restore(flags);
@@ -627,7 +686,7 @@ static void sev_es_ap_hlt_loop(void)
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
while (true) {
vc_ghcb_invalidate(ghcb);
@@ -644,7 +703,7 @@ static void sev_es_ap_hlt_loop(void)
break;
}
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
/*
@@ -734,7 +793,7 @@ void __init sev_es_init_vc_handling(void)
sev_es_setup_play_dead();
/* Secondary CPUs use the runtime #VC handler */
- initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
+ initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
@@ -1172,14 +1231,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
return ES_EXCEPTION;
}
-static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
-{
- if (user_mode(regs))
- noist_exc_debug(regs);
- else
- exc_debug(regs);
-}
-
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
struct ghcb *ghcb,
unsigned long exit_code)
@@ -1255,6 +1306,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
case X86_TRAP_UD:
exc_invalid_op(ctxt->regs);
break;
+ case X86_TRAP_PF:
+ write_cr2(ctxt->fi.cr2);
+ exc_page_fault(ctxt->regs, error_code);
+ break;
case X86_TRAP_AC:
exc_alignment_check(ctxt->regs, error_code);
break;
@@ -1271,55 +1326,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}
-/*
- * Main #VC exception handler. It is called when the entry code was able to
- * switch off the IST to a safe kernel stack.
- *
- * With the current implementation it is always possible to switch to a safe
- * stack because #VC exceptions only happen at known places, like intercepted
- * instructions or accesses to MMIO areas/IO ports. They can also happen with
- * code instrumentation when the hypervisor intercepts #DB, but the critical
- * paths are forbidden to be instrumented, so #DB exceptions currently also
- * only happen in safe places.
- */
-DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
- irqentry_state_t irq_state;
struct ghcb_state state;
struct es_em_ctxt ctxt;
enum es_result result;
struct ghcb *ghcb;
+ bool ret = true;
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
- vc_handle_trap_db(regs);
- return;
- }
-
- irq_state = irqentry_nmi_enter(regs);
- lockdep_assert_irqs_disabled();
- instrumentation_begin();
-
- /*
- * This is invoked through an interrupt gate, so IRQs are disabled. The
- * code below might walk page-tables for user or kernel addresses, so
- * keep the IRQs disabled to protect us against concurrent TLB flushes.
- */
-
- ghcb = sev_es_get_ghcb(&state);
- if (!ghcb) {
- /*
- * Mark GHCBs inactive so that panic() is able to print the
- * message.
- */
- data->ghcb_active = false;
- data->backup_ghcb_active = false;
-
- panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
- }
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);
@@ -1327,7 +1342,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
if (result == ES_OK)
result = vc_handle_exitcode(&ctxt, ghcb, error_code);
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
/* Done - now check the result */
switch (result) {
@@ -1335,17 +1350,20 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
vc_finish_insn(&ctxt);
break;
case ES_UNSUPPORTED:
- pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_VMM_ERROR:
pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_DECODE_FAILED:
pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_EXCEPTION:
vc_forward_exception(&ctxt);
break;
@@ -1361,24 +1379,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
BUG();
}
-out:
- instrumentation_end();
- irqentry_nmi_exit(regs, irq_state);
+ return ret;
+}
- return;
+static __always_inline bool vc_is_db(unsigned long error_code)
+{
+ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
+}
-fail:
- if (user_mode(regs)) {
- /*
- * Do not kill the machine if user-space triggered the
- * exception. Send SIGBUS instead and let user-space deal with
- * it.
- */
- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
- } else {
- pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
- result);
+/*
+ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
+ * and will panic when an error happens.
+ */
+DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
+{
+ irqentry_state_t irq_state;
+
+ /*
+ * With the current implementation it is always possible to switch to a
+ * safe stack because #VC exceptions only happen at known places, like
+ * intercepted instructions or accesses to MMIO areas/IO ports. They can
+ * also happen with code instrumentation when the hypervisor intercepts
+ * #DB, but the critical paths are forbidden to be instrumented, so #DB
+ * exceptions currently also only happen in safe places.
+ *
+ * But keep this here in case the noinstr annotations are violated due
+ * to a bug elsewhere.
+ */
+ if (unlikely(on_vc_fallback_stack(regs))) {
+ instrumentation_begin();
+ panic("Can't handle #VC exception from unsupported context\n");
+ instrumentation_end();
+ }
+
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ exc_debug(regs);
+ return;
+ }
+ irq_state = irqentry_nmi_enter(regs);
+
+ instrumentation_begin();
+
+ if (!vc_raw_handle_exception(regs, error_code)) {
/* Show some debug info */
show_regs(regs);
@@ -1389,23 +1435,38 @@ fail:
panic("Returned from Terminate-Request to Hypervisor\n");
}
- goto out;
+ instrumentation_end();
+ irqentry_nmi_exit(regs, irq_state);
}
-/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
-DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
+/*
+ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
+ * and will kill the current task with SIGBUS when an error happens.
+ */
+DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ noist_exc_debug(regs);
+ return;
+ }
+
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
- panic("Can't handle #VC exception from unsupported context\n");
- instrumentation_end();
-}
-DEFINE_IDTENTRY_VC(exc_vmm_communication)
-{
- if (likely(!on_vc_fallback_stack(regs)))
- safe_stack_exc_vmm_communication(regs, error_code);
- else
- ist_exc_vmm_communication(regs, error_code);
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /*
+ * Do not kill the machine if user-space triggered the
+ * exception. Send SIGBUS instead and let user-space deal with
+ * it.
+ */
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+ }
+
+ instrumentation_end();
+ irqentry_exit_to_user_mode(regs);
}
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index a06cb107c0e8..e12779a2714d 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -713,7 +713,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
/* Are we from a system call? */
- if (syscall_get_nr(current, regs) >= 0) {
+ if (syscall_get_nr(current, regs) != -1) {
/* If so, check system call restarting.. */
switch (syscall_get_error(current, regs)) {
case -ERESTART_RESTARTBLOCK:
@@ -793,7 +793,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
}
/* Did we come from a system call? */
- if (syscall_get_nr(current, regs) >= 0) {
+ if (syscall_get_nr(current, regs) != -1) {
/* Restart the system call - no handlers present */
switch (syscall_get_error(current, regs)) {
case -ERESTARTNOHAND:
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 0e5d0a7e203b..06743ec054d2 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10);
+
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
@@ -138,8 +141,10 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
- BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18);
- BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14);
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
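
The BUILD_BUG_ON()s above are compile-time layout checks. The same idea in plain C11, with a made-up struct standing in for siginfo:

#include <assert.h>
#include <stddef.h>

/* Hypothetical layout, used only to show the technique. */
struct demo_info {
	int  signo;
	int  errno_val;
	int  code;
	long perf_data;
};

/* Fails the build, not the run, if a field ever moves. */
static_assert(offsetof(struct demo_info, errno_val) == sizeof(int),
	      "errno_val moved; layout checks like these catch it at compile time");

int main(void) { return 0; }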
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0ad5214f598a..9320285a5e29 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -232,11 +232,9 @@ static void notrace start_secondary(void *unused)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
#endif
- cpu_init_exception_handling();
- cpu_init();
+ cpu_init_secondary();
rcu_cpu_starting(raw_smp_processor_id());
x86_cpuinit.early_percpu_clock_init();
- preempt_disable();
smp_callin();
enable_start_cpu0 = 0;
@@ -2043,7 +2041,7 @@ static bool amd_set_max_freq_ratio(void)
return false;
}
- highest_perf = perf_caps.highest_perf;
+ highest_perf = amd_get_highest_perf();
nominal_perf = perf_caps.nominal_perf;
if (!highest_perf || !nominal_perf) {
diff --git a/arch/x86/kernel/trace.c b/arch/x86/kernel/trace.c
new file mode 100644
index 000000000000..6b73b6f92ad3
--- /dev/null
+++ b/arch/x86/kernel/trace.c
@@ -0,0 +1,234 @@
+#include <asm/trace/irq_vectors.h>
+#include <linux/trace.h>
+
+#if defined(CONFIG_OSNOISE_TRACER) && defined(CONFIG_X86_LOCAL_APIC)
+/*
+ * trace_intel_irq_entry - record intel specific IRQ entry
+ */
+static void trace_intel_irq_entry(void *data, int vector)
+{
+ osnoise_trace_irq_entry(vector);
+}
+
+/*
+ * trace_intel_irq_exit - record intel specific IRQ exit
+ */
+static void trace_intel_irq_exit(void *data, int vector)
+{
+ char *vector_desc = (char *) data;
+
+ osnoise_trace_irq_exit(vector, vector_desc);
+}
+
+/*
+ * osnoise_arch_register - Register intel specific IRQ entry tracepoints
+ */
+int osnoise_arch_register(void)
+{
+ int ret;
+
+ ret = register_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ if (ret)
+ goto out_timer_entry;
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ ret = register_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_timer_exit;
+
+ ret = register_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ if (ret)
+ goto out_thermal_entry;
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+#ifdef CONFIG_X86_MCE_AMD
+ ret = register_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_thermal_exit;
+
+ ret = register_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ if (ret)
+ goto out_deferred_entry;
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ ret = register_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_deferred_exit;
+
+ ret = register_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ if (ret)
+ goto out_threshold_entry;
+#endif /* CONFIG_X86_MCE_THRESHOLD */
+
+#ifdef CONFIG_SMP
+ ret = register_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_threshold_exit;
+
+ ret = register_trace_call_function_single_exit(trace_intel_irq_exit,
+ "call_function_single");
+ if (ret)
+ goto out_call_function_single_entry;
+
+ ret = register_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_single_exit;
+
+ ret = register_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ if (ret)
+ goto out_call_function_entry;
+
+ ret = register_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_exit;
+
+ ret = register_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ if (ret)
+ goto out_reschedule_entry;
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_IRQ_WORK
+ ret = register_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_reschedule_exit;
+
+ ret = register_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ if (ret)
+ goto out_irq_work_entry;
+#endif
+
+ ret = register_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_irq_work_exit;
+
+ ret = register_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ if (ret)
+ goto out_x86_ipi_entry;
+
+ ret = register_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_x86_ipi_exit;
+
+ ret = register_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ if (ret)
+ goto out_error_apic_entry;
+
+ ret = register_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_error_apic_exit;
+
+ ret = register_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ if (ret)
+ goto out_spurious_apic_entry;
+
+ return 0;
+
+out_spurious_apic_entry:
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+out_error_apic_exit:
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+out_error_apic_entry:
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+out_x86_ipi_exit:
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+out_x86_ipi_entry:
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+out_irq_work_exit:
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+out_irq_work_entry:
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+out_reschedule_exit:
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+out_reschedule_entry:
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+out_call_function_exit:
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+out_call_function_entry:
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+out_call_function_single_exit:
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+out_call_function_single_entry:
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+out_threshold_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+out_threshold_entry:
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+out_deferred_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+out_deferred_entry:
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+out_thermal_exit:
+#endif /* CONFIG_X86_MCE_AMD */
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+out_thermal_entry:
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+out_timer_exit:
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+out_timer_entry:
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+out_err:
+ return -EINVAL;
+}
+
+void osnoise_arch_unregister(void)
+{
+ unregister_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+}
+#endif /* CONFIG_OSNOISE_TRACER && CONFIG_X86_LOCAL_APIC */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 853ea7a80806..ed540e09a399 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -1160,12 +1160,9 @@ void __init trap_init(void)
/* Init GHCB memory pages when running as an SEV-ES guest */
sev_es_init_vc_handling();
+ /* Initialize TSS before setting up traps so ISTs work */
+ cpu_init_exception_handling();
+ /* Setup traps as cpu_init() might #GP */
idt_setup_traps();
-
- /*
- * Should be a barrier for any external CPU state:
- */
cpu_init();
-
- idt_setup_ist_traps();
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57ec01192180..2e076a459a0c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1128,6 +1128,7 @@ static int tsc_cs_enable(struct clocksource *cs)
static struct clocksource clocksource_tsc_early = {
.name = "tsc-early",
.rating = 299,
+ .uncertainty_margin = 32 * NSEC_PER_MSEC,
.read = read_tsc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
@@ -1152,7 +1153,8 @@ static struct clocksource clocksource_tsc = {
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_VALID_FOR_HRES |
- CLOCK_SOURCE_MUST_VERIFY,
+ CLOCK_SOURCE_MUST_VERIFY |
+ CLOCK_SOURCE_VERIFY_PERCPU,
.vdso_clock_mode = VDSO_CLOCKMODE_TSC,
.enable = tsc_cs_enable,
.resume = tsc_resume,
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 8daa70b0d2da..576b47e7523d 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -346,14 +346,12 @@ bool fixup_umip_exception(struct pt_regs *regs)
if (!regs)
return false;
- nr_copied = insn_fetch_from_user(regs, buf);
-
/*
- * The insn_fetch_from_user above could have failed if user code
- * is protected by a memory protection key. Give up on emulation
- * in such a case. Should we issue a page fault?
+ * Give up on emulation if fetching the instruction failed. Should a
+ * page fault or a #GP be issued?
*/
- if (!nr_copied)
+ nr_copied = insn_fetch_from_user(regs, buf);
+ if (nr_copied <= 0)
return false;
if (!insn_decode_from_regs(&insn, regs, buf, nr_copied))
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f6b93a35ce14..ac69894eab88 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,8 +22,6 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
depends on HIGH_RES_TIMERS
- # for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET && MULTIUSER
depends on X86_LOCAL_APIC
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
@@ -36,8 +34,7 @@ config KVM
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
select KVM_MMIO
- select TASKSTATS
- select TASK_DELAY_ACCT
+ select SCHED_INFO
select PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -46,6 +43,7 @@ config KVM
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
select SRCU
+ select HAVE_KVM_PM_NOTIFIER if PM
help
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index c589db5d91b3..75dfd27b6e8a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -11,13 +11,18 @@ KVM := ../../../virt/kvm
kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
- $(KVM)/dirty_ring.o
+ $(KVM)/dirty_ring.o $(KVM)/binary_stats.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
mmu/spte.o
+
+ifdef CONFIG_HYPERV
+kvm-y += kvm_onhyperv.o
+endif
+
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
kvm-$(CONFIG_KVM_XEN) += xen.o
@@ -27,6 +32,10 @@ kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+ifdef CONFIG_HYPERV
+kvm-amd-y += svm/svm_onhyperv.o
+endif
+
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
obj-$(CONFIG_KVM_AMD) += kvm-amd.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 19606a341888..c42613cfb5ba 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -202,10 +202,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
/*
- * Except for the MMU, which needs to be reset after any vendor
- * specific adjustments to the reserved GPA bits.
+ * Except for the MMU, which needs to do its thing after any vendor
+ * specific adjustments to the reserved GPA bits.
*/
- kvm_mmu_reset_context(vcpu);
+ kvm_mmu_after_set_cpuid(vcpu);
}
static int is_efer_nx(void)
@@ -458,7 +458,7 @@ void kvm_set_cpu_caps(void)
F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
- F(SGX_LC)
+ F(SGX_LC) | F(BUS_LOCK_DETECT)
);
/* Set LA57 based on hardware capability. */
if (cpuid_ecx(7) & F(LA57))
@@ -567,6 +567,21 @@ void kvm_set_cpu_caps(void)
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
F(PMM) | F(PMM_EN)
);
+
+ /*
+ * Hide RDTSCP and RDPID if either feature is reported as supported but
+ * probing MSR_TSC_AUX failed. This is purely a sanity check and
+ * should never happen, but the guest will likely crash if RDTSCP or
+ * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
+ * the past. For example, the sanity check may fire if this instance of
+ * KVM is running as L1 on top of an older, broken KVM.
+ */
+ if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
+ kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
+ !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
+ kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+ kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+ }
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
@@ -637,8 +652,10 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
case 7:
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
entry->eax = 0;
- entry->ecx = F(RDPID);
+ if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+ entry->ecx = F(RDPID);
++array->nent;
+ break;
default:
break;
}
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index 7e818d64bb4d..95a98413dc32 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -17,6 +17,15 @@ static int vcpu_get_timer_advance_ns(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(vcpu_timer_advance_ns_fops, vcpu_get_timer_advance_ns, NULL, "%llu\n");
+static int vcpu_get_guest_mode(void *data, u64 *val)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+ *val = vcpu->stat.guest_mode;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_guest_mode_fops, vcpu_get_guest_mode, NULL, "%lld\n");
+
static int vcpu_get_tsc_offset(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
@@ -45,6 +54,8 @@ DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bi
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
+ debugfs_create_file("guest_mode", 0444, debugfs_dentry, vcpu,
+ &vcpu_guest_mode_fops);
debugfs_create_file("tsc-offset", 0444, debugfs_dentry, vcpu,
&vcpu_tsc_offset_fops);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 77e1c89a95a7..2837110e66ed 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -22,7 +22,6 @@
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
-#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
@@ -1081,116 +1080,14 @@ static void fetch_register_operand(struct operand *op)
}
}
-static void emulator_get_fpu(void)
-{
- fpregs_lock();
-
- fpregs_assert_state_consistent();
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
- switch_fpu_return();
-}
-
-static void emulator_put_fpu(void)
-{
- fpregs_unlock();
-}
-
-static void read_sse_reg(sse128_t *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
- case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
- case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
- case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
- case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
- case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
- case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
- case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
-#ifdef CONFIG_X86_64
- case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
- case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
- case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
- case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
- case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
- case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
- case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
- case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
-#endif
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void write_sse_reg(sse128_t *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
- case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
- case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
- case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
- case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
- case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
- case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
- case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
-#ifdef CONFIG_X86_64
- case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
- case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
- case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
- case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
- case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
- case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
- case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
- case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
-#endif
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void read_mmx_reg(u64 *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
- case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
- case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
- case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
- case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
- case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
- case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
- case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void write_mmx_reg(u64 *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
- case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
- case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
- case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
- case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
- case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
- case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
- case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
- default: BUG();
- }
- emulator_put_fpu();
-}
-
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fninit");
- emulator_put_fpu();
+ kvm_fpu_put();
return X86EMUL_CONTINUE;
}
@@ -1201,9 +1098,9 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fnstcw %0": "+m"(fcw));
- emulator_put_fpu();
+ kvm_fpu_put();
ctxt->dst.val = fcw;
@@ -1217,9 +1114,9 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fnstsw %0": "+m"(fsw));
- emulator_put_fpu();
+ kvm_fpu_put();
ctxt->dst.val = fsw;
@@ -1238,7 +1135,7 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
- read_sse_reg(&op->vec_val, reg);
+ kvm_read_sse_reg(reg, &op->vec_val);
return;
}
if (ctxt->d & Mmx) {
@@ -1289,7 +1186,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
- read_sse_reg(&op->vec_val, ctxt->modrm_rm);
+ kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
return rc;
}
if (ctxt->d & Mmx) {
@@ -1866,10 +1763,10 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
op->bytes * op->count);
break;
case OP_XMM:
- write_sse_reg(&op->vec_val, op->addr.xmm);
+ kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
break;
case OP_MM:
- write_mmx_reg(&op->mm_val, op->addr.mm);
+ kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
break;
case OP_NONE:
/* no writeback */
@@ -2638,8 +2535,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
- ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
- ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+ ctxt->ops->exiting_smm(ctxt);
/*
* Get back to real mode, to prepare a safe state in which to load
@@ -2678,12 +2574,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
}
/*
- * Give pre_leave_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. enter guest mode) before loading state from the SMM
+ * Give leave_smm() a chance to make ISA-specific changes to the vCPU
+ * state (e.g. enter guest mode) before loading state from the SMM
* state-save area.
*/
- if (ctxt->ops->pre_leave_smm(ctxt, buf))
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->ops->leave_smm(ctxt, buf))
+ goto emulate_shutdown;
#ifdef CONFIG_X86_64
if (emulator_has_longmode(ctxt))
@@ -2692,13 +2588,21 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
#endif
ret = rsm_load_state_32(ctxt, buf);
- if (ret != X86EMUL_CONTINUE) {
- /* FIXME: should triple fault */
- return X86EMUL_UNHANDLEABLE;
- }
+ if (ret != X86EMUL_CONTINUE)
+ goto emulate_shutdown;
- ctxt->ops->post_leave_smm(ctxt);
+ /*
+ * Note, the ctxt->ops callbacks are responsible for handling side
+ * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
+ * runtime updates, etc... If that changes, e.g. this flow is moved
+ * out of the emulator to make it look more like enter_smm(), then
+ * those side effects need to be explicitly handled for both success
+ * and shutdown.
+ */
+ return X86EMUL_CONTINUE;
+emulate_shutdown:
+ ctxt->ops->triple_fault(ctxt);
return X86EMUL_CONTINUE;
}
@@ -4124,11 +4028,11 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- emulator_get_fpu();
+ kvm_fpu_get();
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
- emulator_put_fpu();
+ kvm_fpu_put();
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -4172,7 +4076,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- emulator_get_fpu();
+ kvm_fpu_get();
if (size < __fxstate_size(16)) {
rc = fxregs_fixup(&fx_state, size);
@@ -4189,7 +4093,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
out:
- emulator_put_fpu();
+ kvm_fpu_put();
return rc;
}
@@ -4502,7 +4406,7 @@ static const struct opcode group8[] = {
* from the register case of group9.
*/
static const struct gprefix pfx_0f_c7_7 = {
- N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
+ N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
};
@@ -5111,7 +5015,7 @@ done:
return rc;
}
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
@@ -5322,7 +5226,8 @@ done_prefixes:
ctxt->execute = opcode.u.execute;
- if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+ if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
+ likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
@@ -5436,9 +5341,9 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
int rc;
- emulator_get_fpu();
+ kvm_fpu_get();
rc = asm_safe("fwait");
- emulator_put_fpu();
+ kvm_fpu_put();
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
@@ -5449,7 +5354,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
static void fetch_possible_mmx_operand(struct operand *op)
{
if (op->type == OP_MM)
- read_mmx_reg(&op->mm_val, op->addr.mm);
+ kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
}
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
diff --git a/arch/x86/kvm/fpu.h b/arch/x86/kvm/fpu.h
new file mode 100644
index 000000000000..3ba12888bf66
--- /dev/null
+++ b/arch/x86/kvm/fpu.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __KVM_FPU_H_
+#define __KVM_FPU_H_
+
+#include <asm/fpu/api.h>
+
+typedef u32 __attribute__((vector_size(16))) sse128_t;
+#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
+#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
+#define sse128_hi(x) ({ __sse128_u t; t.vec = x; t.as_u64[1]; })
+#define sse128_l0(x) ({ __sse128_u t; t.vec = x; t.as_u32[0]; })
+#define sse128_l1(x) ({ __sse128_u t; t.vec = x; t.as_u32[1]; })
+#define sse128_l2(x) ({ __sse128_u t; t.vec = x; t.as_u32[2]; })
+#define sse128_l3(x) ({ __sse128_u t; t.vec = x; t.as_u32[3]; })
+#define sse128(lo, hi) ({ __sse128_u t; t.as_u64[0] = lo; t.as_u64[1] = hi; t.vec; })
+
+static inline void _kvm_read_sse_reg(int reg, sse128_t *data)
+{
+ switch (reg) {
+ case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
+ case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
+ case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
+ case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
+ case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
+ case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
+ case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
+ case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
+#ifdef CONFIG_X86_64
+ case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
+ case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
+ case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
+ case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
+ case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
+ case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
+ case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
+ case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
+#endif
+ default: BUG();
+ }
+}
+
+static inline void _kvm_write_sse_reg(int reg, const sse128_t *data)
+{
+ switch (reg) {
+ case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
+ case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
+ case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
+ case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
+ case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
+ case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
+ case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
+ case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
+#ifdef CONFIG_X86_64
+ case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
+ case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
+ case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
+ case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
+ case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
+ case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
+ case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
+ case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
+#endif
+ default: BUG();
+ }
+}
+
+static inline void _kvm_read_mmx_reg(int reg, u64 *data)
+{
+ switch (reg) {
+ case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
+ case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
+ case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
+ case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
+ case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
+ case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
+ case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
+ case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
+ default: BUG();
+ }
+}
+
+static inline void _kvm_write_mmx_reg(int reg, const u64 *data)
+{
+ switch (reg) {
+ case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
+ case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
+ case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
+ case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
+ case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
+ case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
+ case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
+ case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
+ default: BUG();
+ }
+}
+
+static inline void kvm_fpu_get(void)
+{
+ fpregs_lock();
+
+ fpregs_assert_state_consistent();
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ switch_fpu_return();
+}
+
+static inline void kvm_fpu_put(void)
+{
+ fpregs_unlock();
+}
+
+static inline void kvm_read_sse_reg(int reg, sse128_t *data)
+{
+ kvm_fpu_get();
+ _kvm_read_sse_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_write_sse_reg(int reg, const sse128_t *data)
+{
+ kvm_fpu_get();
+ _kvm_write_sse_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_read_mmx_reg(int reg, u64 *data)
+{
+ kvm_fpu_get();
+ _kvm_read_mmx_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_write_mmx_reg(int reg, const u64 *data)
+{
+ kvm_fpu_get();
+ _kvm_write_mmx_reg(reg, data);
+ kvm_fpu_put();
+}
+
+#endif
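As an aside for readers unfamiliar with the vector_size trick above, here is a minimal userspace-style sketch of how the sse128_t accessors compose and split a 128-bit value. It builds with gcc or clang; the stdint typedefs stand in for the kernel's u32/u64, and the kernel-only pieces (BUG(), the movdqa register helpers) are omitted.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Same layout trick as the new fpu.h: a 16-byte vector viewed as u64/u32 lanes. */
typedef u32 __attribute__((vector_size(16))) sse128_t;
#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)   ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)   ({ __sse128_u t; t.vec = x; t.as_u64[1]; })
#define sse128(lo, hi) ({ __sse128_u t; t.as_u64[0] = lo; t.as_u64[1] = hi; t.vec; })

int main(void)
{
	/* Pack two 64-bit words into one XMM-sized value, then split them again. */
	sse128_t x = sse128(0x1111111111111111ULL, 0x2222222222222222ULL);

	printf("lo=%llx hi=%llx\n",
	       (unsigned long long)sse128_lo(x),
	       (unsigned long long)sse128_hi(x));
	return 0;
}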
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index f98370a39936..b07592ca92f0 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -36,6 +36,7 @@
#include "trace.h"
#include "irq.h"
+#include "fpu.h"
/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
@@ -273,15 +274,10 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
- struct kvm_cpuid_entry2 *entry;
-
- entry = kvm_find_cpuid_entry(vcpu,
- HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
- 0);
- if (!entry)
- return false;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
- return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ return hv_vcpu->cpuid_cache.syndbg_cap_eax &
+ HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
@@ -635,11 +631,17 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
union hv_stimer_config new_config = {.as_uint64 = config},
old_config = {.as_uint64 = stimer->config.as_uint64};
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
if (!synic->active && !host)
return 1;
+ if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
+ !(hv_vcpu->cpuid_cache.features_edx &
+ HV_STIMER_DIRECT_MODE_AVAILABLE)))
+ return 1;
+
trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, config, host);
@@ -1172,6 +1174,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
{
struct kvm_hv *hv = to_kvm_hv(kvm);
u64 gfn;
+ int idx;
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
@@ -1190,20 +1193,105 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
hv->tsc_ref.tsc_sequence = 0;
+
+ /*
+ * Take the srcu lock as memslots will be accessed to check the gfn
+ * cache generation against the memslots generation.
+ */
+ idx = srcu_read_lock(&kvm->srcu);
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+ srcu_read_unlock(&kvm->srcu, idx);
out_unlock:
mutex_unlock(&hv->hv_lock);
}
+
+static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
+{
+ if (!hv_vcpu->enforce_cpuid)
+ return true;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ case HV_X64_MSR_HYPERCALL:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_HYPERCALL_AVAILABLE;
+ case HV_X64_MSR_VP_RUNTIME:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_VP_RUNTIME_AVAILABLE;
+ case HV_X64_MSR_TIME_REF_COUNT:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ case HV_X64_MSR_VP_INDEX:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_VP_INDEX_AVAILABLE;
+ case HV_X64_MSR_RESET:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_RESET_AVAILABLE;
+ case HV_X64_MSR_REFERENCE_TSC:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_REFERENCE_TSC_AVAILABLE;
+ case HV_X64_MSR_SCONTROL:
+ case HV_X64_MSR_SVERSION:
+ case HV_X64_MSR_SIEFP:
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_SYNIC_AVAILABLE;
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_SYNTIMER_AVAILABLE;
+ case HV_X64_MSR_EOI:
+ case HV_X64_MSR_ICR:
+ case HV_X64_MSR_TPR:
+ case HV_X64_MSR_VP_ASSIST_PAGE:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_APIC_ACCESS_AVAILABLE;
+ break;
+ case HV_X64_MSR_TSC_FREQUENCY:
+ case HV_X64_MSR_APIC_FREQUENCY:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_ACCESS_FREQUENCY_MSRS;
+ case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+ case HV_X64_MSR_TSC_EMULATION_CONTROL:
+ case HV_X64_MSR_TSC_EMULATION_STATUS:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_ACCESS_REENLIGHTENMENT;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ case HV_X64_MSR_CRASH_CTL:
+ return hv_vcpu->cpuid_cache.features_edx &
+ HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ case HV_X64_MSR_SYNDBG_OPTIONS:
+ case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+ return hv_vcpu->cpuid_cache.features_edx &
+ HV_FEATURE_DEBUG_MSRS_AVAILABLE;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_hv *hv = to_kvm_hv(kvm);
+ if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
hv->hv_guest_os_id = data;
@@ -1332,6 +1420,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
@@ -1446,6 +1537,9 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
struct kvm *kvm = vcpu->kvm;
struct kvm_hv *hv = to_kvm_hv(kvm);
+ if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
data = hv->hv_guest_os_id;
@@ -1495,6 +1589,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
u64 data = 0;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_VP_INDEX:
data = hv_vcpu->vp_index;
@@ -1623,8 +1720,22 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
return vcpu_bitmap;
}
-static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex)
+struct kvm_hv_hcall {
+ u64 param;
+ u64 ingpa;
+ u64 outgpa;
+ u16 code;
+ u16 rep_cnt;
+ u16 rep_idx;
+ bool fast;
+ bool rep;
+ sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
+};
+
+static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
+ int i;
+ gpa_t gpa;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct hv_tlb_flush_ex flush_ex;
@@ -1638,8 +1749,15 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
bool all_cpus;
if (!ex) {
- if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (hc->fast) {
+ flush.address_space = hc->ingpa;
+ flush.flags = hc->outgpa;
+ flush.processor_mask = sse128_lo(hc->xmm[0]);
+ } else {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa,
+ &flush, sizeof(flush))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
trace_kvm_hv_flush_tlb(flush.processor_mask,
flush.address_space, flush.flags);
@@ -1657,9 +1775,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
flush.processor_mask == 0;
} else {
- if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
- sizeof(flush_ex))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (hc->fast) {
+ flush_ex.address_space = hc->ingpa;
+ flush_ex.flags = hc->outgpa;
+ memcpy(&flush_ex.hv_vp_set,
+ &hc->xmm[0], sizeof(hc->xmm[0]));
+ } else {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
+ sizeof(flush_ex))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
flush_ex.hv_vp_set.format,
@@ -1670,20 +1795,28 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
all_cpus = flush_ex.hv_vp_set.format !=
HV_GENERIC_SET_SPARSE_4K;
- sparse_banks_len =
- bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
- sizeof(sparse_banks[0]);
+ sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
if (!sparse_banks_len && !all_cpus)
goto ret_success;
- if (!all_cpus &&
- kvm_read_guest(kvm,
- ingpa + offsetof(struct hv_tlb_flush_ex,
- hv_vp_set.bank_contents),
- sparse_banks,
- sparse_banks_len))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (!all_cpus) {
+ if (hc->fast) {
+ if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ for (i = 0; i < sparse_banks_len; i += 2) {
+ sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
+ sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
+ }
+ } else {
+ gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
+ hv_vp_set.bank_contents);
+ if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
+ sparse_banks_len *
+ sizeof(sparse_banks[0]))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+ }
}
cpumask_clear(&hv_vcpu->tlb_flush);
@@ -1696,13 +1829,13 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
* analyze it here, flush TLB regardless of the specified address space.
*/
- kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
NULL, vcpu_mask, &hv_vcpu->tlb_flush);
ret_success:
- /* We always do full TLB flush, set rep_done = rep_cnt. */
+ /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
return (u64)HV_STATUS_SUCCESS |
- ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
+ ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
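To make the fast-path plumbing above easier to follow: for the XMM variant, hc->xmm[0] carries the fixed part of the flush argument (processor_mask, or the hv_vp_set header for the _EX calls), and each following register packs two 64-bit sparse-bank words, low half first. A sketch of that unpacking, assuming the sse128 helpers from the new fpu.h; the standalone helper name is illustrative.

/* Illustration only: mirrors the fast-path loop in kvm_hv_flush_tlb(). */
static void unpack_sparse_banks(const sse128_t *xmm, u64 *sparse_banks,
				int sparse_banks_len)
{
	int i;

	/* Bank words 0,1 come from xmm[1], words 2,3 from xmm[2], and so on. */
	for (i = 0; i < sparse_banks_len; i += 2) {
		sparse_banks[i]     = sse128_lo(xmm[i / 2 + 1]);
		sparse_banks[i + 1] = sse128_hi(xmm[i / 2 + 1]);
	}
}

The caller's cap of HV_HYPERCALL_MAX_XMM_REGISTERS - 1 bank registers matches the fast calling convention's XMM input budget (assumed here to be six registers, per the TLFS).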
@@ -1724,8 +1857,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
}
}
-static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
- bool ex, bool fast)
+static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
struct kvm *kvm = vcpu->kvm;
struct hv_send_ipi_ex send_ipi_ex;
@@ -1740,25 +1872,25 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
bool all_cpus;
if (!ex) {
- if (!fast) {
- if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
+ if (!hc->fast) {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
sizeof(send_ipi))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
sparse_banks[0] = send_ipi.cpu_mask;
vector = send_ipi.vector;
} else {
/* 'reserved' part of hv_send_ipi should be 0 */
- if (unlikely(ingpa >> 32 != 0))
+ if (unlikely(hc->ingpa >> 32 != 0))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
- sparse_banks[0] = outgpa;
- vector = (u32)ingpa;
+ sparse_banks[0] = hc->outgpa;
+ vector = (u32)hc->ingpa;
}
all_cpus = false;
valid_bank_mask = BIT_ULL(0);
trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
} else {
- if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
sizeof(send_ipi_ex))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
@@ -1778,8 +1910,8 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
if (!all_cpus &&
kvm_read_guest(kvm,
- ingpa + offsetof(struct hv_send_ipi_ex,
- vp_set.bank_contents),
+ hc->ingpa + offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents),
sparse_banks,
sparse_banks_len))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
@@ -1801,12 +1933,67 @@ ret_success:
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *entry;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
- if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
+ if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
vcpu->arch.hyperv_enabled = true;
- else
+ } else {
vcpu->arch.hyperv_enabled = false;
+ return;
+ }
+
+ if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
+ return;
+
+ hv_vcpu = to_hv_vcpu(vcpu);
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
+ if (entry) {
+ hv_vcpu->cpuid_cache.features_eax = entry->eax;
+ hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
+ hv_vcpu->cpuid_cache.features_edx = entry->edx;
+ } else {
+ hv_vcpu->cpuid_cache.features_eax = 0;
+ hv_vcpu->cpuid_cache.features_ebx = 0;
+ hv_vcpu->cpuid_cache.features_edx = 0;
+ }
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
+ if (entry) {
+ hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
+ hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
+ } else {
+ hv_vcpu->cpuid_cache.enlightenments_eax = 0;
+ hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
+ }
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
+ if (entry)
+ hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
+ else
+ hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
+}
+
+int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
+{
+ struct kvm_vcpu_hv *hv_vcpu;
+ int ret = 0;
+
+ if (!to_hv_vcpu(vcpu)) {
+ if (enforce) {
+ ret = kvm_hv_vcpu_init(vcpu);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+ }
+
+ hv_vcpu = to_hv_vcpu(vcpu);
+ hv_vcpu->enforce_cpuid = enforce;
+
+ return ret;
}
bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
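kvm_hv_set_enforce_cpuid() is driven from userspace through a per-vCPU capability. A hypothetical sketch of that side, assuming the capability is named KVM_CAP_HYPERV_ENFORCE_CPUID and is enabled with the usual KVM_ENABLE_CAP ioctl on the vCPU fd; check both against the uapi headers of the target kernel before relying on them.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: ask KVM to filter Hyper-V MSRs/hypercalls by guest CPUID. */
static int enable_hv_enforce_cpuid(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_HYPERV_ENFORCE_CPUID,	/* assumed capability name */
		.args = { 1 },				/* non-zero = enforce */
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}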
@@ -1839,20 +2026,21 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
-static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
+static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
struct eventfd_ctx *eventfd;
- if (unlikely(!fast)) {
+ if (unlikely(!hc->fast)) {
int ret;
- gpa_t gpa = param;
+ gpa_t gpa = hc->ingpa;
- if ((gpa & (__alignof__(param) - 1)) ||
- offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
+ if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
+ offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
return HV_STATUS_INVALID_ALIGNMENT;
- ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
+ ret = kvm_vcpu_read_guest(vcpu, gpa,
+ &hc->ingpa, sizeof(hc->ingpa));
if (ret < 0)
return HV_STATUS_INVALID_ALIGNMENT;
}
@@ -1862,15 +2050,15 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
* have no use for it, and in all known usecases it is zero, so just
* report lookup failure if it isn't.
*/
- if (param & 0xffff00000000ULL)
+ if (hc->ingpa & 0xffff00000000ULL)
return HV_STATUS_INVALID_PORT_ID;
/* remaining bits are reserved-zero */
- if (param & ~KVM_HYPERV_CONN_ID_MASK)
+ if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
return HV_STATUS_INVALID_HYPERCALL_INPUT;
/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
rcu_read_lock();
- eventfd = idr_find(&hv->conn_to_evt, param);
+ eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
rcu_read_unlock();
if (!eventfd)
return HV_STATUS_INVALID_PORT_ID;
@@ -1879,11 +2067,80 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
return HV_STATUS_SUCCESS;
}
+static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
+{
+ switch (hc->code) {
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+ return true;
+ }
+
+ return false;
+}
+
+static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
+{
+ int reg;
+
+ kvm_fpu_get();
+ for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
+ _kvm_read_sse_reg(reg, &hc->xmm[reg]);
+ kvm_fpu_put();
+}
+
+static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
+{
+ if (!hv_vcpu->enforce_cpuid)
+ return true;
+
+ switch (code) {
+ case HVCALL_NOTIFY_LONG_SPIN_WAIT:
+ return hv_vcpu->cpuid_cache.enlightenments_ebx &&
+ hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
+ case HVCALL_POST_MESSAGE:
+ return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
+ case HVCALL_SIGNAL_EVENT:
+ return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
+ case HVCALL_POST_DEBUG_DATA:
+ case HVCALL_RETRIEVE_DEBUG_DATA:
+ case HVCALL_RESET_DEBUG_SESSION:
+ /*
+ * Return 'true' when SynDBG is disabled so the resulting code
+ * will be HV_STATUS_INVALID_HYPERCALL_CODE.
+ */
+ return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
+ hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+ if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+ fallthrough;
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+ return hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ case HVCALL_SEND_IPI_EX:
+ if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+ fallthrough;
+ case HVCALL_SEND_IPI:
+ return hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_CLUSTER_IPI_RECOMMENDED;
+ default:
+ break;
+ }
+
+ return true;
+}
+
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
- u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
- uint16_t code, rep_idx, rep_cnt;
- bool fast, rep;
+ struct kvm_hv_hcall hc;
+ u64 ret = HV_STATUS_SUCCESS;
/*
* hypercall generates UD from non zero cpl and real mode
@@ -1896,104 +2153,113 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
if (is_64_bit_mode(vcpu)) {
- param = kvm_rcx_read(vcpu);
- ingpa = kvm_rdx_read(vcpu);
- outgpa = kvm_r8_read(vcpu);
+ hc.param = kvm_rcx_read(vcpu);
+ hc.ingpa = kvm_rdx_read(vcpu);
+ hc.outgpa = kvm_r8_read(vcpu);
} else
#endif
{
- param = ((u64)kvm_rdx_read(vcpu) << 32) |
- (kvm_rax_read(vcpu) & 0xffffffff);
- ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
- (kvm_rcx_read(vcpu) & 0xffffffff);
- outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
- (kvm_rsi_read(vcpu) & 0xffffffff);
+ hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
+ (kvm_rax_read(vcpu) & 0xffffffff);
+ hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
+ (kvm_rcx_read(vcpu) & 0xffffffff);
+ hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
+ (kvm_rsi_read(vcpu) & 0xffffffff);
}
- code = param & 0xffff;
- fast = !!(param & HV_HYPERCALL_FAST_BIT);
- rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
- rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
- rep = !!(rep_cnt || rep_idx);
+ hc.code = hc.param & 0xffff;
+ hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
+ hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
+ hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
+ hc.rep = !!(hc.rep_cnt || hc.rep_idx);
- trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+ if (hc.fast && is_xmm_fast_hypercall(&hc))
+ kvm_hv_hypercall_read_xmm(&hc);
- switch (code) {
+ trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
+ hc.ingpa, hc.outgpa);
+
+ if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+ ret = HV_STATUS_ACCESS_DENIED;
+ goto hypercall_complete;
+ }
+
+ switch (hc.code) {
case HVCALL_NOTIFY_LONG_SPIN_WAIT:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
kvm_vcpu_on_spin(vcpu, true);
break;
case HVCALL_SIGNAL_EVENT:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
+ ret = kvm_hvcall_signal_event(vcpu, &hc);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
- if (unlikely(rep || !to_hv_synic(vcpu)->active)) {
+ if (unlikely(hc.rep || !to_hv_synic(vcpu)->active)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
- vcpu->run->hyperv.u.hcall.input = param;
- vcpu->run->hyperv.u.hcall.params[0] = ingpa;
- vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+ vcpu->run->hyperv.u.hcall.input = hc.param;
+ vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
+ vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
vcpu->arch.complete_userspace_io =
kvm_hv_hypercall_complete_userspace;
return 0;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
- if (unlikely(fast || !rep_cnt || rep_idx)) {
+ if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
- if (unlikely(fast || !rep_cnt || rep_idx)) {
+ if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true);
break;
case HVCALL_SEND_IPI:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
+ ret = kvm_hv_send_ipi(vcpu, &hc, false);
break;
case HVCALL_SEND_IPI_EX:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.fast || hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
+ ret = kvm_hv_send_ipi(vcpu, &hc, true);
break;
case HVCALL_POST_DEBUG_DATA:
case HVCALL_RETRIEVE_DEBUG_DATA:
- if (unlikely(fast)) {
+ if (unlikely(hc.fast)) {
ret = HV_STATUS_INVALID_PARAMETER;
break;
}
@@ -2012,9 +2278,9 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
- vcpu->run->hyperv.u.hcall.input = param;
- vcpu->run->hyperv.u.hcall.params[0] = ingpa;
- vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+ vcpu->run->hyperv.u.hcall.input = hc.param;
+ vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
+ vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
vcpu->arch.complete_userspace_io =
kvm_hv_hypercall_complete_userspace;
return 0;
@@ -2024,6 +2290,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
break;
}
+hypercall_complete:
return kvm_hv_hypercall_complete(vcpu, ret);
}
@@ -2172,6 +2439,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
ent->ebx |= HV_POST_MESSAGES;
ent->ebx |= HV_SIGNAL_EVENTS;
+ ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 60547d5cb6d7..730da8537d05 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -138,6 +138,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu);
+int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 3db5c42c9ecd..90e1ffdc05b7 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -55,6 +55,13 @@ static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
+static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
@@ -118,6 +125,11 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
return vcpu->arch.walk_mmu->pdptrs[index];
}
+static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
+{
+ vcpu->arch.walk_mmu->pdptrs[index] = value;
+}
+
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
@@ -162,6 +174,7 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
vcpu->arch.hflags |= HF_GUEST_MASK;
+ vcpu->stat.guest_mode = 1;
}
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
@@ -172,6 +185,8 @@ static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
vcpu->arch.load_eoi_exitmap_pending = false;
kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}
+
+ vcpu->stat.guest_mode = 0;
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 0d359115429a..68b420289d7e 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -13,6 +13,7 @@
#define _ASM_X86_KVM_X86_EMULATE_H
#include <asm/desc_defs.h>
+#include "fpu.h"
struct x86_emulate_ctxt;
enum x86_intercept;
@@ -229,15 +230,12 @@ struct x86_emulate_ops {
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
- void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
- int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
- const char *smstate);
- void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
+ void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
+ int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
+ void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
};
-typedef u32 __attribute__((vector_size(16))) sse128_t;
-
/* Type, address-of, and value of an instruction's operand. */
struct operand {
enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
@@ -314,7 +312,6 @@ struct x86_emulate_ctxt {
int interruptibility;
bool perm_ok; /* do not check permissions if true */
- bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
@@ -468,6 +465,7 @@ enum x86_intercept {
x86_intercept_clgi,
x86_intercept_skinit,
x86_intercept_rdtscp,
+ x86_intercept_rdpid,
x86_intercept_icebp,
x86_intercept_wbinvd,
x86_intercept_monitor,
@@ -490,7 +488,7 @@ enum x86_intercept {
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
new file mode 100644
index 000000000000..c7db2df50a7a
--- /dev/null
+++ b/arch/x86/kvm/kvm_onhyperv.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+ void *data)
+{
+ struct kvm_tlb_range *range = data;
+
+ return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+ range->pages);
+}
+
+static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
+ struct kvm_tlb_range *range)
+{
+ if (range)
+ return hyperv_flush_guest_mapping_range(root_tdp,
+ kvm_fill_hv_flush_list_func, (void *)range);
+ else
+ return hyperv_flush_guest_mapping(root_tdp);
+}
+
+int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
+{
+ struct kvm_arch *kvm_arch = &kvm->arch;
+ struct kvm_vcpu *vcpu;
+ int ret = 0, i, nr_unique_valid_roots;
+ hpa_t root;
+
+ spin_lock(&kvm_arch->hv_root_tdp_lock);
+
+ if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
+ nr_unique_valid_roots = 0;
+
+ /*
+ * Flush all valid roots, and see if all vCPUs have converged
+ * on a common root, in which case future flushes can skip the
+ * loop and flush the common root.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ root = vcpu->arch.hv_root_tdp;
+ if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
+ continue;
+
+ /*
+ * Set the tracked root to the first valid root. Keep
+ * this root for the entirety of the loop even if more
+ * roots are encountered as a low effort optimization
+ * to avoid flushing the same (first) root again.
+ */
+ if (++nr_unique_valid_roots == 1)
+ kvm_arch->hv_root_tdp = root;
+
+ if (!ret)
+ ret = hv_remote_flush_root_tdp(root, range);
+
+ /*
+ * Stop processing roots if a failure occurred and
+ * multiple valid roots have already been detected.
+ */
+ if (ret && nr_unique_valid_roots > 1)
+ break;
+ }
+
+ /*
+ * The optimized flush of a single root can't be used if there
+ * are multiple valid roots (obviously).
+ */
+ if (nr_unique_valid_roots > 1)
+ kvm_arch->hv_root_tdp = INVALID_PAGE;
+ } else {
+ ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
+ }
+
+ spin_unlock(&kvm_arch->hv_root_tdp_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);
+
+int hv_remote_flush_tlb(struct kvm *kvm)
+{
+ return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
+EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h
new file mode 100644
index 000000000000..1c67abf2eba9
--- /dev/null
+++ b/arch/x86/kvm/kvm_onhyperv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V.
+ */
+
+#ifndef __ARCH_X86_KVM_KVM_ONHYPERV_H__
+#define __ARCH_X86_KVM_KVM_ONHYPERV_H__
+
+#if IS_ENABLED(CONFIG_HYPERV)
+int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range);
+int hv_remote_flush_tlb(struct kvm *kvm);
+
+static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+{
+ struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
+
+ if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
+ spin_lock(&kvm_arch->hv_root_tdp_lock);
+ vcpu->arch.hv_root_tdp = root_tdp;
+ if (root_tdp != kvm_arch->hv_root_tdp)
+ kvm_arch->hv_root_tdp = INVALID_PAGE;
+ spin_unlock(&kvm_arch->hv_root_tdp_lock);
+ }
+}
+#else /* !CONFIG_HYPERV */
+static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+{
+}
+#endif /* !CONFIG_HYPERV */
+
+#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 152591f9243a..ba5a27879f1d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
if (!apic_x2apic_mode(apic))
valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+ if (alignment + len > 4)
+ return 1;
+
if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
return 1;
@@ -1494,6 +1497,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
static void cancel_hv_timer(struct kvm_lapic *apic);
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+ hrtimer_cancel(&apic->lapic_timer.timer);
+ preempt_disable();
+ if (apic->lapic_timer.hv_timer_in_use)
+ cancel_hv_timer(apic);
+ preempt_enable();
+}
+
static void apic_update_lvtt(struct kvm_lapic *apic)
{
u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1502,11 +1514,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
if (apic->lapic_timer.timer_mode != timer_mode) {
if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
APIC_LVT_TIMER_TSCDEADLINE)) {
- hrtimer_cancel(&apic->lapic_timer.timer);
- preempt_disable();
- if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_timer(apic);
- preempt_enable();
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, 0);
apic->lapic_timer.period = 0;
apic->lapic_timer.tscdeadline = 0;
@@ -1598,11 +1606,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
+ if (lapic_timer_advance_dynamic) {
+ adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
+ /*
+ * If the timer fired early, reread the TSC to account for the
+ * overhead of the above adjustment to avoid waiting longer
+ * than is necessary.
+ */
+ if (guest_tsc < tsc_deadline)
+ guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ }
+
if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
-
- if (lapic_timer_advance_dynamic)
- adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
}
void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
@@ -1661,7 +1677,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
}
atomic_inc(&apic->lapic_timer.pending);
- kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
if (from_timer_fn)
kvm_vcpu_kick(vcpu);
}
@@ -1913,8 +1929,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
if (!apic->lapic_timer.hv_timer_in_use)
goto out;
WARN_ON(rcuwait_active(&vcpu->wait));
- cancel_hv_timer(apic);
apic_timer_expired(apic, false);
+ cancel_hv_timer(apic);
if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
advance_periodic_target_expiration(apic);
@@ -2084,7 +2100,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
if (apic_lvtt_tscdeadline(apic))
break;
- hrtimer_cancel(&apic->lapic_timer.timer);
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
@@ -2615,6 +2631,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
update_divide_count(apic);
__start_apic_timer(apic, APIC_TMCCT);
+ kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
kvm_apic_update_apicv(vcpu);
apic->highest_isr_cache = -1;
if (vcpu->arch.apicv_active) {
@@ -2856,7 +2873,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u8 sipi_vector;
@@ -2864,7 +2881,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
unsigned long pe;
if (!lapic_in_kernel(vcpu))
- return;
+ return 0;
/*
* Read pending events before calling the check_events
@@ -2872,12 +2889,12 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
*/
pe = smp_load_acquire(&apic->pending_events);
if (!pe)
- return;
+ return 0;
if (is_guest_mode(vcpu)) {
r = kvm_check_nested_events(vcpu);
if (r < 0)
- return;
+ return r == -EBUSY ? 0 : r;
/*
* If an event has happened and caused a vmexit,
* we know INITs are latched and therefore
@@ -2898,7 +2915,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
if (test_bit(KVM_APIC_SIPI, &pe))
clear_bit(KVM_APIC_SIPI, &apic->pending_events);
- return;
+ return 0;
}
if (test_bit(KVM_APIC_INIT, &pe)) {
@@ -2919,6 +2936,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
+ return 0;
}
void kvm_lapic_exit(void)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 997c45a5963a..d7c25d0c1354 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -76,7 +76,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 88d0ed5225a4..83e6c6965f1e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,6 +44,12 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
+#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \
+ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \
+ X86_CR4_LA57)
+
+#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
+
static __always_inline u64 rsvd_bits(int s, int e)
{
BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
@@ -62,12 +68,9 @@ static __always_inline u64 rsvd_bits(int s, int e)
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
- gpa_t nested_cr3);
+void kvm_init_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
@@ -162,11 +165,6 @@ static inline bool is_writable_pte(unsigned long pte)
return pte & PT_WRITABLE_MASK;
}
-static inline bool is_write_protection(struct kvm_vcpu *vcpu)
-{
- return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
/*
* Check if a given access (described through the I/D, W/R and U/S bits of a
* page fault error code pfec) causes a permission fault with the given PTE
@@ -232,4 +230,14 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+{
+ /*
+ * Read memslots_have_rmaps before rmap pointers. Hence, threads reading
+ * memslots_have_rmaps in any lock context are guaranteed to see the
+ * pointers. Pairs with smp_store_release in alloc_all_memslots_rmaps.
+ */
+ return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
+}
+
#endif
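A minimal sketch of the publish/consume pairing the comment above refers to; only memslots_have_rmaps and the alloc_all_memslots_rmaps name come from this patch, the allocation helper is hypothetical.

/*
 * Writer side (sketch of what alloc_all_memslots_rmaps() must do): allocate
 * the per-memslot rmap arrays first, then publish the flag with release
 * semantics, so a reader that observes the flag via the smp_load_acquire()
 * in kvm_memslots_have_rmaps() is guaranteed to also see the allocations.
 */
static int sketch_alloc_all_memslots_rmaps(struct kvm *kvm)
{
	int r = sketch_allocate_rmaps(kvm);	/* hypothetical allocation helper */

	if (r)
		return r;

	smp_store_release(&kvm->arch.memslots_have_rmaps, true);
	return 0;
}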
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4b3ee244ebe0..845d114ae075 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -55,7 +55,7 @@
extern bool itlb_multihit_kvm_mitigation;
-static int __read_mostly nx_huge_pages = -1;
+int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
@@ -176,9 +176,80 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
+struct kvm_mmu_role_regs {
+ const unsigned long cr0;
+ const unsigned long cr4;
+ const u64 efer;
+};
+
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
+/*
+ * Yes, lots of underscores. They're a hint that you probably shouldn't be
+ * reading from the role_regs. Once the mmu_role is constructed, it becomes
+ * the single source of truth for the MMU's state.
+ */
+#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
+static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
+{ \
+ return !!(regs->reg & flag); \
+}
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
+
+/*
+ * The MMU itself (with a valid role) is the single source of truth for the
+ * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
+ * regs don't account for dependencies, e.g. CR4 bits are ignored if CR0.PG=0,
+ * and the vCPU may be incorrect/irrelevant.
+ */
+#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \
+static inline bool is_##reg##_##name(struct kvm_mmu *mmu) \
+{ \
+ return !!(mmu->mmu_role. base_or_ext . reg##_##name); \
+}
+BUILD_MMU_ROLE_ACCESSOR(ext, cr0, pg);
+BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pae);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
+BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
+
+static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_role_regs regs = {
+ .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
+ .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
+ .efer = vcpu->arch.efer,
+ };
+
+ return regs;
+}
+
+static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
+{
+ if (!____is_cr0_pg(regs))
+ return 0;
+ else if (____is_efer_lma(regs))
+ return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
+ PT64_ROOT_4LEVEL;
+ else if (____is_cr4_pae(regs))
+ return PT32E_ROOT_LEVEL;
+ else
+ return PT32_ROOT_LEVEL;
+}
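Editorial illustration, not part of the patch: a standalone sketch of the root-level decision that role_regs_to_root_level() makes from CR0.PG, EFER.LMA, CR4.LA57 and CR4.PAE (the enum values are illustrative stand-ins for the kernel's PT*_ROOT_LEVEL constants):

#include <stdio.h>

enum { NONPAGING = 0, PT32_ROOT = 2, PT32E_ROOT = 3, PT64_4LEVEL = 4, PT64_5LEVEL = 5 };

static int root_level(int cr0_pg, int efer_lma, int cr4_la57, int cr4_pae)
{
	if (!cr0_pg)
		return NONPAGING;                        /* paging disabled */
	if (efer_lma)
		return cr4_la57 ? PT64_5LEVEL : PT64_4LEVEL;
	if (cr4_pae)
		return PT32E_ROOT;
	return PT32_ROOT;
}

int main(void)
{
	printf("%d\n", root_level(1, 1, 0, 1)); /* 64-bit, 4-level paging -> 4 */
	printf("%d\n", root_level(1, 0, 0, 1)); /* PAE paging -> 3 */
	return 0;
}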
static inline bool kvm_available_flush_tlb_with_range(void)
{
@@ -208,11 +279,6 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
kvm_flush_remote_tlbs_with_range(kvm, &range);
}
-bool is_nx_huge_page_enabled(void)
-{
- return READ_ONCE(nx_huge_pages);
-}
-
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
unsigned int access)
{
@@ -269,11 +335,6 @@ static int is_cpuid_PSE36(void)
return 1;
}
-static int is_nx(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.efer & EFER_NX;
-}
-
static gfn_t pse36_gfn_delta(u32 gpte)
{
int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
@@ -1177,8 +1238,7 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
* @gfn_offset: start of the BITS_PER_LONG pages we care about
* @mask: indicates which pages we should protect
*
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * Used when we do not need to care about huge page mappings.
*/
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -1189,6 +1249,10 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, true);
+
+ if (!kvm_memslots_have_rmaps(kvm))
+ return;
+
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot);
@@ -1218,6 +1282,10 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, false);
+
+ if (!kvm_memslots_have_rmaps(kvm))
+ return;
+
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot);
@@ -1235,13 +1303,36 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
* It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
* enable dirty logging for them.
*
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * We need to care about huge page mappings: e.g. during dirty logging we may
+ * have such mappings.
*/
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
+ /*
+ * Huge pages are NOT write protected when we start dirty logging in
+ * initially-all-set mode; must write protect them here so that they
+ * are split to 4K on the first write.
+ *
+ * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
+ * of memslot has no such restriction, so the range can cross two large
+ * pages.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
+ gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
+ gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
+
+ kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
+
+ /* Cross two large pages? */
+ if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
+ ALIGN(end << PAGE_SHIFT, PMD_SIZE))
+ kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
+ PG_LEVEL_2M);
+ }
+
+ /* Now handle 4K PTEs. */
if (kvm_x86_ops.cpu_dirty_log_size)
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
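Editorial illustration, not part of the patch: the hunk above write-protects the first and, if needed, the last huge page covered by the dirty mask. A standalone sketch of the "does the mask span two 2MiB regions?" check, modeling __ffs()/__fls() with GCC builtins and using the usual x86 PAGE_SHIFT/PMD_SIZE values (round-down alignment is used here for the same-region test; the patch compares ALIGN()ed addresses):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PMD_SIZE   (1ULL << 21)
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* mask must be non-zero, as in the dirty-logging path. */
static int spans_two_2m_pages(uint64_t base_gfn, uint64_t gfn_offset, uint64_t mask)
{
	uint64_t start = base_gfn + gfn_offset + __builtin_ctzll(mask);        /* ~__ffs */
	uint64_t end   = base_gfn + gfn_offset + (63 - __builtin_clzll(mask)); /* ~__fls */

	return ALIGN_DOWN(start << PAGE_SHIFT, PMD_SIZE) !=
	       ALIGN_DOWN(end << PAGE_SHIFT, PMD_SIZE);
}

int main(void)
{
	/* gfns 0x1f0..0x22f straddle the 2MiB boundary at gfn 0x200. */
	printf("%d\n", spans_two_2m_pages(0x1f0, 0, ~0ULL));
	/* gfns 0x400..0x407 stay within one 2MiB region. */
	printf("%d\n", spans_two_2m_pages(0x400, 0, 0xffULL));
	return 0;
}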
@@ -1254,20 +1345,23 @@ int kvm_cpu_dirty_log_size(void)
}
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn)
+ struct kvm_memory_slot *slot, u64 gfn,
+ int min_level)
{
struct kvm_rmap_head *rmap_head;
int i;
bool write_protected = false;
- for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
- rmap_head = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+ rmap_head = __gfn_to_rmap(gfn, i, slot);
+ write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+ }
}
if (is_tdp_mmu_enabled(kvm))
write_protected |=
- kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
+ kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
return write_protected;
}
@@ -1277,7 +1371,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+ return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1433,9 +1527,10 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool flush;
+ bool flush = false;
- flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
@@ -1445,9 +1540,10 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool flush;
+ bool flush = false;
- flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
@@ -1500,9 +1596,10 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool young;
+ bool young = false;
- young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
@@ -1512,9 +1609,10 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool young;
+ bool young = false;
- young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
@@ -1748,17 +1846,10 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
-static inline bool is_ept_sp(struct kvm_mmu_page *sp)
-{
- return sp->role.cr0_wp && sp->role.smap_andnot_wp;
-}
-
-/* @sp->gfn should be write-protected at the call site */
-static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct list_head *invalid_list)
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ struct list_head *invalid_list)
{
- if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
- vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+ if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@@ -1804,31 +1895,6 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct list_head *invalid_list)
-{
- kvm_unlink_unsync_page(vcpu->kvm, sp);
- return __kvm_sync_page(vcpu, sp, invalid_list);
-}
-
-/* @gfn should be write-protected at the call site */
-static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
- struct list_head *invalid_list)
-{
- struct kvm_mmu_page *s;
- bool ret = false;
-
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
- if (!s->unsync)
- continue;
-
- WARN_ON(s->role.level != PG_LEVEL_4K);
- ret |= kvm_sync_page(vcpu, s, invalid_list);
- }
-
- return ret;
-}
-
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
unsigned int idx[PT64_ROOT_MAX_LEVEL];
@@ -1923,6 +1989,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
}
for_each_sp(pages, sp, parents, i) {
+ kvm_unlink_unsync_page(vcpu->kvm, sp);
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
@@ -1958,8 +2025,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
struct hlist_head *sp_list;
unsigned quadrant;
struct kvm_mmu_page *sp;
- bool need_sync = false;
- bool flush = false;
int collisions = 0;
LIST_HEAD(invalid_list);
@@ -1982,20 +2047,39 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
continue;
}
- if (!need_sync && sp->unsync)
- need_sync = true;
-
- if (sp->role.word != role.word)
+ if (sp->role.word != role.word) {
+ /*
+ * If the guest is creating an upper-level page, zap
+ * unsync pages for the same gfn. While it's possible
+ * the guest is using recursive page tables, in all
+ * likelihood the guest has stopped using the unsync
+ * page and is installing a completely unrelated page.
+ * Unsync pages must not be left as is, because the new
+ * upper-level page will be write-protected.
+ */
+ if (level > PG_LEVEL_4K && sp->unsync)
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+ &invalid_list);
continue;
+ }
if (direct_mmu)
goto trace_get_page;
if (sp->unsync) {
- /* The page is good, but __kvm_sync_page might still end
- * up zapping it. If so, break in order to rebuild it.
+ /*
+ * The page is good, but is stale. kvm_sync_page does
+ * get the latest guest state, but (unlike mmu_unsync_children)
+ * it doesn't write-protect the page or mark it synchronized!
+ * This way the validity of the mapping is ensured, but the
+ * overhead of write protection is not incurred until the
+ * guest invalidates the TLB mapping. This allows multiple
+ * SPs for a single gfn to be unsync.
+ *
+ * If the sync fails, the page is zapped. If so, break
+ * in order to rebuild it.
*/
- if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+ if (!kvm_sync_page(vcpu, sp, &invalid_list))
break;
WARN_ON(!list_empty(&invalid_list));
@@ -2020,22 +2104,14 @@ trace_get_page:
sp->role = role;
hlist_add_head(&sp->hash_link, sp_list);
if (!direct) {
- /*
- * we should do write protection before syncing pages
- * otherwise the content of the synced shadow page may
- * be inconsistent with guest page table.
- */
account_shadowed(vcpu->kvm, sp);
if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
-
- if (level > PG_LEVEL_4K && need_sync)
- flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
trace_kvm_mmu_get_page(sp, true);
-
- kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
out:
+ kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
return sp;
@@ -2374,7 +2450,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
* page is available, while the caller may end up allocating as many as
* four pages, e.g. for PAE roots or for 5-level paging. Temporarily
* exceeding the (arbitrary by default) limit will not harm the host,
- * being too agressive may unnecessarily kill the guest, and getting an
+ * being too aggressive may unnecessarily kill the guest, and getting an
* exact count is far more trouble than it's worth, especially in the
* page fault paths.
*/
@@ -2448,17 +2524,33 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
kvm_mmu_mark_parents_unsync(sp);
}
-bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool can_unsync)
+/*
+ * Attempt to unsync any shadow pages that can be reached by the specified gfn,
+ * KVM is creating a writable mapping for said gfn. Returns 0 if all pages
+ * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
+ * be write-protected.
+ */
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{
struct kvm_mmu_page *sp;
+ /*
+ * Force write-protection if the page is being tracked. Note, the page
+ * track machinery is used to write-protect upper-level shadow pages,
+ * i.e. this guards the role.level == 4K assertion below!
+ */
if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
- return true;
+ return -EPERM;
+ /*
+ * The page is not write-tracked, mark existing shadow pages unsync
+ * unless KVM is synchronizing an unsync SP (can_unsync = false). In
+ * that case, KVM must complete emulation of the guest TLB flush before
+ * allowing shadow pages to become unsync (writable by the guest).
+ */
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (!can_unsync)
- return true;
+ return -EPERM;
if (sp->unsync)
continue;
@@ -2489,8 +2581,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
* 2.2 Guest issues TLB flush.
* That causes a VM Exit.
*
- * 2.3 kvm_mmu_sync_pages() reads sp->unsync.
- * Since it is false, so it just returns.
+ * 2.3 Walking of unsync pages sees sp->unsync is
+ * false and skips the page.
*
* 2.4 Guest accesses GVA X.
* Since the mapping in the SP was not updated,
@@ -2506,7 +2598,7 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
*/
smp_wmb();
- return false;
+ return 0;
}
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
@@ -2827,9 +2919,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
gfn_t gfn = gpa >> PAGE_SHIFT;
gfn_t base_gfn = gfn;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
-
level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
huge_page_disallowed, &req_level);
@@ -3180,6 +3269,33 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
+void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+ unsigned long roots_to_free = 0;
+ hpa_t root_hpa;
+ int i;
+
+ /*
+ * This should not be called while L2 is active; L2 can't invalidate
+ * _only_ its own roots, e.g. INVVPID unconditionally exits.
+ */
+ WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ root_hpa = mmu->prev_roots[i].hpa;
+ if (!VALID_PAGE(root_hpa))
+ continue;
+
+ if (!to_shadow_page(root_hpa) ||
+ to_shadow_page(root_hpa)->role.guest_mode)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+ }
+
+ kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
+
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
int ret = 0;
@@ -3280,6 +3396,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
}
}
+ r = alloc_all_memslots_rmaps(vcpu->kvm);
+ if (r)
+ return r;
+
write_lock(&vcpu->kvm->mmu_lock);
r = make_mmu_pages_available(vcpu);
if (r < 0)
@@ -3310,12 +3430,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
- if (WARN_ON_ONCE(!mmu->lm_root)) {
+ if (WARN_ON_ONCE(!mmu->pml4_root)) {
r = -EIO;
goto out_unlock;
}
- mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
+ mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
}
for (i = 0; i < 4; ++i) {
@@ -3335,7 +3455,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
}
if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
- mmu->root_hpa = __pa(mmu->lm_root);
+ mmu->root_hpa = __pa(mmu->pml4_root);
else
mmu->root_hpa = __pa(mmu->pae_root);
@@ -3350,7 +3470,7 @@ out_unlock:
static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
- u64 *lm_root, *pae_root;
+ u64 *pml4_root, *pae_root;
/*
* When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
@@ -3369,14 +3489,14 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
return -EIO;
- if (mmu->pae_root && mmu->lm_root)
+ if (mmu->pae_root && mmu->pml4_root)
return 0;
/*
* The special roots should always be allocated in concert. Yell and
* bail if KVM ends up in a state where only one of the roots is valid.
*/
- if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root))
+ if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
return -EIO;
/*
@@ -3387,14 +3507,14 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
if (!pae_root)
return -ENOMEM;
- lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
- if (!lm_root) {
+ pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+ if (!pml4_root) {
free_page((unsigned long)pae_root);
return -ENOMEM;
}
mmu->pae_root = pae_root;
- mmu->lm_root = lm_root;
+ mmu->pml4_root = pml4_root;
return 0;
}
@@ -3423,8 +3543,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
* flush strictly after those changes are made. We only need to
* ensure that the other CPU sets these flags before any actual
* changes to the page tables are made. The comments in
- * mmu_need_write_protect() describe what could go wrong if this
- * requirement isn't satisfied.
+ * mmu_try_to_unsync_pages() describe what could go wrong if
+ * this requirement isn't satisfied.
*/
if (!smp_load_acquire(&sp->unsync) &&
!smp_load_acquire(&sp->unsync_children))
@@ -3474,19 +3594,6 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
}
-static bool
-__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
-{
- int bit7 = (pte >> 7) & 1;
-
- return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
-}
-
-static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
-{
- return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
-}
-
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
/*
@@ -3540,12 +3647,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
int root, leaf, level;
bool reserved = false;
- if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
- *sptep = 0ull;
- return reserved;
- }
-
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu(vcpu->arch.mmu))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
leaf = get_walk(vcpu, addr, sptes, &root);
@@ -3569,13 +3671,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
for (level = root; level >= leaf; level--)
- /*
- * Use a bitwise-OR instead of a logical-OR to aggregate the
- * reserved bit and EPT's invalid memtype/XWR checks to avoid
- * adding a Jcc in the loop.
- */
- reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
- __is_rsvd_bits_set(rsvd_check, sptes[level], level);
+ reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
if (reserved) {
pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
@@ -3583,7 +3679,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
for (level = root; level >= leaf; level--)
pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
sptes[level], level,
- rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
+ get_rsvd_bits(rsvd_check, sptes[level], level));
}
return reserved;
@@ -3717,6 +3813,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
bool prefault, int max_level, bool is_tdp)
{
+ bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
bool write = error_code & PFERR_WRITE_MASK;
bool map_writable;
@@ -3729,7 +3826,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
- if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
+ if (!is_tdp_mmu_fault) {
r = fast_page_fault(vcpu, gpa, error_code);
if (r != RET_PF_INVALID)
return r;
@@ -3751,7 +3848,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
r = RET_PF_RETRY;
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
read_lock(&vcpu->kvm->mmu_lock);
else
write_lock(&vcpu->kvm->mmu_lock);
@@ -3762,7 +3859,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (r)
goto out_unlock;
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
pfn, prefault);
else
@@ -3770,7 +3867,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
prefault, is_tdp);
out_unlock:
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
read_unlock(&vcpu->kvm->mmu_lock);
else
write_unlock(&vcpu->kvm->mmu_lock);
@@ -3840,17 +3937,13 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
max_level, true);
}
-static void nonpaging_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void nonpaging_init_context(struct kvm_mmu *context)
{
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
- context->root_level = 0;
- context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = true;
- context->nx = false;
}
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -3913,8 +4006,7 @@ static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
}
static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
- union kvm_mmu_page_role new_role,
- bool skip_tlb_flush, bool skip_mmu_sync)
+ union kvm_mmu_page_role new_role)
{
if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
@@ -3929,10 +4021,10 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
*/
kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
- if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
+ if (force_flush_and_sync_on_reuse) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
- if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ }
/*
* The last MMIO access's GVA and GPA are cached in the VCPU. When
@@ -3951,11 +4043,9 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
to_shadow_page(vcpu->arch.mmu->root_hpa));
}
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
- bool skip_mmu_sync)
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{
- __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
- skip_tlb_flush, skip_mmu_sync);
+ __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
@@ -3981,26 +4071,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
return false;
}
-static inline bool is_last_gpte(struct kvm_mmu *mmu,
- unsigned level, unsigned gpte)
-{
- /*
- * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
- * If it is clear, there are no large pages at this level, so clear
- * PT_PAGE_SIZE_MASK in gpte if that is the case.
- */
- gpte &= level - mmu->last_nonleaf_level;
-
- /*
- * PG_LEVEL_4K always terminates. The RHS has bit 7 set
- * iff level <= PG_LEVEL_4K, which for our purpose means
- * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
- */
- gpte |= level - PG_LEVEL_4K - 1;
-
- return gpte & PT_PAGE_SIZE_MASK;
-}
-
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
@@ -4015,8 +4085,7 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
#undef PTTYPE
static void
-__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
- struct rsvd_bits_validate *rsvd_check,
+__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
bool pse, bool amd)
{
@@ -4105,14 +4174,29 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
}
}
+static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
+{
+ /*
+ * If TDP is enabled, let the guest use GBPAGES if they're supported in
+ * hardware. The hardware page walker doesn't let KVM disable GBPAGES,
+ * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
+ * walk for performance and complexity reasons. Not to mention KVM
+ * _can't_ solve the problem because GVA->GPA walks aren't visible to
+ * KVM once a TDP translation is installed. Mimic hardware behavior so
+ * that KVM's behavior is at least consistent, i.e. doesn't randomly inject #PF.
+ */
+ return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
+ guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
+}
+
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
- __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
+ __reset_rsvds_bits_mask(&context->guest_rsvd_check,
vcpu->arch.reserved_gpa_bits,
- context->root_level, context->nx,
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
- is_pse(vcpu),
+ context->root_level, is_efer_nx(context),
+ guest_can_use_gbpages(vcpu),
+ is_cr4_pse(context),
guest_cpuid_is_amd_or_hygon(vcpu));
}
@@ -4165,24 +4249,32 @@ static inline u64 reserved_hpa_bits(void)
* table in guest or amd nested guest, its mmu features completely
* follow the features in guest.
*/
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- bool uses_nx = context->nx ||
- context->mmu_role.base.smep_andnot_wp;
+ /*
+ * KVM uses NX when TDP is disabled to handle a variety of scenarios,
+ * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
+ * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
+ * The iTLB multi-hit workaround can be toggled at any time, so assume
+ * NX can be used by any non-nested shadow MMU to avoid having to reset
+ * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
+ */
+ bool uses_nx = is_efer_nx(context) || !tdp_enabled;
+
+ /* @amd adds a check on bit 8 of SPTEs, which KVM shouldn't use anyway. */
+ bool is_amd = true;
+ /* KVM doesn't use 2-level page tables for the shadow MMU. */
+ bool is_pse = false;
struct rsvd_bits_validate *shadow_zero_check;
int i;
- /*
- * Passing "true" to the last argument is okay; it adds a check
- * on bit 8 of the SPTEs which KVM doesn't use anyway.
- */
+ WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
+
shadow_zero_check = &context->shadow_zero_check;
- __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- reserved_hpa_bits(),
+ __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->shadow_root_level, uses_nx,
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
- is_pse(vcpu), true);
+ guest_can_use_gbpages(vcpu), is_pse, is_amd);
if (!shadow_me_mask)
return;
@@ -4193,7 +4285,6 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
}
}
-EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
static inline bool boot_cpu_is_amd(void)
{
@@ -4215,11 +4306,10 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
shadow_zero_check = &context->shadow_zero_check;
if (boot_cpu_is_amd())
- __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- reserved_hpa_bits(),
+ __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
- true, true);
+ false, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
reserved_hpa_bits(), false);
@@ -4255,8 +4345,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
(7 & (access) ? 128 : 0))
-static void update_permission_bitmask(struct kvm_vcpu *vcpu,
- struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
{
unsigned byte;
@@ -4264,9 +4353,10 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
const u8 w = BYTE_MASK(ACC_WRITE_MASK);
const u8 u = BYTE_MASK(ACC_USER_MASK);
- bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
- bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
- bool cr0_wp = is_write_protection(vcpu);
+ bool cr4_smep = is_cr4_smep(mmu);
+ bool cr4_smap = is_cr4_smap(mmu);
+ bool cr0_wp = is_cr0_wp(mmu);
+ bool efer_nx = is_efer_nx(mmu);
for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
unsigned pfec = byte << 1;
@@ -4292,7 +4382,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
/* Not really needed: !nx will cause pte.nx to fault */
- if (!mmu->nx)
+ if (!efer_nx)
ff = 0;
/* Allow supervisor writes if !cr0.wp */
@@ -4351,24 +4441,17 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* away both AD and WD. For all reads or if the last condition holds, WD
* only will be masked away.
*/
-static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- bool ept)
+static void update_pkru_bitmask(struct kvm_mmu *mmu)
{
unsigned bit;
bool wp;
- if (ept) {
- mmu->pkru_mask = 0;
- return;
- }
-
- /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
- if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
+ if (!is_cr4_pke(mmu)) {
mmu->pkru_mask = 0;
return;
}
- wp = is_write_protection(vcpu);
+ wp = is_cr0_wp(mmu);
for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
unsigned pfec, pkey_bits;
@@ -4402,81 +4485,51 @@ static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
}
-static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu)
{
- unsigned root_level = mmu->root_level;
+ if (!is_cr0_pg(mmu))
+ return;
- mmu->last_nonleaf_level = root_level;
- if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
- mmu->last_nonleaf_level++;
+ reset_rsvds_bits_mask(vcpu, mmu);
+ update_permission_bitmask(mmu, false);
+ update_pkru_bitmask(mmu);
}
-static void paging64_init_context_common(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context,
- int level)
+static void paging64_init_context(struct kvm_mmu *context)
{
- context->nx = is_nx(vcpu);
- context->root_level = level;
-
- reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
-
- MMU_WARN_ON(!is_pae(vcpu));
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
- context->shadow_root_level = level;
context->direct_map = false;
}
-static void paging64_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging32_init_context(struct kvm_mmu *context)
{
- int root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
-
- paging64_init_context_common(vcpu, context, root_level);
-}
-
-static void paging32_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
-{
- context->nx = false;
- context->root_level = PT32_ROOT_LEVEL;
-
- reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
-
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
- context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = false;
}
-static void paging32E_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
-{
- paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
-}
-
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_extended_role ext = {0};
- ext.cr0_pg = !!is_paging(vcpu);
- ext.cr4_pae = !!is_pae(vcpu);
- ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
- ext.cr4_pse = !!is_pse(vcpu);
- ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
- ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
+ if (____is_cr0_pg(regs)) {
+ ext.cr0_pg = 1;
+ ext.cr4_pae = ____is_cr4_pae(regs);
+ ext.cr4_smep = ____is_cr4_smep(regs);
+ ext.cr4_smap = ____is_cr4_smap(regs);
+ ext.cr4_pse = ____is_cr4_pse(regs);
+
+ /* PKEY and LA57 are active iff long mode is active. */
+ ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
+ ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+ }
ext.valid = 1;
@@ -4484,20 +4537,23 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs,
bool base_only)
{
union kvm_mmu_role role = {0};
role.base.access = ACC_ALL;
- role.base.nxe = !!is_nx(vcpu);
- role.base.cr0_wp = is_write_protection(vcpu);
+ if (____is_cr0_pg(regs)) {
+ role.base.efer_nx = ____is_efer_nx(regs);
+ role.base.cr0_wp = ____is_cr0_wp(regs);
+ }
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
if (base_only)
return role;
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
return role;
}
@@ -4512,9 +4568,10 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4527,8 +4584,9 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role new_role =
- kvm_calc_tdp_mmu_root_page_role(vcpu, false);
+ kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
@@ -4542,60 +4600,44 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
+ context->root_level = role_regs_to_root_level(&regs);
- if (!is_paging(vcpu)) {
- context->nx = false;
+ if (!is_cr0_pg(context))
context->gva_to_gpa = nonpaging_gva_to_gpa;
- context->root_level = 0;
- } else if (is_long_mode(vcpu)) {
- context->nx = is_nx(vcpu);
- context->root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
- context->gva_to_gpa = paging64_gva_to_gpa;
- } else if (is_pae(vcpu)) {
- context->nx = is_nx(vcpu);
- context->root_level = PT32E_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
+ else if (is_cr4_pae(context))
context->gva_to_gpa = paging64_gva_to_gpa;
- } else {
- context->nx = false;
- context->root_level = PT32_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
+ else
context->gva_to_gpa = paging32_gva_to_gpa;
- }
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
+ reset_guest_paging_metadata(vcpu, context);
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
static union kvm_mmu_role
-kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
- role.base.smep_andnot_wp = role.ext.cr4_smep &&
- !is_write_protection(vcpu);
- role.base.smap_andnot_wp = role.ext.cr4_smap &&
- !is_write_protection(vcpu);
- role.base.gpte_is_8_bytes = !!is_pae(vcpu);
+ role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
+ role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
+ role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
return role;
}
static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
- role.base.direct = !is_paging(vcpu);
+ role.base.direct = !____is_cr0_pg(regs);
- if (!is_long_mode(vcpu))
+ if (!____is_efer_lma(regs))
role.base.level = PT32E_ROOT_LEVEL;
- else if (is_la57_mode(vcpu))
+ else if (____is_cr4_la57(regs))
role.base.level = PT64_ROOT_5LEVEL;
else
role.base.level = PT64_ROOT_4LEVEL;
@@ -4604,37 +4646,44 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
}
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
- u32 cr0, u32 cr4, u32 efer,
+ struct kvm_mmu_role_regs *regs,
union kvm_mmu_role new_role)
{
- if (!(cr0 & X86_CR0_PG))
- nonpaging_init_context(vcpu, context);
- else if (efer & EFER_LMA)
- paging64_init_context(vcpu, context);
- else if (cr4 & X86_CR4_PAE)
- paging32E_init_context(vcpu, context);
- else
- paging32_init_context(vcpu, context);
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
context->mmu_role.as_u64 = new_role.as_u64;
+
+ if (!is_cr0_pg(context))
+ nonpaging_init_context(context);
+ else if (is_cr4_pae(context))
+ paging64_init_context(context);
+ else
+ paging32_init_context(context);
+ context->root_level = role_regs_to_root_level(regs);
+
+ reset_guest_paging_metadata(vcpu, context);
+ context->shadow_root_level = new_role.base.level;
+
reset_shadow_zero_bits_mask(vcpu, context);
}
-static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
+static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role new_role =
- kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+ kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
- if (new_role.as_u64 != context->mmu_role.as_u64)
- shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+ shadow_mmu_init_context(vcpu, context, regs, new_role);
}
static union kvm_mmu_role
-kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, false);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
role.base.direct = false;
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4642,23 +4691,22 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
return role;
}
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
- gpa_t nested_cr3)
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ unsigned long cr4, u64 efer, gpa_t nested_cr3)
{
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
- union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+ struct kvm_mmu_role_regs regs = {
+ .cr0 = cr0,
+ .cr4 = cr4,
+ .efer = efer,
+ };
+ union kvm_mmu_role new_role;
- __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
+ new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
- if (new_role.as_u64 != context->mmu_role.as_u64) {
- shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+ __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
- /*
- * Override the level set by the common init helper, nested TDP
- * always uses the host's TDP configuration.
- */
- context->shadow_root_level = new_role.base.level;
- }
+ shadow_mmu_init_context(vcpu, context, &regs, new_role);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
@@ -4678,15 +4726,10 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
role.base.guest_mode = true;
role.base.access = ACC_ALL;
- /*
- * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
- * SMAP variation to denote shadow EPT entries.
- */
- role.base.cr0_wp = true;
- role.base.smap_andnot_wp = true;
-
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
+ role.ext.word = 0;
role.ext.execonly = execonly;
+ role.ext.valid = 1;
return role;
}
@@ -4700,14 +4743,15 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
execonly, level);
- __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
+ __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
+ context->mmu_role.as_u64 = new_role.as_u64;
+
context->shadow_root_level = level;
- context->nx = true;
context->ept_ad = accessed_dirty;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
@@ -4715,11 +4759,9 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->invlpg = ept_invlpg;
context->root_level = level;
context->direct_map = false;
- context->mmu_role.as_u64 = new_role.as_u64;
- update_permission_bitmask(vcpu, context, true);
- update_pkru_bitmask(vcpu, context, true);
- update_last_nonleaf_level(vcpu, context);
+ update_permission_bitmask(context, true);
+ update_pkru_bitmask(context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
@@ -4728,20 +4770,36 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
- kvm_init_shadow_mmu(vcpu,
- kvm_read_cr0_bits(vcpu, X86_CR0_PG),
- kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
- vcpu->arch.efer);
+ kvm_init_shadow_mmu(vcpu, &regs);
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
+static union kvm_mmu_role
+kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
+{
+ union kvm_mmu_role role;
+
+ role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+
+ /*
+ * Nested MMUs are used only for walking L2's gva->gpa; they never have
+ * shadow pages of their own and so "direct" has no meaning. Set it
+ * to "true" to try to detect bogus usage of the nested MMU.
+ */
+ role.base.direct = true;
+ role.base.level = role_regs_to_root_level(regs);
+ return role;
+}
+
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
- union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -4751,6 +4809,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
g_context->get_guest_pgd = get_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;
+ g_context->root_level = new_role.base.level;
/*
* L2 page tables are never shadowed, so there is no need to sync
@@ -4766,44 +4825,20 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
* nested page tables as the second level of translation. Basically
* the gva_to_gpa functions between mmu and nested_mmu are swapped.
*/
- if (!is_paging(vcpu)) {
- g_context->nx = false;
- g_context->root_level = 0;
+ if (!is_paging(vcpu))
g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
- } else if (is_long_mode(vcpu)) {
- g_context->nx = is_nx(vcpu);
- g_context->root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else if (is_long_mode(vcpu))
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
- } else if (is_pae(vcpu)) {
- g_context->nx = is_nx(vcpu);
- g_context->root_level = PT32E_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else if (is_pae(vcpu))
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
- } else {
- g_context->nx = false;
- g_context->root_level = PT32_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else
g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
- }
- update_permission_bitmask(vcpu, g_context, false);
- update_pkru_bitmask(vcpu, g_context, false);
- update_last_nonleaf_level(vcpu, g_context);
+ reset_guest_paging_metadata(vcpu, g_context);
}
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
+void kvm_init_mmu(struct kvm_vcpu *vcpu)
{
- if (reset_roots) {
- uint i;
-
- vcpu->arch.mmu->root_hpa = INVALID_PAGE;
-
- for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
- }
-
if (mmu_is_nested(vcpu))
init_kvm_nested_mmu(vcpu);
else if (tdp_enabled)
@@ -4816,20 +4851,53 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role role;
if (tdp_enabled)
- role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
else
- role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
return role.base;
}
+void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Invalidate all MMU roles to force them to reinitialize as CPUID
+ * information is factored into reserved bit calculations.
+ */
+ vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
+ vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
+ vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
+ kvm_mmu_reset_context(vcpu);
+
+ /*
+ * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+ * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
+ * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
+ * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
+ * sweep the problem under the rug.
+ *
+ * KVM's horrific CPUID ABI makes the problem all but impossible to
+ * solve, as correctly handling multiple vCPU models (with respect to
+ * paging and physical address properties) in a single VM would require
+ * tracking all relevant CPUID information in kvm_mmu_page_role. That
+ * is very undesirable as it would double the memory requirements for
+ * gfn_track (see struct kvm_mmu_page_role comments), and in practice
+ * no sane VMM mucks with the core vCPU model on the fly.
+ */
+ if (vcpu->arch.last_vmentry_cpu != -1) {
+ pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
+ pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
+ }
+}
+
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
- kvm_init_mmu(vcpu, true);
+ kvm_init_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
@@ -5261,7 +5329,7 @@ static void free_mmu_pages(struct kvm_mmu *mmu)
if (!tdp_enabled && mmu->pae_root)
set_memory_encrypted((unsigned long)mmu->pae_root, 1);
free_page((unsigned long)mmu->pae_root);
- free_page((unsigned long)mmu->lm_root);
+ free_page((unsigned long)mmu->pml4_root);
}
static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
@@ -5467,7 +5535,13 @@ void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
- kvm_mmu_init_tdp_mmu(kvm);
+ if (!kvm_mmu_init_tdp_mmu(kvm))
+ /*
+ * No smp_load/store wrappers needed here as we are in
+ * VM init and there cannot be any memslots / other threads
+ * accessing this struct kvm yet.
+ */
+ kvm->arch.memslots_have_rmaps = true;
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
@@ -5490,29 +5564,29 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
int i;
bool flush = false;
- write_lock(&kvm->mmu_lock);
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- slots = __kvm_memslots(kvm, i);
- kvm_for_each_memslot(memslot, slots) {
- gfn_t start, end;
-
- start = max(gfn_start, memslot->base_gfn);
- end = min(gfn_end, memslot->base_gfn + memslot->npages);
- if (start >= end)
- continue;
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(memslot, slots) {
+ gfn_t start, end;
+
+ start = max(gfn_start, memslot->base_gfn);
+ end = min(gfn_end, memslot->base_gfn + memslot->npages);
+ if (start >= end)
+ continue;
- flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
- PG_LEVEL_4K,
- KVM_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true, flush);
+ flush = slot_handle_level_range(kvm, memslot,
+ kvm_zap_rmapp, PG_LEVEL_4K,
+ KVM_MAX_HUGEPAGE_LEVEL, start,
+ end - 1, true, flush);
+ }
}
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+ write_unlock(&kvm->mmu_lock);
}
- if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-
- write_unlock(&kvm->mmu_lock);
-
if (is_tdp_mmu_enabled(kvm)) {
flush = false;
@@ -5539,12 +5613,15 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot,
int start_level)
{
- bool flush;
+ bool flush = false;
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
- start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
- write_unlock(&kvm->mmu_lock);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+ start_level, KVM_MAX_HUGEPAGE_LEVEL,
+ false);
+ write_unlock(&kvm->mmu_lock);
+ }
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
@@ -5612,18 +5689,17 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
{
/* FIXME: const-ify all uses of struct kvm_memory_slot. */
struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
- bool flush;
-
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+ bool flush = false;
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
- write_unlock(&kvm->mmu_lock);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+ if (flush)
+ kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ write_unlock(&kvm->mmu_lock);
+ }
if (is_tdp_mmu_enabled(kvm)) {
- flush = false;
-
read_lock(&kvm->mmu_lock);
flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
if (flush)
@@ -5650,11 +5726,14 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
- bool flush;
+ bool flush = false;
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
- write_unlock(&kvm->mmu_lock);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
+ false);
+ write_unlock(&kvm->mmu_lock);
+ }
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
@@ -5957,6 +6036,7 @@ static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel
static void kvm_recover_nx_lpages(struct kvm *kvm)
{
+ unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
int rcu_idx;
struct kvm_mmu_page *sp;
unsigned int ratio;
@@ -5968,7 +6048,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
write_lock(&kvm->mmu_lock);
ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
- to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+ to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
for ( ; to_zap; --to_zap) {
if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
break;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d64ccb417c60..35567293c1fd 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -116,14 +116,19 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
kvm_x86_ops.cpu_dirty_log_size;
}
-bool is_nx_huge_page_enabled(void);
-bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool can_unsync);
+extern int nx_huge_pages;
+static inline bool is_nx_huge_page_enabled(void)
+{
+ return READ_ONCE(nx_huge_pages);
+}
+
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn);
+ struct kvm_memory_slot *slot, u64 gfn,
+ int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
u64 start_gfn, u64 pages);
@@ -158,8 +163,6 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
kvm_pfn_t *pfnp, int *goal_levelp);
-bool is_nx_huge_page_enabled(void);
-
void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index e798489b56b5..efbad33a0645 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -40,7 +40,7 @@
role.direct ? " direct" : "", \
access_str[role.access], \
role.invalid ? " invalid" : "", \
- role.nxe ? "" : "!", \
+ role.efer_nx ? "" : "!", \
role.ad_disabled ? "!" : "", \
__entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 34bb0ec69bd8..91a9f7e0fd91 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -100,7 +100,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
kvm_mmu_gfn_disallow_lpage(slot, gfn);
if (mode == KVM_PAGE_TRACK_WRITE)
- if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+ if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 70b7e44e3035..490a028ddabe 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -90,8 +90,8 @@ struct guest_walker {
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
bool pte_writable[PT_MAX_FULL_LEVELS];
- unsigned pt_access;
- unsigned pte_access;
+ unsigned int pt_access[PT_MAX_FULL_LEVELS];
+ unsigned int pte_access;
gfn_t gfn;
struct x86_exception fault;
};
@@ -305,6 +305,35 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
return pkeys;
}
+static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
+ unsigned int level, unsigned int gpte)
+{
+ /*
+ * For EPT and PAE paging (both variants), bit 7 is either reserved at
+ * all levels or indicates a huge page (ignoring CR3/EPTP). In either
+ * case, bit 7 being set terminates the walk.
+ */
+#if PTTYPE == 32
+ /*
+ * 32-bit paging requires special handling because bit 7 is ignored if
+ * CR4.PSE=0, not reserved. Clear bit 7 in the gpte if the level is
+ * greater than the last level for which bit 7 is the PAGE_SIZE bit.
+ *
+ * The RHS has bit 7 set iff level < (2 + PSE). If it is clear, bit 7
+ * is not reserved and does not indicate a large page at this level,
+ * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
+ */
+ gpte &= level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse);
+#endif
+ /*
+ * PG_LEVEL_4K always terminates. The RHS has bit 7 set
+ * iff level <= PG_LEVEL_4K, which for our purpose means
+ * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
+ */
+ gpte |= level - PG_LEVEL_4K - 1;
+
+ return gpte & PT_PAGE_SIZE_MASK;
+}
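As an aside (not part of the patch), the bit-7 arithmetic above can be checked with a minimal standalone sketch; PT32_ROOT_LEVEL, PG_LEVEL_4K and PT_PAGE_SIZE_MASK are reproduced here as local constants purely for illustration.

/*
 * Standalone sketch, not kernel code: reproduces the bit-7 trick from
 * FNAME(is_last_gpte) for the 32-bit (PTTYPE == 32) case.
 */
#include <assert.h>
#include <stdio.h>

#define PT32_ROOT_LEVEL   2u
#define PG_LEVEL_4K       1u
#define PT_PAGE_SIZE_MASK (1u << 7)

static unsigned int is_last_gpte_32bit(unsigned int level, unsigned int gpte,
                                       unsigned int cr4_pse)
{
	/* Bit 7 of the RHS is set iff level < PT32_ROOT_LEVEL + cr4_pse. */
	gpte &= level - (PT32_ROOT_LEVEL + cr4_pse);
	/* Bit 7 of the RHS is set iff level == PG_LEVEL_4K. */
	gpte |= level - PG_LEVEL_4K - 1;
	return gpte & PT_PAGE_SIZE_MASK;
}

int main(void)
{
	/* A 4K leaf always terminates, regardless of gpte bit 7 or CR4.PSE. */
	assert(is_last_gpte_32bit(1, 0x00, 0));
	/* A level-2 entry with bit 7 set is a 4M page only when CR4.PSE=1. */
	assert(is_last_gpte_32bit(2, 0x80, 1));
	assert(!is_last_gpte_32bit(2, 0x80, 0));
	/* A level-2 entry without bit 7 set is never a leaf. */
	assert(!is_last_gpte_32bit(2, 0x00, 1));
	printf("bit-7 trick behaves as described\n");
	return 0;
}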
/*
* Fetch a guest pte for a guest virtual address, or for an L2's GPA.
*/
@@ -418,13 +447,15 @@ retry_walk:
}
walker->ptes[walker->level - 1] = pte;
- } while (!is_last_gpte(mmu, walker->level, pte));
+
+ /* Convert to ACC_*_MASK flags for struct guest_walker. */
+ walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
+ } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));
pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
/* Convert to ACC_*_MASK flags for struct guest_walker. */
- walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
@@ -463,13 +494,13 @@ retry_walk:
}
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __func__, (u64)pte, walker->pte_access, walker->pt_access);
+ __func__, (u64)pte, walker->pte_access,
+ walker->pt_access[walker->level - 1]);
return 1;
error:
errcode |= write_fault | user_fault;
- if (fetch_fault && (mmu->nx ||
- kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+ if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
errcode |= PFERR_FETCH_MASK;
walker->fault.vector = PF_VECTOR;
@@ -643,7 +674,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
- unsigned direct_access, access = gw->pt_access;
+ unsigned int direct_access, access;
int top_level, level, req_level, ret;
gfn_t base_gfn = gw->gfn;
@@ -675,6 +706,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
sp = NULL;
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
+ access = gw->pt_access[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
false, access);
}
@@ -763,7 +795,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
bool self_changed = false;
if (!(walker->pte_access & ACC_WRITE_MASK ||
- (!is_write_protection(vcpu) && !user_fault)))
+ (!is_cr0_wp(vcpu->arch.mmu) && !user_fault)))
return false;
for (level = walker->level; level <= walker->max_level; level++) {
@@ -861,8 +893,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
* we will cache the incorrect access into mmio spte.
*/
if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
- !is_write_protection(vcpu) && !user_fault &&
- !is_noslot_pfn(pfn)) {
+ !is_cr0_wp(vcpu->arch.mmu) && !user_fault && !is_noslot_pfn(pfn)) {
walker.pte_access |= ACC_WRITE_MASK;
walker.pte_access &= ~ACC_USER_MASK;
@@ -872,7 +903,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
* then we should prevent the kernel from executing it
* if SMEP is enabled.
*/
- if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ if (is_cr4_smep(vcpu->arch.mmu))
walker.pte_access &= ~ACC_EXEC_MASK;
}
@@ -1027,13 +1058,36 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
+ union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
int i, nr_present = 0;
bool host_writable;
gpa_t first_pte_gpa;
int set_spte_ret = 0;
- /* direct kvm_mmu_page can not be unsync. */
- BUG_ON(sp->role.direct);
+ /*
+ * Ignore various flags when verifying that it's safe to sync a shadow
+ * page using the current MMU context.
+ *
+ * - level: not part of the overall MMU role and will never match as the MMU's
+ * level tracks the root level
+ * - access: updated based on the new guest PTE
+ * - quadrant: not part of the overall MMU role (similar to level)
+ */
+ const union kvm_mmu_page_role sync_role_ign = {
+ .level = 0xf,
+ .access = 0x7,
+ .quadrant = 0x3,
+ };
+
+ /*
+ * Direct pages can never be unsync, and KVM should never attempt to
+ * sync a shadow page for a different MMU context, e.g. if the role
+ * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
+ * reserved bits checks will be wrong, etc...
+ */
+ if (WARN_ON_ONCE(sp->role.direct ||
+ (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
+ return 0;
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
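The sync_role_ign check in FNAME(sync_page) above uses a common pattern: set the fields to be ignored to all ones in a zero-initialized role, then XOR the two role words and mask with the complement. The standalone sketch below illustrates the pattern only; the union name, field names and widths are invented for the demo and do not match the real kvm_mmu_page_role layout.

#include <assert.h>
#include <stdint.h>

union demo_role {
	uint32_t word;
	struct {
		uint32_t level:4;	/* ignored when comparing */
		uint32_t access:3;	/* ignored when comparing */
		uint32_t quadrant:2;	/* ignored when comparing */
		uint32_t smm:1;		/* must match */
		uint32_t direct:1;	/* must match */
	};
};

static int roles_match_for_sync(union demo_role a, union demo_role b)
{
	/* Fields set to all ones here are excluded from the comparison. */
	const union demo_role ign = {
		.level = 0xf,
		.access = 0x7,
		.quadrant = 0x3,
	};

	return !((a.word ^ b.word) & ~ign.word);
}

int main(void)
{
	union demo_role a = { .level = 2, .access = 7, .smm = 1 };
	union demo_role b = { .level = 4, .access = 3, .smm = 1 };
	union demo_role c = { .level = 2, .access = 7, .smm = 0 };

	assert(roles_match_for_sync(a, b));	/* only ignored fields differ */
	assert(!roles_match_for_sync(a, c));	/* smm differs, so mismatch */
	return 0;
}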
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 66d43cec0c31..3e97cdb13eb7 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -103,13 +103,6 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
/*
- * Bits 62:52 of PAE SPTEs are reserved. WARN if said bits are set
- * if PAE paging may be employed (shadow paging or any 32-bit KVM).
- */
- WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) &&
- (spte & SPTE_TDP_AD_MASK));
-
- /*
* For the EPT case, shadow_present_mask is 0 if hardware
* supports exec-only page table entries. In that case,
* ACC_USER_MASK and shadow_user_mask are used to represent
@@ -154,13 +147,19 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
/*
* Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection
- * is responsibility of mmu_get_page / kvm_sync_page.
+ * is the responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
* Same reasoning can be applied to dirty page accounting.
*/
if (!can_unsync && is_writable_pte(old_spte))
goto out;
- if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
+ /*
+ * Unsync shadow pages that are reachable by the new, writable
+ * SPTE. Write-protect the SPTE if the page can't be unsync'd,
+ * e.g. it's write-tracked (upper-level SPs) or has one or more
+ * shadow pages and unsync'ing pages is not allowed.
+ */
+ if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
ret |= SET_SPTE_WRITE_PROTECTED_PT;
@@ -176,7 +175,10 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
spte = mark_spte_for_access_track(spte);
out:
- WARN_ON(is_mmio_spte(spte));
+ WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
+ "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
+ get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
+
*new_spte = spte;
return ret;
}
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index bca0ba11cccf..7a5ce9314107 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -293,6 +293,38 @@ static inline bool is_dirty_spte(u64 spte)
return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}
+static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
+ int level)
+{
+ int bit7 = (pte >> 7) & 1;
+
+ return rsvd_check->rsvd_bits_mask[bit7][level-1];
+}
+
+static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
+ u64 pte, int level)
+{
+ return pte & get_rsvd_bits(rsvd_check, pte, level);
+}
+
+static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
+ u64 pte)
+{
+ return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
+}
+
+static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+ u64 spte, int level)
+{
+ /*
+ * Use a bitwise-OR instead of a logical-OR to aggregate the reserved
+ * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc
+ * (this is extremely unlikely to be short-circuited as true).
+ */
+ return __is_bad_mt_xwr(rsvd_check, spte) |
+ __is_rsvd_bits_set(rsvd_check, spte, level);
+}
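The comment in is_rsvd_spte() relies on bitwise OR evaluating both operands unconditionally, which lets the compiler combine the two checks without an extra conditional branch. A tiny standalone sketch of the difference (not kernel code) follows.

#include <assert.h>
#include <stdbool.h>

static bool check_a(unsigned long x) { return x & 0x1; }
static bool check_b(unsigned long x) { return x & 0x2; }

/* '||' may emit a branch so that check_b() is skipped when check_a() is true. */
static bool either_short_circuit(unsigned long x)
{
	return check_a(x) || check_b(x);
}

/* '|' evaluates both checks and ORs the results, avoiding the extra branch. */
static bool either_branchless(unsigned long x)
{
	return check_a(x) | check_b(x);
}

int main(void)
{
	unsigned long v;

	/* Both forms are logically equivalent; only the generated code differs. */
	for (v = 0; v < 4; v++)
		assert(either_short_circuit(v) == either_branchless(v));
	return 0;
}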
+
static inline bool spte_can_locklessly_be_made_writable(u64 spte)
{
return (spte & shadow_host_writable_mask) &&
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 88f69a6cc492..0853370bd811 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -14,10 +14,10 @@ static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
/* Initializes the TDP MMU for the VM, if enabled. */
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
- return;
+ return false;
/* This should not be changed for the lifetime of the VM. */
kvm->arch.tdp_mmu_enabled = true;
@@ -25,6 +25,8 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
+
+ return true;
}
static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
@@ -335,7 +337,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
sptep = rcu_dereference(pt) + i;
- gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
+ gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
if (shared) {
/*
@@ -377,18 +379,18 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
WRITE_ONCE(*sptep, REMOVED_SPTE);
}
handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
- old_child_spte, REMOVED_SPTE, level - 1,
+ old_child_spte, REMOVED_SPTE, level,
shared);
}
kvm_flush_remote_tlbs_with_address(kvm, gfn,
- KVM_PAGES_PER_HPAGE(level));
+ KVM_PAGES_PER_HPAGE(level + 1));
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
/**
- * handle_changed_spte - handle bookkeeping associated with an SPTE change
+ * __handle_changed_spte - handle bookkeeping associated with an SPTE change
* @kvm: kvm instance
* @as_id: the address space of the paging structure the SPTE was a part of
* @gfn: the base GFN that was mapped by the SPTE
@@ -444,6 +446,13 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
+ if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
+ if (is_large_pte(old_spte))
+ atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
+ else
+ atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
+ }
+
/*
* The only times a SPTE should be changed from a non-present to
* non-present state is when an MMIO entry is installed/modified/
@@ -905,7 +914,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
kvm_pfn_t pfn, bool prefault)
{
u64 new_spte;
- int ret = 0;
+ int ret = RET_PF_FIXED;
int make_spte_ret = 0;
if (unlikely(is_noslot_pfn(pfn)))
@@ -942,7 +951,11 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
rcu_dereference(iter->sptep));
}
- if (!prefault)
+ /*
+ * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
+ * consistent with legacy MMU behavior.
+ */
+ if (ret != RET_PF_SPURIOUS)
vcpu->stat.pf_fixed++;
return ret;
@@ -970,11 +983,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
int level;
int req_level;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
- if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
-
level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
huge_page_disallowed, &req_level);
@@ -1009,7 +1017,15 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
}
if (!is_shadow_present_pte(iter.old_spte)) {
- sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
+ /*
+ * If SPTE has been frozen by another thread, just
+ * give up and retry, avoiding unnecessary page table
+ * allocation and free.
+ */
+ if (is_removed_spte(iter.old_spte))
+ break;
+
+ sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
child_pt = sp->spt;
new_spte = make_nonleaf_spte(child_pt,
@@ -1177,9 +1193,9 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
}
/*
- * Remove write access from all the SPTEs mapping GFNs [start, end). If
- * skip_4k is set, SPTEs that map 4k pages, will not be write-protected.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
+ * Remove write access from all SPTEs at or above min_level that map GFNs
+ * [start, end). Returns true if an SPTE has been changed and the TLBs need to
+ * be flushed.
*/
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, int min_level)
@@ -1447,15 +1463,22 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
* Returns true if an SPTE was set and a TLB flush is needed.
*/
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t gfn)
+ gfn_t gfn, int min_level)
{
struct tdp_iter iter;
u64 new_spte;
bool spte_set = false;
+ BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
+
rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
+ for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+ min_level, gfn, gfn + 1) {
+ if (!is_shadow_present_pte(iter.old_spte) ||
+ !is_last_spte(iter.old_spte, iter.level))
+ continue;
+
if (!is_writable_pte(iter.old_spte))
break;
@@ -1477,14 +1500,15 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
* Returns true if an SPTE was set and a TLB flush is needed.
*/
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn)
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int min_level)
{
struct kvm_mmu_page *root;
bool spte_set = false;
lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root(kvm, root, slot->as_id)
- spte_set |= write_protect_gfn(kvm, root, gfn);
+ spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
return spte_set;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5fdf63090451..1cae4485b3bc 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -31,7 +31,7 @@ static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+ gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);
/*
* Don't allow yielding, as the caller may have a flush pending. Note,
@@ -74,37 +74,40 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool flush);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn);
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int min_level);
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
#ifdef CONFIG_X86_64
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
-#else
-static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
-static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
-static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
-#endif
-static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
struct kvm_mmu_page *sp;
+ hpa_t hpa = mmu->root_hpa;
- if (!is_tdp_mmu_enabled(kvm))
- return false;
if (WARN_ON(!VALID_PAGE(hpa)))
return false;
+ /*
+ * A NULL shadow page is legal when shadowing a non-paging guest with
+ * PAE paging, as the MMU will be direct with root_hpa pointing at the
+ * pae_root page, not a shadow page.
+ */
sp = to_shadow_page(hpa);
- if (WARN_ON(!sp))
- return false;
-
- return is_tdp_mmu_page(sp) && sp->root_count;
+ return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
+#else
+static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
+static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
+#endif
#endif /* __KVM_X86_MMU_TDP_MMU_H */
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 712b4e0de481..1d01da64c333 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -27,12 +27,6 @@
#include "irq.h"
#include "svm.h"
-/* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
-
#define SVM_AVIC_DOORBELL 0xc001011b
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
@@ -126,7 +120,7 @@ void avic_vm_destroy(struct kvm *kvm)
unsigned long flags;
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
- if (!avic)
+ if (!enable_apicv)
return;
if (kvm_svm->avic_logical_id_table_page)
@@ -149,7 +143,7 @@ int avic_vm_init(struct kvm *kvm)
struct page *l_page;
u32 vm_id;
- if (!avic)
+ if (!enable_apicv)
return 0;
/* Allocating physical APIC ID table (4KB) */
@@ -223,7 +217,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
return &avic_physical_id_table[index];
}
-/**
+/*
* Note:
* AVIC hardware walks the nested page table to check permissions,
* but does not use the SPA address specified in the leaf page
@@ -242,7 +236,7 @@ static int avic_update_access_page(struct kvm *kvm, bool activate)
* APICv mode change, which update APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
* memory region. So, we need to ensure that kvm->mm == current->mm.
*/
- if ((kvm->arch.apic_access_page_done == activate) ||
+ if ((kvm->arch.apic_access_memslot_enabled == activate) ||
(kvm->mm != current->mm))
goto out;
@@ -255,7 +249,7 @@ static int avic_update_access_page(struct kvm *kvm, bool activate)
goto out;
}
- kvm->arch.apic_access_page_done = activate;
+ kvm->arch.apic_access_memslot_enabled = activate;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -571,7 +565,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
int ret;
struct kvm_vcpu *vcpu = &svm->vcpu;
- if (!avic || !irqchip_in_kernel(vcpu->kvm))
+ if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
return 0;
ret = avic_init_backing_page(vcpu);
@@ -595,7 +589,7 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu)
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
{
- if (!avic || !lapic_in_kernel(vcpu))
+ if (!enable_apicv || !lapic_in_kernel(vcpu))
return;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -655,7 +649,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
struct vmcb *vmcb = svm->vmcb;
bool activated = kvm_vcpu_apicv_active(vcpu);
- if (!avic)
+ if (!enable_apicv)
return;
if (activated) {
@@ -766,7 +760,7 @@ out:
return ret;
}
-/**
+/*
* Note:
* The HW cannot support posting multicast/broadcast
* interrupts to a vCPU. So, we still use legacy interrupt
@@ -1007,7 +1001,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
-/**
+/*
* This function is called during VCPU halt/unhalt.
*/
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 540d43ba2cf4..21d03e3a5dfd 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -98,13 +98,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
WARN_ON(mmu_is_nested(vcpu));
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
+
+ /*
+ * The NPT format depends on L1's CR4 and EFER, which are in vmcb01. Note,
+ * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
+ * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
+ */
kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
svm->vmcb01.ptr->save.efer,
svm->nested.ctl.nested_cr3);
vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
- reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
@@ -380,33 +385,47 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}
+static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
+{
+ /*
+ * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
+ * things to fix before this can be conditional:
+ *
+ * - Flush TLBs for both L1 and L2 remote TLB flush
+ * - Honor L1's request to flush an ASID on nested VMRUN
+ * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
+ * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
+ * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
+ *
+ * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
+ * NPT guest-physical mappings on VMRUN.
+ */
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+}
+
/*
* Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
* if we are emulating VM-Entry into a guest with NPT enabled.
*/
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
- bool nested_npt)
+ bool nested_npt, bool reload_pdptrs)
{
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
return -EINVAL;
- if (!nested_npt && is_pae_paging(vcpu) &&
- (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
- if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
- return -EINVAL;
- }
+ if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
+ CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
+ return -EINVAL;
- /*
- * TODO: optimize unconditional TLB flush/MMU sync here and in
- * kvm_init_shadow_npt_mmu().
- */
if (!nested_npt)
- kvm_mmu_new_pgd(vcpu, cr3, false, false);
+ kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
- kvm_init_mmu(vcpu, false);
+ /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
+ kvm_init_mmu(vcpu);
return 0;
}
@@ -481,6 +500,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
/*
* Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
@@ -505,10 +525,10 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
/* nested_cr3. */
if (nested_npt_enabled(svm))
- nested_svm_init_mmu_context(&svm->vcpu);
+ nested_svm_init_mmu_context(vcpu);
- svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
- svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
+ svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
+ vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
svm->vmcb->control.int_ctl =
(svm->nested.ctl.int_ctl & ~mask) |
@@ -523,8 +543,10 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
+ nested_svm_transition_tlb_flush(vcpu);
+
/* Enter Guest-Mode */
- enter_guest_mode(&svm->vcpu);
+ enter_guest_mode(vcpu);
/*
* Merge guest and host intercepts - must be called with vcpu in
@@ -576,7 +598,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
nested_vmcb02_prepare_save(svm, vmcb12);
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
- nested_npt_enabled(svm));
+ nested_npt_enabled(svm), true);
if (ret)
return ret;
@@ -596,8 +618,6 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
u64 vmcb12_gpa;
- ++vcpu->stat.nested_run;
-
if (is_smm(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
@@ -764,7 +784,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
svm_switch_vmcb(svm, &svm->vmcb01);
- WARN_ON_ONCE(svm->vmcb->control.exit_code != SVM_EXIT_VMRUN);
/*
* On vmexit the GIF is set to false and
@@ -804,9 +823,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_vcpu_unmap(vcpu, &map, true);
+ nested_svm_transition_tlb_flush(vcpu);
+
nested_svm_uninit_mmu_context(vcpu);
- rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false);
+ rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
if (rc)
return 1;
@@ -872,6 +893,15 @@ void svm_free_nested(struct vcpu_svm *svm)
__free_page(virt_to_page(svm->nested.vmcb02.ptr));
svm->nested.vmcb02.ptr = NULL;
+ /*
+ * When last_vmcb12_gpa matches the current vmcb12 gpa,
+ * some vmcb12 fields are not loaded if they are marked clean
+ * in the vmcb12, since in this case they are up to date already.
+ *
+ * When the vmcb02 is freed, this optimization becomes invalid.
+ */
+ svm->nested.last_vmcb12_gpa = INVALID_GPA;
+
svm->nested.initialized = false;
}
@@ -884,9 +914,11 @@ void svm_leave_nested(struct vcpu_svm *svm)
if (is_guest_mode(vcpu)) {
svm->nested.nested_run_pending = 0;
+ svm->nested.vmcb12_gpa = INVALID_GPA;
+
leave_guest_mode(vcpu);
- svm_switch_vmcb(svm, &svm->nested.vmcb02);
+ svm_switch_vmcb(svm, &svm->vmcb01);
nested_svm_uninit_mmu_context(vcpu);
vmcb_mark_all_dirty(svm->vmcb);
@@ -1218,8 +1250,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
&user_kvm_nested_state->data.svm[0];
struct vmcb_control_area *ctl;
struct vmcb_save_area *save;
+ unsigned long cr0;
int ret;
- u32 cr0;
BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
KVM_STATE_NESTED_SVM_VMCB_SIZE);
@@ -1292,18 +1324,36 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
goto out_free;
/*
+ * While the nested guest CR3 is already checked and set by
+ * KVM_SET_SREGS, it was set before the nested state was loaded,
+ * so the MMU might not be initialized correctly.
+ * Set it again to fix this.
+ */
+
+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+ nested_npt_enabled(svm), false);
+ if (WARN_ON_ONCE(ret))
+ goto out_free;
+
+
+ /*
* All checks done, we can enter guest mode. Userspace provides
* vmcb12.control, which will be combined with L1 and stored into
* vmcb02, and the L1 save state which we store in vmcb01.
* L2 registers if needed are moved from the current VMCB to VMCB02.
*/
+ if (is_guest_mode(vcpu))
+ svm_leave_nested(svm);
+ else
+ svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
+
+ svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
+
svm->nested.nested_run_pending =
!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
- if (svm->current_vmcb == &svm->vmcb01)
- svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
svm->vmcb01.ptr->save.es = save->es;
svm->vmcb01.ptr->save.cs = save->cs;
@@ -1343,9 +1393,15 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
if (WARN_ON(!is_guest_mode(vcpu)))
return true;
- if (nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
- nested_npt_enabled(svm)))
- return false;
+ if (!vcpu->arch.pdptrs_from_userspace &&
+ !nested_npt_enabled(svm) && is_pae_paging(vcpu))
+ /*
+ * Reload the guest's PDPTRs since after a migration
+ * the guest CR3 might be restored prior to setting the nested
+ * state which can lead to a load of wrong PDPTRs.
+ */
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ return false;
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 1356ee095cd5..8d36f0c73071 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -199,9 +199,19 @@ static void sev_asid_free(struct kvm_sev_info *sev)
sev->misc_cg = NULL;
}
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+static void sev_decommission(unsigned int handle)
{
struct sev_data_decommission decommission;
+
+ if (!handle)
+ return;
+
+ decommission.handle = handle;
+ sev_guest_decommission(&decommission, NULL);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
struct sev_data_deactivate deactivate;
if (!handle)
@@ -214,9 +224,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
sev_guest_deactivate(&deactivate, NULL);
up_read(&sev_deactivate_lock);
- /* decommission handle */
- decommission.handle = handle;
- sev_guest_decommission(&decommission, NULL);
+ sev_decommission(handle);
}
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -341,8 +349,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start.handle, error);
- if (ret)
+ if (ret) {
+ sev_decommission(start.handle);
goto e_free_session;
+ }
/* return handle to userspace */
params.handle = start.handle;
@@ -763,7 +773,7 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
}
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
- unsigned long __user dst_uaddr,
+ void __user *dst_uaddr,
unsigned long dst_paddr,
int size, int *err)
{
@@ -787,8 +797,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
if (tpage) {
offset = paddr & 15;
- if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
- page_address(tpage) + offset, size))
+ if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
ret = -EFAULT;
}
@@ -800,9 +809,9 @@ e_free:
}
static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
- unsigned long __user vaddr,
+ void __user *vaddr,
unsigned long dst_paddr,
- unsigned long __user dst_vaddr,
+ void __user *dst_vaddr,
int size, int *error)
{
struct page *src_tpage = NULL;
@@ -810,13 +819,12 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
int ret, len = size;
/* If source buffer is not aligned then use an intermediate buffer */
- if (!IS_ALIGNED(vaddr, 16)) {
+ if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
src_tpage = alloc_page(GFP_KERNEL);
if (!src_tpage)
return -ENOMEM;
- if (copy_from_user(page_address(src_tpage),
- (void __user *)(uintptr_t)vaddr, size)) {
+ if (copy_from_user(page_address(src_tpage), vaddr, size)) {
__free_page(src_tpage);
return -EFAULT;
}
@@ -830,7 +838,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
* - copy the source buffer in an intermediate buffer
* - use the intermediate buffer as source buffer
*/
- if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+ if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
int dst_offset;
dst_tpage = alloc_page(GFP_KERNEL);
@@ -855,7 +863,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
page_address(src_tpage), size);
else {
if (copy_from_user(page_address(dst_tpage) + dst_offset,
- (void __user *)(uintptr_t)vaddr, size)) {
+ vaddr, size)) {
ret = -EFAULT;
goto e_free;
}
@@ -935,15 +943,15 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
if (dec)
ret = __sev_dbg_decrypt_user(kvm,
__sme_page_pa(src_p[0]) + s_off,
- dst_vaddr,
+ (void __user *)dst_vaddr,
__sme_page_pa(dst_p[0]) + d_off,
len, &argp->error);
else
ret = __sev_dbg_encrypt_user(kvm,
__sme_page_pa(src_p[0]) + s_off,
- vaddr,
+ (void __user *)vaddr,
__sme_page_pa(dst_p[0]) + d_off,
- dst_vaddr,
+ (void __user *)dst_vaddr,
len, &argp->error);
sev_unpin_memory(kvm, src_p, n);
@@ -1105,10 +1113,9 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_start data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
- if (ret < 0)
- return ret;
params->session_len = data.session_len;
if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
@@ -1217,10 +1224,9 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_update_data data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
- if (ret < 0)
- return ret;
params->hdr_len = data.hdr_len;
params->trans_len = data.trans_len;
@@ -1764,7 +1770,8 @@ e_mirror_unlock:
e_source_unlock:
mutex_unlock(&source_kvm->lock);
e_source_put:
- fput(source_kvm_file);
+ if (source_kvm_file)
+ fput(source_kvm_file);
return ret;
}
@@ -2198,7 +2205,7 @@ vmgexit_err:
return -EINVAL;
}
-static void pre_sev_es_run(struct vcpu_svm *svm)
+void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
if (!svm->ghcb)
return;
@@ -2234,9 +2241,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int asid = sev_get_asid(svm->vcpu.kvm);
- /* Perform any SEV-ES pre-run actions */
- pre_sev_es_run(svm);
-
/* Assign the asid allocated with this SEV guest */
svm->asid = asid;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b649f92287a2..8834822c00cd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -43,6 +43,9 @@
#include "svm.h"
#include "svm_ops.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet");
@@ -185,6 +188,13 @@ module_param(vls, int, 0444);
static int vgif = true;
module_param(vgif, int, 0444);
+/*
+ * enable / disable AVIC. Because the defaults differ for APICv
+ * support between VMX and SVM we cannot use module_param_named.
+ */
+static bool avic;
+module_param(avic, bool, 0444);
+
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
@@ -212,7 +222,7 @@ DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
* RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
* defer the restoration of TSC_AUX until the CPU returns to userspace.
*/
-#define TSC_AUX_URET_SLOT 0
+static int tsc_aux_uret_slot __read_mostly = -1;
static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
@@ -447,6 +457,11 @@ static int has_svm(void)
return 0;
}
+ if (pgtable_l5_enabled()) {
+ pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n");
+ return 0;
+ }
+
return 1;
}
@@ -668,6 +683,9 @@ static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
msrpm[offset] = tmp;
+
+ svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
+
}
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
@@ -858,8 +876,8 @@ static __init void svm_adjust_mmio_mask(void)
return;
/* If memory encryption is not enabled, use existing mask */
- rdmsrl(MSR_K8_SYSCFG, msr);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ rdmsrl(MSR_AMD64_SYSCFG, msr);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
@@ -934,6 +952,16 @@ static __init int svm_hardware_setup(void)
int r;
unsigned int order = get_order(IOPM_SIZE);
+ /*
+ * NX is required for shadow paging and for NPT if the NX huge pages
+ * mitigation is enabled.
+ */
+ if (!boot_cpu_has(X86_FEATURE_NX)) {
+ pr_err_ratelimited("NX (Execute Disable) not supported\n");
+ return -EOPNOTSUPP;
+ }
+ kvm_enable_efer_bits(EFER_NX);
+
iopm_pages = alloc_pages(GFP_KERNEL, order);
if (!iopm_pages)
@@ -947,9 +975,6 @@ static __init int svm_hardware_setup(void)
supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
- if (boot_cpu_has(X86_FEATURE_NX))
- kvm_enable_efer_bits(EFER_NX);
-
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
kvm_enable_efer_bits(EFER_FFXSR);
@@ -959,8 +984,7 @@ static __init int svm_hardware_setup(void)
kvm_tsc_scaling_ratio_frac_bits = 32;
}
- if (boot_cpu_has(X86_FEATURE_RDTSCP))
- kvm_define_user_return_msr(TSC_AUX_URET_SLOT, MSR_TSC_AUX);
+ tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
/* Check for pause filtering support */
if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
@@ -992,6 +1016,8 @@ static __init int svm_hardware_setup(void)
/* Note, SEV setup consumes npt_enabled. */
sev_hardware_setup();
+ svm_hv_hardware_setup();
+
svm_adjust_mmio_mask();
for_each_possible_cpu(cpu) {
@@ -1005,16 +1031,12 @@ static __init int svm_hardware_setup(void)
nrips = false;
}
- if (avic) {
- if (!npt_enabled ||
- !boot_cpu_has(X86_FEATURE_AVIC) ||
- !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
- avic = false;
- } else {
- pr_info("AVIC enabled\n");
+ enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
- amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
- }
+ if (enable_apicv) {
+ pr_info("AVIC enabled\n");
+
+ amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
}
if (vls) {
@@ -1078,29 +1100,35 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
seg->base = 0;
}
-static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u64 g_tsc_offset = 0;
- if (is_guest_mode(vcpu)) {
- /* Write L1's TSC offset. */
- g_tsc_offset = svm->vmcb->control.tsc_offset -
- svm->vmcb01.ptr->control.tsc_offset;
- svm->vmcb01.ptr->control.tsc_offset = offset;
- }
+ return svm->nested.ctl.tsc_offset;
+}
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- svm->vmcb->control.tsc_offset - g_tsc_offset,
- offset);
+static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+ return kvm_default_tsc_scaling_ratio;
+}
- svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
+ svm->vmcb->control.tsc_offset = offset;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
- return svm->vmcb->control.tsc_offset;
}
-static void svm_check_invpcid(struct vcpu_svm *svm)
+static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+ wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+}
+
+/* Evaluate instruction intercepts that depend on guest CPUID features. */
+static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
+ struct vcpu_svm *svm)
{
/*
* Intercept INVPCID if shadow paging is enabled to sync/free shadow
@@ -1113,6 +1141,13 @@ static void svm_check_invpcid(struct vcpu_svm *svm)
else
svm_clr_intercept(svm, INTERCEPT_INVPCID);
}
+
+ if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
+ if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+ svm_clr_intercept(svm, INTERCEPT_RDTSCP);
+ else
+ svm_set_intercept(svm, INTERCEPT_RDTSCP);
+ }
}
static void init_vmcb(struct kvm_vcpu *vcpu)
@@ -1235,8 +1270,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm->current_vmcb->asid_generation = 0;
svm->asid = 0;
- svm->nested.vmcb12_gpa = 0;
- svm->nested.last_vmcb12_gpa = 0;
+ svm->nested.vmcb12_gpa = INVALID_GPA;
+ svm->nested.last_vmcb12_gpa = INVALID_GPA;
vcpu->arch.hflags = 0;
if (!kvm_pause_in_guest(vcpu->kvm)) {
@@ -1248,7 +1283,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_clr_intercept(svm, INTERCEPT_PAUSE);
}
- svm_check_invpcid(svm);
+ svm_recalc_instruction_intercepts(vcpu, svm);
/*
* If the host supports V_SPEC_CTRL then disable the interception
@@ -1276,6 +1311,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
}
}
+ svm_hv_init_vmcb(svm->vmcb);
+
vmcb_mark_all_dirty(svm->vmcb);
enable_gif(svm);
@@ -1424,6 +1461,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+ if (sev_es_guest(vcpu->kvm))
+ sev_es_unmap_ghcb(svm);
+
if (svm->guest_state_loaded)
return;
@@ -1445,8 +1485,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
}
}
- if (static_cpu_has(X86_FEATURE_RDTSCP))
- kvm_set_user_return_msr(TSC_AUX_URET_SLOT, svm->tsc_aux, -1ull);
+ if (likely(tsc_aux_uret_slot >= 0))
+ kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
svm->guest_state_loaded = true;
}
@@ -2655,11 +2695,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
break;
case MSR_TSC_AUX:
- if (!boot_cpu_has(X86_FEATURE_RDTSCP))
- return 1;
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
msr_info->data = svm->tsc_aux;
break;
/*
@@ -2876,30 +2911,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
break;
case MSR_TSC_AUX:
- if (!boot_cpu_has(X86_FEATURE_RDTSCP))
- return 1;
-
- if (!msr->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
-
- /*
- * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
- * incomplete and conflicting architectural behavior. Current
- * AMD CPUs completely ignore bits 63:32, i.e. they aren't
- * reserved and always read as zeros. Emulate AMD CPU behavior
- * to avoid explosions if the vCPU is migrated from an AMD host
- * to an Intel host.
- */
- data = (u32)data;
-
/*
* TSC_AUX is usually changed only during boot and never read
* directly. Intercept TSC_AUX instead of exposing it to the
* guest via direct_access_msrs, and switch it via user return.
*/
preempt_disable();
- r = kvm_set_user_return_msr(TSC_AUX_URET_SLOT, data, -1ull);
+ r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
preempt_enable();
if (r)
return 1;
@@ -3084,6 +3102,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
+ [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op,
[SVM_EXIT_WBINVD] = kvm_emulate_wbinvd,
[SVM_EXIT_MONITOR] = kvm_emulate_monitor,
[SVM_EXIT_MWAIT] = kvm_emulate_mwait,
@@ -3113,6 +3132,8 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
return;
}
+ pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
+ svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
@@ -3769,6 +3790,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->save.cr2 = vcpu->arch.cr2;
+ svm_hv_update_vp_id(svm->vmcb, vcpu);
+
/*
* Run with all-zero DR6 unless needed, so that we can get the exact cause
* of a #DB.
@@ -3842,6 +3865,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
svm->next_rip = 0;
if (is_guest_mode(vcpu)) {
nested_sync_control_from_vmcb02(svm);
+
+ /* Track VMRUNs that have made it past consistency checking */
+ if (svm->nested.nested_run_pending &&
+ svm->vmcb->control.exit_code != SVM_EXIT_ERR)
+ ++vcpu->stat.nested_run;
+
svm->nested.nested_run_pending = 0;
}
@@ -3853,10 +3882,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.apf.host_apf_flags =
kvm_read_and_reset_apf_flags();
- if (npt_enabled) {
- vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
- vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
- }
+ if (npt_enabled)
+ kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
/*
* We need to handle MC intercepts here before the vcpu has a chance to
@@ -3884,6 +3911,8 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
+ hv_track_root_tdp(vcpu, root_hpa);
+
/* Loading L2's CR3 is handled by enter_svm_guest_mode. */
if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
return;
@@ -3972,8 +4001,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
- /* Check again if INVPCID interception if required */
- svm_check_invpcid(svm);
+ svm_recalc_instruction_intercepts(vcpu, svm);
/* For sev guests, the memory encryption bit is not reserved in CR3. */
if (sev_guest(vcpu->kvm)) {
@@ -4257,7 +4285,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !svm_smi_blocked(vcpu);
}
-static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
@@ -4279,7 +4307,7 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_host_map map;
@@ -4435,13 +4463,12 @@ static int svm_vm_init(struct kvm *kvm)
if (!pause_filter_count || !pause_filter_thresh)
kvm->arch.pause_in_guest = true;
- if (avic) {
+ if (enable_apicv) {
int ret = avic_vm_init(kvm);
if (ret)
return ret;
}
- kvm_apicv_init(kvm, avic);
return 0;
}
@@ -4532,7 +4559,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
- .write_l1_tsc_offset = svm_write_l1_tsc_offset,
+ .get_l2_tsc_offset = svm_get_l2_tsc_offset,
+ .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
+ .write_tsc_offset = svm_write_tsc_offset,
+ .write_tsc_multiplier = svm_write_tsc_multiplier,
.load_mmu_pgd = svm_load_mmu_pgd,
@@ -4552,8 +4582,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.setup_mce = svm_setup_mce,
.smi_allowed = svm_smi_allowed,
- .pre_enter_smm = svm_pre_enter_smm,
- .pre_leave_smm = svm_pre_leave_smm,
+ .enter_smm = svm_enter_smm,
+ .leave_smm = svm_leave_smm,
.enable_smi_window = svm_enable_smi_window,
.mem_enc_op = svm_mem_enc_op,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 84b3133c2251..f89b623bb591 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -20,6 +20,7 @@
#include <linux/bits.h>
#include <asm/svm.h>
+#include <asm/sev-common.h>
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
@@ -31,6 +32,11 @@
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
+/*
+ * Clean bits in VMCB.
+ * VMCB_ALL_CLEAN_MASK might also need to
+ * be updated if this enum is modified.
+ */
enum {
VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
pause filter count */
@@ -48,9 +54,17 @@ enum {
* AVIC PHYSICAL_TABLE pointer,
* AVIC LOGICAL_TABLE pointer
*/
- VMCB_DIRTY_MAX,
+ VMCB_SW = 31, /* Reserved for hypervisor/software use */
};
+#define VMCB_ALL_CLEAN_MASK ( \
+ (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) | \
+ (1U << VMCB_ASID) | (1U << VMCB_INTR) | \
+ (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) | \
+ (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
+ (1U << VMCB_LBR) | (1U << VMCB_AVIC) | \
+ (1U << VMCB_SW))
+
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
@@ -237,10 +251,15 @@ static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
- vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+ vmcb->control.clean = VMCB_ALL_CLEAN_MASK
& ~VMCB_ALWAYS_DIRTY_MASK;
}
+static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
+{
+ return (vmcb->control.clean & (1 << bit));
+}
+
static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
@@ -479,8 +498,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-extern int avic;
-
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
@@ -525,40 +542,9 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
/* sev.c */
-#define GHCB_VERSION_MAX 1ULL
-#define GHCB_VERSION_MIN 1ULL
-
-#define GHCB_MSR_INFO_POS 0
-#define GHCB_MSR_INFO_MASK (BIT_ULL(12) - 1)
-
-#define GHCB_MSR_SEV_INFO_RESP 0x001
-#define GHCB_MSR_SEV_INFO_REQ 0x002
-#define GHCB_MSR_VER_MAX_POS 48
-#define GHCB_MSR_VER_MAX_MASK 0xffff
-#define GHCB_MSR_VER_MIN_POS 32
-#define GHCB_MSR_VER_MIN_MASK 0xffff
-#define GHCB_MSR_CBIT_POS 24
-#define GHCB_MSR_CBIT_MASK 0xff
-#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
- ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
- (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
- (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
- GHCB_MSR_SEV_INFO_RESP)
-
-#define GHCB_MSR_CPUID_REQ 0x004
-#define GHCB_MSR_CPUID_RESP 0x005
-#define GHCB_MSR_CPUID_FUNC_POS 32
-#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
-#define GHCB_MSR_CPUID_VALUE_POS 32
-#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
-#define GHCB_MSR_CPUID_REG_POS 30
-#define GHCB_MSR_CPUID_REG_MASK 0x3
-
-#define GHCB_MSR_TERM_REQ 0x100
-#define GHCB_MSR_TERM_REASON_SET_POS 12
-#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
-#define GHCB_MSR_TERM_REASON_POS 16
-#define GHCB_MSR_TERM_REASON_MASK 0xff
+#define GHCB_VERSION_MAX 1ULL
+#define GHCB_VERSION_MIN 1ULL
+
extern unsigned int max_sev_asid;
@@ -581,6 +567,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
+void sev_es_unmap_ghcb(struct vcpu_svm *svm);
/* vmenter.S */
diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
new file mode 100644
index 000000000000..98aa981c04ec
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_onhyperv.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
+
+#include <asm/mshyperv.h>
+
+#include "svm.h"
+#include "svm_ops.h"
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+{
+ struct hv_enlightenments *hve;
+ struct hv_partition_assist_pg **p_hv_pa_pg =
+ &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
+
+ if (!*p_hv_pa_pg)
+ *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (!*p_hv_pa_pg)
+ return -ENOMEM;
+
+ hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;
+
+ hve->partition_assist_page = __pa(*p_hv_pa_pg);
+ hve->hv_vm_id = (unsigned long)vcpu->kvm;
+ if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
+ hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+ vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+ }
+
+ return 0;
+}
+
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
new file mode 100644
index 000000000000..9b9a55abc29f
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
+#define __ARCH_X86_KVM_SVM_ONHYPERV_H__
+
+#if IS_ENABLED(CONFIG_HYPERV)
+#include <asm/mshyperv.h>
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+
+static struct kvm_x86_ops svm_x86_ops;
+
+/*
+ * Hyper-V uses the software reserved 32 bytes in VMCB
+ * control area to expose SVM enlightenments to guests.
+ */
+struct hv_enlightenments {
+ struct __packed hv_enlightenments_control {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 enlightened_npt_tlb: 1;
+ u32 reserved:29;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 reserved;
+} __packed;
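As a sanity check on the layout above, the fields pack into exactly the 32 software-reserved bytes of the VMCB control area that the comment refers to. A standalone sketch (not part of the patch; the struct below is a plain-integer stand-in for the bitfield-carrying original, and the 32-byte figure is taken from the comment above):

#include <assert.h>
#include <stdint.h>

struct demo_hv_enlightenments {
	uint32_t control;		/* hv_enlightenments_control, 32 flag bits */
	uint32_t hv_vp_id;
	uint64_t hv_vm_id;
	uint64_t partition_assist_page;
	uint64_t reserved;
} __attribute__((packed));

int main(void)
{
	/* 4 + 4 + 8 + 8 + 8 = 32 bytes, the size of the reserved area. */
	assert(sizeof(struct demo_hv_enlightenments) == 32);
	return 0;
}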
+
+/*
+ * Hyper-V uses the software reserved clean bit in VMCB
+ */
+#define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu);
+
+static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+{
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+
+ if (npt_enabled &&
+ ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB)
+ hve->hv_enlightenments_control.enlightened_npt_tlb = 1;
+}
+
+static inline void svm_hv_hardware_setup(void)
+{
+ if (npt_enabled &&
+ ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
+ pr_info("kvm: Hyper-V enlightened NPT TLB flush enabled\n");
+ svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
+ svm_x86_ops.tlb_remote_flush_with_range =
+ hv_remote_flush_tlb_with_range;
+ }
+
+ if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
+ int cpu;
+
+ pr_info("kvm: Hyper-V Direct TLB Flush enabled\n");
+ for_each_online_cpu(cpu) {
+ struct hv_vp_assist_page *vp_ap =
+ hv_get_vp_assist_page(cpu);
+
+ if (!vp_ap)
+ continue;
+
+ vp_ap->nested_control.features.directhypercall = 1;
+ }
+ svm_x86_ops.enable_direct_tlbflush =
+ svm_hv_enable_direct_tlbflush;
+ }
+}
+
+static inline void svm_hv_vmcb_dirty_nested_enlightenments(
+ struct kvm_vcpu *vcpu)
+{
+ struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+
+ /*
+ * vmcb can be NULL if called during early vcpu init.
+ * And it's okay not to mark the vmcb dirty during vcpu init,
+ * as we mark it dirty unconditionally towards the end of the
+ * vcpu init phase.
+ */
+ if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+ hve->hv_enlightenments_control.msr_bitmap)
+ vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+}
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+ struct kvm_vcpu *vcpu)
+{
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+ u32 vp_index = kvm_hv_get_vpindex(vcpu);
+
+ if (hve->hv_vp_id != vp_index) {
+ hve->hv_vp_id = vp_index;
+ vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+ }
+}
+#else
+
+static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+{
+}
+
+static inline void svm_hv_hardware_setup(void)
+{
+}
+
+static inline void svm_hv_vmcb_dirty_nested_enlightenments(
+ struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+ struct kvm_vcpu *vcpu)
+{
+}
+#endif /* CONFIG_HYPERV */
+
+#endif /* __ARCH_X86_KVM_SVM_ONHYPERV_H__ */
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a61c015870e3..b484141ea15b 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -997,7 +997,7 @@ TRACE_EVENT(kvm_wait_lapic_expire,
__entry->delta < 0 ? "early" : "late")
);
-TRACE_EVENT(kvm_enter_smm,
+TRACE_EVENT(kvm_smm_transition,
TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
TP_ARGS(vcpu_id, smbase, entering),
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
TP_ARGS(msg, err),
TP_STRUCT__entry(
- __field(const char *, msg)
+ __string(msg, msg)
__field(u32, err)
),
TP_fast_assign(
- __entry->msg = msg;
+ __assign_str(msg, msg);
__entry->err = err;
),
- TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+ TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index d1d77985e889..4705ad55abb5 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -12,7 +12,6 @@ extern bool __read_mostly enable_ept;
extern bool __read_mostly enable_unrestricted_guest;
extern bool __read_mostly enable_ept_ad_bits;
extern bool __read_mostly enable_pml;
-extern bool __read_mostly enable_apicv;
extern int __read_mostly pt_mode;
#define PT_MODE_SYSTEM 0
@@ -90,8 +89,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)
static inline bool cpu_has_vmx_posted_intr(void)
{
- return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
- vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}
static inline bool cpu_has_load_ia32_efer(void)
@@ -398,6 +396,9 @@ static inline u64 vmx_supported_debugctl(void)
{
u64 debugctl = 0;
+ if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+ debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+
if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
debugctl |= DEBUGCTLMSR_LBR_MASK;
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 41f24661af04..896b2a50b4aa 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -319,6 +319,9 @@ bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
if (unlikely(!assist_page.enlighten_vmentry))
return false;
+ if (unlikely(!evmptr_is_valid(assist_page.current_nested_vmcs)))
+ return false;
+
*evmcs_gpa = assist_page.current_nested_vmcs;
return true;
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index bd41d9462355..2ec9b46f0d0c 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -197,6 +197,14 @@ static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
+#define EVMPTR_INVALID (-1ULL)
+#define EVMPTR_MAP_PENDING (-2ULL)
+
+static inline bool evmptr_is_valid(u64 evmptr)
+{
+ return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
+}
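+
+/*
+ * Sketch of the hv_evmcs_vmptr states (inferred from the users of these
+ * sentinels elsewhere in this series, not a definitive contract):
+ *   EVMPTR_INVALID     - no eVMCS is in use;
+ *   EVMPTR_MAP_PENDING - an eVMCS was in use (e.g. before migration) and
+ *                        still has to be (re)mapped from guest memory;
+ *   anything else      - GPA of the currently mapped, active eVMCS.
+ */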
+
enum nested_evmptrld_status {
EVMPTRLD_DISABLED,
EVMPTRLD_SUCCEEDED,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index bced76637823..1a52134b0c42 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -173,9 +173,13 @@ static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
| X86_EFLAGS_ZF);
get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
/*
- * We don't need to force a shadow sync because
- * VM_INSTRUCTION_ERROR is not shadowed
+ * We don't need to force sync to shadow VMCS because
+ * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
+ * fields and thus must be synced.
*/
+ if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
+ to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
+
return kvm_skip_emulated_instruction(vcpu);
}
@@ -187,7 +191,8 @@ static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
* failValid writes the error number to the current VMCS, which
* can't be done if there isn't a current VMCS.
*/
- if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+ if (vmx->nested.current_vmptr == -1ull &&
+ !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
return nested_vmx_failInvalid(vcpu);
return nested_vmx_failValid(vcpu, vm_instruction_error);
@@ -221,12 +226,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!vmx->nested.hv_evmcs)
- return;
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+ kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
+ vmx->nested.hv_evmcs = NULL;
+ }
- kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
- vmx->nested.hv_evmcs_vmptr = 0;
- vmx->nested.hv_evmcs = NULL;
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
}
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
@@ -346,16 +351,21 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
vmcs12->guest_physical_address = fault->address;
}
+static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
+{
+ kvm_init_shadow_ept_mmu(vcpu,
+ to_vmx(vcpu)->nested.msrs.ept_caps &
+ VMX_EPT_EXECUTE_ONLY_BIT,
+ nested_ept_ad_enabled(vcpu),
+ nested_ept_get_eptp(vcpu));
+}
+
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
- kvm_init_shadow_ept_mmu(vcpu,
- to_vmx(vcpu)->nested.msrs.ept_caps &
- VMX_EPT_EXECUTE_ONLY_BIT,
- nested_ept_ad_enabled(vcpu),
- nested_ept_get_eptp(vcpu));
+ nested_ept_new_eptp(vcpu);
vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
@@ -1058,54 +1068,13 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
}
/*
- * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
- * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't
- * enable VPID for L2 (implying it expects a TLB flush on VMX transitions).
- * Here's why.
- *
- * If EPT is enabled by L0 a sync is never needed:
- * - if it is disabled by L1, then L0 is not shadowing L1 or L2 PTEs, there
- * cannot be unsync'd SPTEs for either L1 or L2.
- *
- * - if it is also enabled by L1, then L0 doesn't need to sync on VM-Enter
- * VM-Enter as VM-Enter isn't required to invalidate guest-physical mappings
- * (irrespective of VPID), i.e. L1 can't rely on the (virtual) CPU to flush
- * stale guest-physical mappings for L2 from the TLB. And as above, L0 isn't
- * shadowing L1 PTEs so there are no unsync'd SPTEs to sync on VM-Exit.
- *
- * If EPT is disabled by L0:
- * - if VPID is enabled by L1 (for L2), the situation is similar to when L1
- * enables EPT: L0 doesn't need to sync as VM-Enter and VM-Exit aren't
- * required to invalidate linear mappings (EPT is disabled so there are
- * no combined or guest-physical mappings), i.e. L1 can't rely on the
- * (virtual) CPU to flush stale linear mappings for either L2 or itself (L1).
- *
- * - however if VPID is disabled by L1, then a sync is needed as L1 expects all
- * linear mappings (EPT is disabled so there are no combined or guest-physical
- * mappings) to be invalidated on both VM-Enter and VM-Exit.
- *
- * Note, this logic is subtly different than nested_has_guest_tlb_tag(), which
- * additionally checks that L2 has been assigned a VPID (when EPT is disabled).
- * Whether or not L2 has been assigned a VPID by L0 is irrelevant with respect
- * to L1's expectations, e.g. L0 needs to invalidate hardware TLB entries if L2
- * doesn't have a unique VPID to prevent reusing L1's entries (assuming L1 has
- * been assigned a VPID), but L0 doesn't need to do a MMU sync because L1
- * doesn't expect stale (virtual) TLB entries to be flushed, i.e. L1 doesn't
- * know that L0 will flush the TLB and so L1 will do INVVPID as needed to flush
- * stale TLB entries, at which point L0 will sync L2's MMU.
- */
-static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu)
-{
- return !enable_ept && !nested_cpu_has_vpid(get_vmcs12(vcpu));
-}
-
-/*
* Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
* emulating VM-Entry into a guest with EPT enabled. On failure, the expected
* Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
* @entry_failure_code.
*/
-static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
+ bool nested_ept, bool reload_pdptrs,
enum vm_entry_failure_code *entry_failure_code)
{
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
@@ -1117,27 +1086,20 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
* If PAE paging and EPT are both on, CR3 is not used by the CPU and
* must not be dereferenced.
*/
- if (!nested_ept && is_pae_paging(vcpu) &&
- (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
- if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
- *entry_failure_code = ENTRY_FAIL_PDPTE;
- return -EINVAL;
- }
+ if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
+ CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
+ *entry_failure_code = ENTRY_FAIL_PDPTE;
+ return -EINVAL;
}
- /*
- * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
- * flushes are handled by nested_vmx_transition_tlb_flush(). See
- * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
- */
if (!nested_ept)
- kvm_mmu_new_pgd(vcpu, cr3, true,
- !nested_vmx_transition_mmu_sync(vcpu));
+ kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
- kvm_init_mmu(vcpu, false);
+ /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
+ kvm_init_mmu(vcpu);
return 0;
}
@@ -1170,17 +1132,28 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
- * If VPID is disabled, linear and combined mappings are flushed on
- * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
- * their associated EPTP.
+ * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
+ * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
+ * full TLB flush from the guest's perspective. This is required even
+ * if VPID is disabled in the host as KVM may need to synchronize the
+ * MMU in response to the guest TLB flush.
+ *
+ * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
+ * EPT is a special snowflake, as guest-physical mappings aren't
+ * flushed on VPID invalidations, including VM-Enter or VM-Exit with
+ * VPID disabled. As a result, KVM _never_ needs to sync nEPT
+ * entries on VM-Enter because L1 can't rely on VM-Enter to flush
+ * those mappings.
*/
- if (!enable_vpid)
+ if (!nested_cpu_has_vpid(vmcs12)) {
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
return;
+ }
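+
+ /*
+ * Resulting flush choice, summarized (a sketch of this function's logic):
+ *   - vmcs12 has no VPID                      -> TLB_FLUSH_GUEST (above)
+ *   - VPID used, but no unique L2 ASID        -> TLB_FLUSH_CURRENT
+ *   - unique ASID and vpid12 changed on entry -> flush the vpid02 context,
+ *     since the new vpid12 reuses it (see the comment below).
+ */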
+
+ /* L2 should never have a VPID if VPID is disabled. */
+ WARN_ON(!enable_vpid);
/*
- * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
- * for *all* contexts to be flushed on VM-Enter/VM-Exit.
- *
* If VPID is enabled and used by vmc12, but L2 does not have a unique
* TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
* a VPID for L2, flush the current context as the effective ASID is
@@ -1192,13 +1165,12 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
*
* If a TLB flush isn't required due to any of the above, and vpid12 is
* changing then the new "virtual" VPID (vpid12) will reuse the same
- * "real" VPID (vpid02), and so needs to be sync'd. There is no direct
+ * "real" VPID (vpid02), and so needs to be flushed. There's no direct
* mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
- * all nested vCPUs.
+ * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
+ * guest-physical mappings, so there is no need to sync the nEPT MMU.
*/
- if (!nested_cpu_has_vpid(vmcs12)) {
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- } else if (!nested_has_guest_tlb_tag(vcpu)) {
+ if (!nested_has_guest_tlb_tag(vcpu)) {
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
} else if (is_vmenter &&
vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
@@ -1586,7 +1558,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
vmcs_load(vmx->loaded_vmcs->vmcs);
}
-static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
+static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
{
struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
@@ -1595,7 +1567,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->tpr_threshold = evmcs->tpr_threshold;
vmcs12->guest_rip = evmcs->guest_rip;
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
vmcs12->guest_rsp = evmcs->guest_rsp;
vmcs12->guest_rflags = evmcs->guest_rflags;
@@ -1603,23 +1575,23 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->guest_interruptibility_info;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
vmcs12->cpu_based_vm_exec_control =
evmcs->cpu_based_vm_exec_control;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
vmcs12->exception_bitmap = evmcs->exception_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
vmcs12->vm_entry_intr_info_field =
evmcs->vm_entry_intr_info_field;
@@ -1629,7 +1601,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->vm_entry_instruction_len;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
@@ -1649,7 +1621,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->host_tr_selector = evmcs->host_tr_selector;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
vmcs12->pin_based_vm_exec_control =
evmcs->pin_based_vm_exec_control;
@@ -1658,18 +1630,18 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->secondary_vm_exec_control;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
vmcs12->msr_bitmap = evmcs->msr_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
vmcs12->guest_es_base = evmcs->guest_es_base;
vmcs12->guest_cs_base = evmcs->guest_cs_base;
@@ -1709,14 +1681,14 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
vmcs12->tsc_offset = evmcs->tsc_offset;
vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
@@ -1728,7 +1700,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->guest_dr7 = evmcs->guest_dr7;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
vmcs12->host_fs_base = evmcs->host_fs_base;
vmcs12->host_gs_base = evmcs->host_gs_base;
@@ -1738,13 +1710,13 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->host_rsp = evmcs->host_rsp;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
vmcs12->ept_pointer = evmcs->ept_pointer;
vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
@@ -1799,10 +1771,10 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
* vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
*/
- return 0;
+ return;
}
-static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
+static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
@@ -1962,7 +1934,7 @@ static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
- return 0;
+ return;
}
/*
@@ -1979,13 +1951,13 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
if (likely(!vmx->nested.enlightened_vmcs_enabled))
return EVMPTRLD_DISABLED;
- if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
+ if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
+ nested_release_evmcs(vcpu);
return EVMPTRLD_DISABLED;
+ }
- if (unlikely(!vmx->nested.hv_evmcs ||
- evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
- if (!vmx->nested.hv_evmcs)
- vmx->nested.current_vmptr = -1ull;
+ if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+ vmx->nested.current_vmptr = -1ull;
nested_release_evmcs(vcpu);
@@ -2023,7 +1995,6 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
return EVMPTRLD_VMFAIL;
}
- vmx->nested.dirty_vmcs12 = true;
vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
evmcs_gpa_changed = true;
@@ -2056,14 +2027,10 @@ void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.hv_evmcs) {
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
copy_vmcs12_to_enlightened(vmx);
- /* All fields are clean */
- vmx->nested.hv_evmcs->hv_clean_fields |=
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- } else {
+ else
copy_vmcs12_to_shadow(vmx);
- }
vmx->nested.need_vmcs12_to_shadow_sync = false;
}
@@ -2208,7 +2175,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
u32 exec_control;
u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
- if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
+ if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
prepare_vmcs02_early_rare(vmx, vmcs12);
/*
@@ -2277,7 +2244,8 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_ENABLE_VMFUNC);
+ SECONDARY_EXEC_ENABLE_VMFUNC |
+ SECONDARY_EXEC_TSC_SCALING);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -2488,18 +2456,18 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
* is assigned to entry_failure_code on failure.
*/
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ bool from_vmentry,
enum vm_entry_failure_code *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
bool load_guest_pdptrs_vmcs12 = false;
- if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
+ if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
prepare_vmcs02_rare(vmx, vmcs12);
vmx->nested.dirty_vmcs12 = false;
- load_guest_pdptrs_vmcs12 = !hv_evmcs ||
- !(hv_evmcs->hv_clean_fields &
+ load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
+ !(vmx->nested.hv_evmcs->hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
}
@@ -2532,10 +2500,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
}
- vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+ vcpu->arch.l1_tsc_offset,
+ vmx_get_l2_tsc_offset(vcpu),
+ vmx_get_l2_tsc_multiplier(vcpu));
+
+ vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+ vcpu->arch.l1_tsc_scaling_ratio,
+ vmx_get_l2_tsc_multiplier(vcpu));
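+
+ /*
+ * Sketch of the intended composition (assuming the generic
+ * kvm_calc_nested_tsc_* helpers implement standard two-level TSC scaling):
+ * with L1 TSC = (host TSC * l1_mult >> frac) + l1_offset and
+ * L2 TSC = (L1 TSC * l2_mult >> frac) + l2_offset, the combined values are
+ *   offset = (l1_offset * l2_mult >> frac) + l2_offset
+ *   mult   = (l1_mult * l2_mult) >> frac
+ * which are what gets written to vmcs02 below.
+ */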
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
+ vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
@@ -2572,7 +2548,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
- entry_failure_code))
+ from_vmentry, entry_failure_code))
return -EINVAL;
/*
@@ -2604,6 +2580,17 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
kvm_rip_write(vcpu, vmcs12->guest_rip);
+
+ /*
+ * It was observed that genuine Hyper-V running in L1 doesn't reset
+ * 'hv_clean_fields' by itself; it only sets the corresponding dirty
+ * bits when it changes a field in eVMCS. Mark all fields as clean
+ * here.
+ */
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+ vmx->nested.hv_evmcs->hv_clean_fields |=
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
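+
+ /*
+ * Resulting protocol (sketch): L1 clears a group's clean bit when it
+ * writes a field in that group, copy_enlightened_to_vmcs12() copies
+ * only groups whose clean bit is clear, and all groups are re-marked
+ * clean here once their contents have been consumed.
+ */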
+
return 0;
}
@@ -3093,20 +3080,20 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
* L2 was running), map it here to make sure vmcs12 changes are
* properly reflected.
*/
- if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
+ if (vmx->nested.enlightened_vmcs_enabled &&
+ vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
enum nested_evmptrld_status evmptrld_status =
nested_vmx_handle_enlightened_vmptrld(vcpu, false);
if (evmptrld_status == EVMPTRLD_VMFAIL ||
- evmptrld_status == EVMPTRLD_ERROR) {
- pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
- __func__);
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror =
- KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
+ evmptrld_status == EVMPTRLD_ERROR)
return false;
- }
+
+ /*
+ * Post migration VMCS12 always provides the most actual
+ * information, copy it to eVMCS upon entry.
+ */
+ vmx->nested.need_vmcs12_to_shadow_sync = true;
}
return true;
@@ -3120,6 +3107,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
struct page *page;
u64 hpa;
+ if (!vcpu->arch.pdptrs_from_userspace &&
+ !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
+ /*
+ * Reload the guest's PDPTRs since after a migration
+ * the guest CR3 might be restored prior to setting the nested
+ * state which can lead to a load of wrong PDPTRs.
+ */
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ return false;
+ }
+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
@@ -3182,6 +3181,15 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
offset_in_page(vmcs12->posted_intr_desc_addr));
vmcs_write64(POSTED_INTR_DESC_ADDR,
pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
+ } else {
+ /*
+ * Defer the KVM_INTERNAL_EXIT until KVM tries to
+ * access the contents of the VMCS12 posted interrupt
+ * descriptor. (Note that KVM may do this when it
+ * should not, per the architectural specification.)
+ */
+ vmx->nested.pi_desc = NULL;
+ pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
}
}
if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
@@ -3194,8 +3202,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
- if (!nested_get_evmcs_page(vcpu))
+ if (!nested_get_evmcs_page(vcpu)) {
+ pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+ __func__);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror =
+ KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+
return false;
+ }
if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
return false;
@@ -3353,10 +3369,8 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
}
enter_guest_mode(vcpu);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
- vcpu->arch.tsc_offset += vmcs12->tsc_offset;
- if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
+ if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
exit_reason.basic = EXIT_REASON_INVALID_STATE;
vmcs12->exit_qualification = entry_failure_code;
goto vmentry_fail_vmexit_guest_mode;
@@ -3436,7 +3450,7 @@ vmentry_fail_vmexit:
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = exit_reason.full;
- if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
vmx->nested.need_vmcs12_to_shadow_sync = true;
return NVMX_VMENTRY_VMEXIT;
}
@@ -3453,8 +3467,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
enum nested_evmptrld_status evmptrld_status;
- ++vcpu->stat.nested_run;
-
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -3466,7 +3478,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return nested_vmx_failInvalid(vcpu);
}
- if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
+ if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
+ vmx->nested.current_vmptr == -1ull))
return nested_vmx_failInvalid(vcpu);
vmcs12 = get_vmcs12(vcpu);
@@ -3480,8 +3493,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (CC(vmcs12->hdr.shadow_vmcs))
return nested_vmx_failInvalid(vcpu);
- if (vmx->nested.hv_evmcs) {
- copy_enlightened_to_vmcs12(vmx);
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+ copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
/* Enlightened VMCS doesn't have launch state */
vmcs12->launch_state = !launch;
} else if (enable_shadow_vmcs) {
@@ -3681,25 +3694,29 @@ void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
}
}
-static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
void *vapic_page;
u16 status;
- if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
- return;
+ if (!vmx->nested.pi_pending)
+ return 0;
+
+ if (!vmx->nested.pi_desc)
+ goto mmio_needed;
vmx->nested.pi_pending = false;
+
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
- return;
+ return 0;
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr != 256) {
vapic_page = vmx->nested.virtual_apic_map.hva;
if (!vapic_page)
- return;
+ goto mmio_needed;
__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
vapic_page, &max_irr);
@@ -3712,6 +3729,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
}
nested_mark_vmcs12_pages_dirty(vcpu);
+ return 0;
+
+mmio_needed:
+ kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
+ return -ENXIO;
}
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
@@ -3886,8 +3908,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
}
no_vmexit:
- vmx_complete_nested_posted_interrupt(vcpu);
- return 0;
+ return vmx_complete_nested_posted_interrupt(vcpu);
}
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
@@ -4031,10 +4052,11 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.hv_evmcs)
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
- vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
+ vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
+ !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
@@ -4205,7 +4227,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* Only PDPTE load can fail as the value of cr3 was checked on entry and
* couldn't have changed.
*/
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
@@ -4435,7 +4457,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
/* Similarly, triple faults in L2 should never escape. */
WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
- kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+ if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+ /*
+ * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
+ * Enlightened VMCS after migration, and we still need to
+ * do that when something forces an L2->L1 exit prior to
+ * the first L2 run.
+ */
+ (void)nested_get_evmcs_page(vcpu);
+ }
/* Service the TLB flush request for L2 before switching to L1. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
@@ -4454,8 +4484,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (nested_cpu_has_preemption_timer(vmcs12))
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
- vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
+ vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+ vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ }
if (likely(!vmx->fail)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
@@ -4492,12 +4525,12 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ if (kvm_has_tsc_control)
+ vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
+
if (vmx->nested.l1_tpr_threshold != -1)
vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
- if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
-
if (vmx->nested.change_vmcs01_virtual_apic_mode) {
vmx->nested.change_vmcs01_virtual_apic_mode = false;
vmx_set_virtual_apic_mode(vcpu);
@@ -4523,7 +4556,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
}
if ((vm_exit_reason != -1) &&
- (enable_shadow_vmcs || vmx->nested.hv_evmcs))
+ (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
vmx->nested.need_vmcs12_to_shadow_sync = true;
/* in case we halted in L2 */
@@ -4978,6 +5011,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
vmptr + offsetof(struct vmcs12,
launch_state),
&zero, sizeof(zero));
+ } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
+ nested_release_evmcs(vcpu);
}
return nested_vmx_succeed(vcpu);
@@ -5219,7 +5254,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
/* Forbid normal VMPTRLD if Enlightened version was used */
- if (vmx->nested.hv_evmcs)
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
return 1;
if (vmx->nested.current_vmptr != vmptr) {
@@ -5275,7 +5310,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
return 1;
if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
@@ -5452,8 +5487,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
/*
* Sync the shadow page tables if EPT is disabled, L1 is invalidating
- * linear mappings for L2 (tagged with L2's VPID). Free all roots as
- * VPIDs are not tracked in the MMU role.
+ * linear mappings for L2 (tagged with L2's VPID). Free all guest
+ * roots as VPIDs are not tracked in the MMU role.
*
* Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
* an MMU when EPT is disabled.
@@ -5461,8 +5496,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
* TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
*/
if (!enable_ept)
- kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu,
- KVM_MMU_ROOTS_ALL);
+ kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu);
return nested_vmx_succeed(vcpu);
}
@@ -5472,23 +5506,16 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
{
u32 index = kvm_rcx_read(vcpu);
u64 new_eptp;
- bool accessed_dirty;
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- if (!nested_cpu_has_eptp_switching(vmcs12) ||
- !nested_cpu_has_ept(vmcs12))
+ if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
return 1;
-
if (index >= VMFUNC_EPTP_ENTRIES)
return 1;
-
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
&new_eptp, index * 8, 8))
return 1;
- accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
-
/*
* If the (L2) guest does a vmfunc to the currently
* active ept pointer, we don't have to do anything else
@@ -5497,11 +5524,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
if (!nested_vmx_check_eptp(vcpu, new_eptp))
return 1;
- mmu->ept_ad = accessed_dirty;
- mmu->mmu_role.base.ad_disabled = !accessed_dirty;
vmcs12->ept_pointer = new_eptp;
+ nested_ept_new_eptp(vcpu);
- kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ if (!nested_cpu_has_vpid(vmcs12))
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
}
return 0;
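+ /*
+ * For reference (illustrative, not part of the change): an L2 guest reaches
+ * nested_vmx_eptp_switching() by executing VMFUNC with EAX = 0 (EPTP
+ * switching) and ECX = the index into the EPTP list that L1 configured.
+ */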
@@ -5524,7 +5551,17 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
}
vmcs12 = get_vmcs12(vcpu);
- if ((vmcs12->vm_function_control & (1 << function)) == 0)
+
+ /*
+ * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
+ * is enabled in vmcs02 if and only if it's enabled in vmcs12.
+ */
+ if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (!(vmcs12->vm_function_control & BIT_ULL(function)))
goto fail;
switch (function) {
@@ -5797,6 +5834,9 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
else if (is_breakpoint(intr_info) &&
vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return true;
+ else if (is_alignment_check(intr_info) &&
+ !vmx_guest_inject_ac(vcpu))
+ return true;
return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
@@ -6047,7 +6087,8 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (vmx_has_valid_vmcs12(vcpu)) {
kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
- if (vmx->nested.hv_evmcs)
+ /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
+ if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
if (is_guest_mode(vcpu) &&
@@ -6103,8 +6144,15 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
} else {
copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
if (!vmx->nested.need_vmcs12_to_shadow_sync) {
- if (vmx->nested.hv_evmcs)
- copy_enlightened_to_vmcs12(vmx);
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+ /*
+ * The L1 hypervisor is not obliged to keep the eVMCS
+ * clean-fields data up-to-date while not in guest
+ * mode; 'hv_clean_fields' is only guaranteed to be
+ * valid at vmentry, so ignore it here and do a full
+ * copy.
+ */
+ copy_enlightened_to_vmcs12(vmx, 0);
else if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
}
@@ -6246,6 +6294,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
* restored yet. EVMCS will be mapped from
* nested_get_vmcs12_pages().
*/
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
} else {
return -EINVAL;
@@ -6330,6 +6379,40 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void)
}
/*
+ * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
+ * that madness to get the encoding for comparison.
+ */
+#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
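+
+/*
+ * Worked example (illustrative; assumes the offset table is built with
+ * ROL16(encoding, 6), per the comment above): GUEST_ES_SELECTOR has encoding
+ * 0x0800, so it sits at table index ROL16(0x0800, 6) = 0x0002, and
+ * VMCS12_IDX_TO_ENC(0x0002) = 0x0800 recovers the encoding; bits 9:1 of that
+ * encoding then give field index 0 within its group.
+ */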
+
+static u64 nested_vmx_calc_vmcs_enum_msr(void)
+{
+ /*
+ * Note these are the so called "index" of the VMCS field encoding, not
+ * the index into vmcs12.
+ */
+ unsigned int max_idx, idx;
+ int i;
+
+ /*
+ * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
+ * vmcs12, regardless of whether or not the associated feature is
+ * exposed to L1. Simply find the field with the highest index.
+ */
+ max_idx = 0;
+ for (i = 0; i < nr_vmcs12_fields; i++) {
+ /* The vmcs12 table is very, very sparsely populated. */
+ if (!vmcs_field_to_offset_table[i])
+ continue;
+
+ idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
+ if (idx > max_idx)
+ max_idx = idx;
+ }
+
+ return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
+}
+
+/*
* nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
* returned for the various VMX controls MSRs when nested VMX is enabled.
* The same values should also be used to verify that vmcs12 control fields are
@@ -6465,7 +6548,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
SECONDARY_EXEC_RDRAND_EXITING |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_RDSEED_EXITING |
- SECONDARY_EXEC_XSAVES;
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_TSC_SCALING;
/*
* We can emulate "VMCS shadowing," even if the hardware
@@ -6573,8 +6657,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
- /* highest index: VMX_PREEMPTION_TIMER_VALUE */
- msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
+ msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
}
void nested_vmx_hardware_unsetup(void)
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 184418baeb3c..b69a80f43b37 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -56,14 +56,9 @@ static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- /*
- * In case we do two consecutive get/set_nested_state()s while L2 was
- * running hv_evmcs may end up not being mapped (we map it from
- * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
- * have vmcs12 if it is true.
- */
- return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
- vmx->nested.hv_evmcs;
+ /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
+ return vmx->nested.current_vmptr != -1ull ||
+ vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID;
}
static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 459748680daf..5f81ef092bd4 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -238,6 +238,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
/*
+ * Bail out of the block loop if the VM has an assigned
+ * device, but the blocking vCPU didn't reconfigure the
+ * PI.NV to the wakeup vector, i.e. the assigned device
+ * came along after the initial check in pi_pre_block().
+ */
+void vmx_pi_start_assignment(struct kvm *kvm)
+{
+ if (!irq_remapping_cap(IRQ_POSTING_CAP))
+ return;
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
+}
+
+/*
* pi_update_irte - set IRTE for Posted-Interrupts
*
* @kvm: kvm
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 0bdc41391c5b..7f7b2326caf5 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
bool set);
+void vmx_pi_start_assignment(struct kvm *kvm);
#endif /* __KVM_X86_VMX_POSTED_INTR_H */
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index 1472c6c376f7..4b9957e2bf5b 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -117,6 +117,11 @@ static inline bool is_gp_fault(u32 intr_info)
return is_exception_n(intr_info, GP_VECTOR);
}
+static inline bool is_alignment_check(u32 intr_info)
+{
+ return is_exception_n(intr_info, AC_VECTOR);
+}
+
static inline bool is_machine_check(u32 intr_info)
{
return is_exception_n(intr_info, MC_VECTOR);
@@ -164,4 +169,12 @@ static inline int vmcs_field_readonly(unsigned long field)
return (((field >> 10) & 0x3) == 1);
}
+#define VMCS_FIELD_INDEX_SHIFT (1)
+#define VMCS_FIELD_INDEX_MASK GENMASK(9, 1)
+
+static inline unsigned int vmcs_field_index(unsigned long field)
+{
+ return (field & VMCS_FIELD_INDEX_MASK) >> VMCS_FIELD_INDEX_SHIFT;
+}
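+
+/*
+ * Per the SDM's VMCS field-encoding layout (stated here as background, not
+ * introduced by this change): bit 0 is the access type, bits 9:1 the field
+ * index within a group, bits 11:10 the field type and bits 14:13 the width;
+ * vmcs_field_index() extracts only the index bits.
+ */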
+
#endif /* __KVM_X86_VMX_VMCS_H */
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
index 034adb6404dc..d9f5d7c56ae3 100644
--- a/arch/x86/kvm/vmx/vmcs12.c
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -37,6 +37,7 @@ const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
FIELD64(PML_ADDRESS, pml_address),
FIELD64(TSC_OFFSET, tsc_offset),
+ FIELD64(TSC_MULTIPLIER, tsc_multiplier),
FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
index 13494956d0e9..5e0e1b39f495 100644
--- a/arch/x86/kvm/vmx/vmcs12.h
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -70,7 +70,8 @@ struct __packed vmcs12 {
u64 eptp_list_address;
u64 pml_address;
u64 encls_exiting_bitmap;
- u64 padding64[2]; /* room for future expansion */
+ u64 tsc_multiplier;
+ u64 padding64[1]; /* room for future expansion */
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
@@ -205,12 +206,6 @@ struct __packed vmcs12 {
#define VMCS12_SIZE KVM_STATE_NESTED_VMX_VMCS_SIZE
/*
- * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
- * supported VMCS12 field encoding.
- */
-#define VMCS12_MAX_FIELD_INDEX 0x17
-
-/*
* For save/restore compatibility, the vmcs12 field offsets must not change.
*/
#define CHECK_OFFSET(field, loc) \
@@ -258,6 +253,7 @@ static inline void vmx_check_vmcs12_offsets(void)
CHECK_OFFSET(eptp_list_address, 304);
CHECK_OFFSET(pml_address, 312);
CHECK_OFFSET(encls_exiting_bitmap, 320);
+ CHECK_OFFSET(tsc_multiplier, 328);
CHECK_OFFSET(cr0_guest_host_mask, 344);
CHECK_OFFSET(cr4_guest_host_mask, 352);
CHECK_OFFSET(cr0_read_shadow, 360);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d000cddbd734..927a552393b9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -52,6 +52,7 @@
#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
+#include "kvm_onhyperv.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
@@ -101,7 +102,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
-bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);
/*
@@ -455,105 +455,10 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
static unsigned long host_idt_base;
-/*
- * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
- * will emulate SYSCALL in legacy mode if the vendor string in guest
- * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
- * support this emulation, IA32_STAR must always be included in
- * vmx_uret_msrs_list[], even in i386 builds.
- */
-static const u32 vmx_uret_msrs_list[] = {
-#ifdef CONFIG_X86_64
- MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
-#endif
- MSR_EFER, MSR_TSC_AUX, MSR_STAR,
- MSR_IA32_TSX_CTRL,
-};
-
#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);
-static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
- void *data)
-{
- struct kvm_tlb_range *range = data;
-
- return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
- range->pages);
-}
-
-static inline int hv_remote_flush_root_ept(hpa_t root_ept,
- struct kvm_tlb_range *range)
-{
- if (range)
- return hyperv_flush_guest_mapping_range(root_ept,
- kvm_fill_hv_flush_list_func, (void *)range);
- else
- return hyperv_flush_guest_mapping(root_ept);
-}
-
-static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
- struct kvm_tlb_range *range)
-{
- struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
- struct kvm_vcpu *vcpu;
- int ret = 0, i, nr_unique_valid_roots;
- hpa_t root;
-
- spin_lock(&kvm_vmx->hv_root_ept_lock);
-
- if (!VALID_PAGE(kvm_vmx->hv_root_ept)) {
- nr_unique_valid_roots = 0;
-
- /*
- * Flush all valid roots, and see if all vCPUs have converged
- * on a common root, in which case future flushes can skip the
- * loop and flush the common root.
- */
- kvm_for_each_vcpu(i, vcpu, kvm) {
- root = to_vmx(vcpu)->hv_root_ept;
- if (!VALID_PAGE(root) || root == kvm_vmx->hv_root_ept)
- continue;
-
- /*
- * Set the tracked root to the first valid root. Keep
- * this root for the entirety of the loop even if more
- * roots are encountered as a low effort optimization
- * to avoid flushing the same (first) root again.
- */
- if (++nr_unique_valid_roots == 1)
- kvm_vmx->hv_root_ept = root;
-
- if (!ret)
- ret = hv_remote_flush_root_ept(root, range);
-
- /*
- * Stop processing roots if a failure occurred and
- * multiple valid roots have already been detected.
- */
- if (ret && nr_unique_valid_roots > 1)
- break;
- }
-
- /*
- * The optimized flush of a single root can't be used if there
- * are multiple valid roots (obviously).
- */
- if (nr_unique_valid_roots > 1)
- kvm_vmx->hv_root_ept = INVALID_PAGE;
- } else {
- ret = hv_remote_flush_root_ept(kvm_vmx->hv_root_ept, range);
- }
-
- spin_unlock(&kvm_vmx->hv_root_ept_lock);
- return ret;
-}
-static int hv_remote_flush_tlb(struct kvm *kvm)
-{
- return hv_remote_flush_tlb_with_range(kvm, NULL);
-}
-
static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
struct hv_enlightened_vmcs *evmcs;
@@ -581,21 +486,6 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
#endif /* IS_ENABLED(CONFIG_HYPERV) */
-static void hv_track_root_ept(struct kvm_vcpu *vcpu, hpa_t root_ept)
-{
-#if IS_ENABLED(CONFIG_HYPERV)
- struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
-
- if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
- spin_lock(&kvm_vmx->hv_root_ept_lock);
- to_vmx(vcpu)->hv_root_ept = root_ept;
- if (root_ept != kvm_vmx->hv_root_ept)
- kvm_vmx->hv_root_ept = INVALID_PAGE;
- spin_unlock(&kvm_vmx->hv_root_ept_lock);
- }
-#endif
-}
-
/*
* Comment's format: document - errata name - stepping - processor name.
* Refer from
@@ -697,21 +587,11 @@ static bool is_valid_passthrough_msr(u32 msr)
return r;
}
-static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
-{
- int i;
-
- for (i = 0; i < vmx->nr_uret_msrs; ++i)
- if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)
- return i;
- return -1;
-}
-
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{
int i;
- i = __vmx_find_uret_msr(vmx, msr);
+ i = kvm_find_user_return_msr(msr);
if (i >= 0)
return &vmx->guest_uret_msrs[i];
return NULL;
@@ -720,13 +600,14 @@ struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
struct vmx_uret_msr *msr, u64 data)
{
+ unsigned int slot = msr - vmx->guest_uret_msrs;
int ret = 0;
u64 old_msr_data = msr->data;
msr->data = data;
- if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
+ if (msr->load_into_hardware) {
preempt_disable();
- ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask);
+ ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
preempt_enable();
if (ret)
msr->data = old_msr_data;
@@ -866,16 +747,21 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
else {
- /*
- * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
- * between guest and host. In that case we only care about present
- * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
- * prepare_vmcs02_rare.
- */
- bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
- int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+ int mask = 0, match = 0;
+
+ if (enable_ept && (eb & (1u << PF_VECTOR))) {
+ /*
+ * If EPT is enabled, #PF is currently only intercepted
+ * if MAXPHYADDR is smaller on the guest than on the
+ * host. In that case we only care about present,
+ * non-reserved faults. For vmcs02, however, PFEC_MASK
+ * and PFEC_MATCH are set in prepare_vmcs02_rare.
+ */
+ mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
+ match = PFERR_PRESENT_MASK;
+ }
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
}
vmcs_write32(EXCEPTION_BITMAP, eb);
@@ -1078,7 +964,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
return false;
}
- i = __vmx_find_uret_msr(vmx, MSR_EFER);
+ i = kvm_find_user_return_msr(MSR_EFER);
if (i < 0)
return false;
@@ -1240,11 +1126,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
*/
if (!vmx->guest_uret_msrs_loaded) {
vmx->guest_uret_msrs_loaded = true;
- for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
- kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot,
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ if (!vmx->guest_uret_msrs[i].load_into_hardware)
+ continue;
+
+ kvm_set_user_return_msr(i,
vmx->guest_uret_msrs[i].data,
vmx->guest_uret_msrs[i].mask);
-
+ }
}
if (vmx->nested.need_vmcs12_to_shadow_sync)
@@ -1411,11 +1300,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
vmx->loaded_vmcs->cpu = cpu;
}
-
- /* Setup TSC multiplier */
- if (kvm_has_tsc_control &&
- vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
- decache_tsc_multiplier(vmx);
}
/*
@@ -1751,19 +1635,16 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
vmx_clear_hlt(vcpu);
}
-static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
+static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
+ bool load_into_hardware)
{
- struct vmx_uret_msr tmp;
- int from, to;
+ struct vmx_uret_msr *uret_msr;
- from = __vmx_find_uret_msr(vmx, msr);
- if (from < 0)
+ uret_msr = vmx_find_uret_msr(vmx, msr);
+ if (!uret_msr)
return;
- to = vmx->nr_active_uret_msrs++;
- tmp = vmx->guest_uret_msrs[to];
- vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
- vmx->guest_uret_msrs[from] = tmp;
+ uret_msr->load_into_hardware = load_into_hardware;
}
/*
@@ -1773,51 +1654,73 @@ static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
*/
static void setup_msrs(struct vcpu_vmx *vmx)
{
- vmx->guest_uret_msrs_loaded = false;
- vmx->nr_active_uret_msrs = 0;
#ifdef CONFIG_X86_64
+ bool load_syscall_msrs;
+
/*
* The SYSCALL MSRs are only needed on long mode guests, and only
* when EFER.SCE is set.
*/
- if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
- vmx_setup_uret_msr(vmx, MSR_STAR);
- vmx_setup_uret_msr(vmx, MSR_LSTAR);
- vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
- }
+ load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
+ (vmx->vcpu.arch.efer & EFER_SCE);
+
+ vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
+ vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
+ vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
#endif
- if (update_transition_efer(vmx))
- vmx_setup_uret_msr(vmx, MSR_EFER);
+ vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
- if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
- vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
+ vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
+ guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
- vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+ /*
+ * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
+ * kernel and old userspace. If those guests run on a tsx=off host, do
+ * allow guests to use TSX_CTRL, but don't change the value in hardware
+ * so that TSX remains always disabled.
+ */
+ vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
if (cpu_has_vmx_msr_bitmap())
vmx_update_msr_bitmap(&vmx->vcpu);
+
+ /*
+ * The set of MSRs to load may have changed, reload MSRs before the
+ * next VM-Enter.
+ */
+ vmx->guest_uret_msrs_loaded = false;
}
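+/*
+ * Note (a sketch of the reworked scheme, inferred from the code above): every
+ * MSR in the common kvm user-return list now has a fixed slot in
+ * guest_uret_msrs[], so vmx_find_uret_msr() can index via
+ * kvm_find_user_return_msr(); the load_into_hardware flag alone decides
+ * whether a slot is written to hardware before the next VM-Enter.
+ */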
-static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- u64 g_tsc_offset = 0;
- /*
- * We're here if L1 chose not to trap WRMSR to TSC. According
- * to the spec, this should set L1's TSC; The offset that L1
- * set for L2 remains unchanged, and still needs to be added
- * to the newly set TSC to get L2's TSC.
- */
- if (is_guest_mode(vcpu) &&
- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
- g_tsc_offset = vmcs12->tsc_offset;
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
+ return vmcs12->tsc_offset;
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- vcpu->arch.tsc_offset - g_tsc_offset,
- offset);
- vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
- return offset + g_tsc_offset;
+ return 0;
+}
+
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+ return vmcs12->tsc_multiplier;
+
+ return kvm_default_tsc_scaling_ratio;
+}
+
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ vmcs_write64(TSC_OFFSET, offset);
+}
+
+static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+ vmcs_write64(TSC_MULTIPLIER, multiplier);
}
/*
@@ -1993,11 +1896,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
break;
- case MSR_TSC_AUX:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
- goto find_uret_msr;
case MSR_IA32_DEBUGCTLMSR:
msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
break;
@@ -2031,6 +1929,9 @@ static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
if (!intel_pmu_lbr_is_enabled(vcpu))
debugctl &= ~DEBUGCTLMSR_LBR_MASK;
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+ debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+
return debugctl;
}
@@ -2313,14 +2214,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
vmx->pt_desc.guest.addr_a[index / 2] = data;
break;
- case MSR_TSC_AUX:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
- /* Check reserved bit, higher 32 bits should be zero */
- if ((data >> 32) != 0)
- return 1;
- goto find_uret_msr;
case MSR_IA32_PERF_CAPABILITIES:
if (data && !vcpu_to_pmu(vcpu)->version)
return 1;
@@ -3202,7 +3095,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
eptp = construct_eptp(vcpu, root_hpa, root_level);
vmcs_write64(EPT_POINTER, eptp);
- hv_track_root_ept(vcpu, root_hpa);
+ hv_track_root_tdp(vcpu, root_hpa);
if (!enable_unrestricted_guest && !is_paging(vcpu))
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
@@ -3728,7 +3621,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
int ret = 0;
mutex_lock(&kvm->slots_lock);
- if (kvm->arch.apic_access_page_done)
+ if (kvm->arch.apic_access_memslot_enabled)
goto out;
hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
@@ -3748,7 +3641,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
* is able to migrate it.
*/
put_page(page);
- kvm->arch.apic_access_page_done = true;
+ kvm->arch.apic_access_memslot_enabled = true;
out:
mutex_unlock(&kvm->slots_lock);
return ret;
@@ -4369,7 +4262,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
xsaves_enabled, false);
}
- vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
+ /*
+ * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
+ * feature is exposed to the guest. This creates a virtualization hole
+ * if both are supported in hardware but only one is exposed to the
+ * guest, but letting the guest execute RDTSCP or RDPID when either one
+ * is advertised is preferable to emulating the advertised instruction
+ * in KVM on #UD, and obviously better than incorrectly injecting #UD.
+ */
+ if (cpu_has_vmx_rdtscp()) {
+ bool rdpid_or_rdtscp_enabled =
+ guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+
+ vmx_adjust_secondary_exec_control(vmx, &exec_control,
+ SECONDARY_EXEC_ENABLE_RDTSCP,
+ rdpid_or_rdtscp_enabled, false);
+ }
vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
@@ -4834,7 +4743,7 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
* - Guest has #AC detection enabled in CR0
* - Guest EFLAGS has AC bit set
*/
-static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
+bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
{
if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
return true;
@@ -4848,7 +4757,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
- unsigned long cr2, rip, dr6;
+ unsigned long cr2, dr6;
u32 vect_info;
vect_info = vmx->idt_vectoring_info;
@@ -4938,12 +4847,11 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
- rip = kvm_rip_read(vcpu);
- kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+ kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
kvm_run->debug.arch.exception = ex_no;
break;
case AC_VECTOR:
- if (guest_inject_ac(vcpu)) {
+ if (vmx_guest_inject_ac(vcpu)) {
kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
return 1;
}
@@ -5816,6 +5724,8 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
if (cpu_has_secondary_exec_ctrls())
secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
+ vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
pr_err("*** Guest State ***\n");
pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
@@ -6253,6 +6163,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
switch (kvm_get_apic_mode(vcpu)) {
case LAPIC_MODE_INVALID:
WARN_ONCE(true, "Invalid local APIC state");
+ break;
case LAPIC_MODE_DISABLED:
break;
case LAPIC_MODE_XAPIC:
@@ -6811,7 +6722,18 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
kvm_load_host_xsave_state(vcpu);
- vmx->nested.nested_run_pending = 0;
+ if (is_guest_mode(vcpu)) {
+ /*
+ * Track VMLAUNCH/VMRESUME that have made it past guest state
+ * checking.
+ */
+ if (vmx->nested.nested_run_pending &&
+ !vmx->exit_reason.failed_vmentry)
+ ++vcpu->stat.nested_run;
+
+ vmx->nested.nested_run_pending = 0;
+ }
+
vmx->idt_vectoring_info = 0;
if (unlikely(vmx->fail)) {
@@ -6855,6 +6777,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
{
+ struct vmx_uret_msr *tsx_ctrl;
struct vcpu_vmx *vmx;
int i, cpu, err;
@@ -6877,43 +6800,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
goto free_vpid;
}
- BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
-
- for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
- u32 index = vmx_uret_msrs_list[i];
- u32 data_low, data_high;
- int j = vmx->nr_uret_msrs;
-
- if (rdmsr_safe(index, &data_low, &data_high) < 0)
- continue;
- if (wrmsr_safe(index, data_low, data_high) < 0)
- continue;
-
- vmx->guest_uret_msrs[j].slot = i;
- vmx->guest_uret_msrs[j].data = 0;
- switch (index) {
- case MSR_IA32_TSX_CTRL:
- /*
- * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
- * interception. Keep the host value unchanged to avoid
- * changing CPUID bits under the host kernel's feet.
- *
- * hle=0, rtm=0, tsx_ctrl=1 can be found with some
- * combinations of new kernel and old userspace. If
- * those guests run on a tsx=off host, do allow guests
- * to use TSX_CTRL, but do not change the value on the
- * host so that TSX remains always disabled.
- */
- if (boot_cpu_has(X86_FEATURE_RTM))
- vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
- else
- vmx->guest_uret_msrs[j].mask = 0;
- break;
- default:
- vmx->guest_uret_msrs[j].mask = -1ull;
- break;
- }
- ++vmx->nr_uret_msrs;
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ vmx->guest_uret_msrs[i].data = 0;
+ vmx->guest_uret_msrs[i].mask = -1ull;
+ }
+ if (boot_cpu_has(X86_FEATURE_RTM)) {
+ /*
+ * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
+ * Keep the host value unchanged to avoid changing CPUID bits
+ * under the host kernel's feet.
+ */
+ tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+ if (tsx_ctrl)
+ tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
}
err = alloc_loaded_vmcs(&vmx->vmcs01);
@@ -6969,6 +6868,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
vcpu->arch.microcode_version = 0x100000000ULL;
vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
@@ -6980,9 +6880,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmx->pi_desc.nv = POSTED_INTR_VECTOR;
vmx->pi_desc.sn = 1;
-#if IS_ENABLED(CONFIG_HYPERV)
- vmx->hv_root_ept = INVALID_PAGE;
-#endif
return 0;
free_vmcs:
@@ -6999,10 +6896,6 @@ free_vpid:
static int vmx_vm_init(struct kvm *kvm)
{
-#if IS_ENABLED(CONFIG_HYPERV)
- spin_lock_init(&to_kvm_vmx(kvm)->hv_root_ept_lock);
-#endif
-
if (!ple_gap)
kvm->arch.pause_in_guest = true;
@@ -7029,7 +6922,6 @@ static int vmx_vm_init(struct kvm *kvm)
break;
}
}
- kvm_apicv_init(kvm, enable_apicv);
return 0;
}
@@ -7344,9 +7236,11 @@ static __init void vmx_set_cpu_caps(void)
if (!cpu_has_vmx_xsaves())
kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
- /* CPUID 0x80000001 */
- if (!cpu_has_vmx_rdtscp())
+ /* CPUID 0x80000001 and 0x7 (RDPID) */
+ if (!cpu_has_vmx_rdtscp()) {
kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+ kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+ }
if (cpu_has_vmx_waitpkg())
kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
@@ -7402,8 +7296,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
/*
* RDPID causes #UD if disabled through secondary execution controls.
* Because it is marked as EmulateOnUD, we need to intercept it here.
+ * Note, RDPID is hidden behind ENABLE_RDTSCP.
*/
- case x86_intercept_rdtscp:
+ case x86_intercept_rdpid:
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
exception->vector = UD_VECTOR;
exception->error_code_valid = false;
@@ -7478,10 +7373,10 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
delta_tsc = 0;
/* Convert to host delta tsc if tsc scaling is enabled */
- if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
+ if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
delta_tsc && u64_shl_div_u64(delta_tsc,
kvm_tsc_scaling_ratio_frac_bits,
- vcpu->arch.tsc_scaling_ratio, &delta_tsc))
+ vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
return -ERANGE;
/*
@@ -7567,7 +7462,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !is_smm(vcpu);
}
-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7581,7 +7476,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -7725,7 +7620,10 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
- .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+ .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
+ .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
+ .write_tsc_offset = vmx_write_tsc_offset,
+ .write_tsc_multiplier = vmx_write_tsc_multiplier,
.load_mmu_pgd = vmx_load_mmu_pgd,
@@ -7746,6 +7644,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.nested_ops = &vmx_nested_ops,
.update_pi_irte = pi_update_irte,
+ .start_assignment = vmx_pi_start_assignment,
#ifdef CONFIG_X86_64
.set_hv_timer = vmx_set_hv_timer,
@@ -7755,8 +7654,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.setup_mce = vmx_setup_mce,
.smi_allowed = vmx_smi_allowed,
- .pre_enter_smm = vmx_pre_enter_smm,
- .pre_leave_smm = vmx_pre_leave_smm,
+ .enter_smm = vmx_enter_smm,
+ .leave_smm = vmx_leave_smm,
.enable_smi_window = vmx_enable_smi_window,
.can_emulate_instruction = vmx_can_emulate_instruction,
@@ -7769,17 +7668,42 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
};
+static __init void vmx_setup_user_return_msrs(void)
+{
+ /*
+ * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
+ * will emulate SYSCALL in legacy mode if the vendor string in guest
+ * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!". To
+ * support this emulation, MSR_STAR is included in the list for i386,
+ * but is never loaded into hardware. MSR_CSTAR is also never loaded
+ * into hardware and is here purely for emulation purposes.
+ */
+ const u32 vmx_uret_msrs_list[] = {
+ #ifdef CONFIG_X86_64
+ MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
+ #endif
+ MSR_EFER, MSR_TSC_AUX, MSR_STAR,
+ MSR_IA32_TSX_CTRL,
+ };
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
+
+ for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
+ kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
+}
+
static __init int hardware_setup(void)
{
unsigned long host_bndcfgs;
struct desc_ptr dt;
- int r, i, ept_lpage_level;
+ int r, ept_lpage_level;
store_idt(&dt);
host_idt_base = dt.address;
- for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
- kvm_define_user_return_msr(i, vmx_uret_msrs_list[i]);
+ vmx_setup_user_return_msrs();
if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
return -EIO;
@@ -7806,6 +7730,12 @@ static __init int hardware_setup(void)
!cpu_has_vmx_invept_global())
enable_ept = 0;
+ /* NX support is required for shadow paging. */
+ if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
+ pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
enable_ept_ad_bits = 0;
@@ -7995,6 +7925,8 @@ static void vmx_exit(void)
}
#endif
vmx_cleanup_l1d_flush();
+
+ allow_smaller_maxphyaddr = false;
}
module_exit(vmx_exit);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 008cb87ff088..3979a947933a 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -36,7 +36,7 @@ struct vmx_msrs {
};
struct vmx_uret_msr {
- unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
+ bool load_into_hardware;
u64 data;
u64 mask;
};
@@ -245,8 +245,16 @@ struct vcpu_vmx {
u32 idt_vectoring_info;
ulong rflags;
+ /*
+ * User return MSRs are always emulated when enabled in the guest, but
+ * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
+ * of 64-bit mode or if EFER.SCE=0, so the SYSCALL MSRs don't need to
+ * be loaded into hardware unless the guest can actually use SYSCALL.
+ * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
+ * into hardware when running the guest. guest_uret_msrs[] is resorted
+ * whenever the number of "active" uret MSRs is modified.
+ */
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
- int nr_uret_msrs;
int nr_active_uret_msrs;
bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
@@ -314,8 +322,6 @@ struct vcpu_vmx {
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
- u64 current_tsc_ratio;
-
unsigned long host_debugctlmsr;
/*
@@ -328,10 +334,6 @@ struct vcpu_vmx {
/* SGX Launch Control public key hash */
u64 msr_ia32_sgxlepubkeyhash[4];
-#if IS_ENABLED(CONFIG_HYPERV)
- u64 hv_root_ept;
-#endif
-
struct pt_desc pt_desc;
struct lbr_desc lbr_desc;
@@ -349,11 +351,6 @@ struct kvm_vmx {
unsigned int tss_addr;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;
-
-#if IS_ENABLED(CONFIG_HYPERV)
- hpa_t hv_root_ept;
- spinlock_t hv_root_ept_lock;
-#endif
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
@@ -379,6 +376,7 @@ void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
@@ -396,6 +394,9 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
int type, bool value)
{
@@ -521,12 +522,6 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
GFP_KERNEL_ACCOUNT);
}
-static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
-{
- vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
- vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-}
-
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
return vmx->secondary_exec_control &
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6eda2834fc05..17468d983fbd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -58,6 +58,7 @@
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
+#include <linux/suspend.h>
#include <trace/events/kvm.h>
@@ -102,6 +103,8 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
+
#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
@@ -113,6 +116,9 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
+static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
+static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
+
struct kvm_x86_ops kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -184,11 +190,6 @@ module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
*/
#define KVM_MAX_NR_USER_RETURN_MSRS 16
-struct kvm_user_return_msrs_global {
- int nr;
- u32 msrs[KVM_MAX_NR_USER_RETURN_MSRS];
-};
-
struct kvm_user_return_msrs {
struct user_return_notifier urn;
bool registered;
@@ -198,7 +199,9 @@ struct kvm_user_return_msrs {
} values[KVM_MAX_NR_USER_RETURN_MSRS];
};
-static struct kvm_user_return_msrs_global __read_mostly user_return_msrs_global;
+u32 __read_mostly kvm_nr_uret_msrs;
+EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
+static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
@@ -212,55 +215,78 @@ EXPORT_SYMBOL_GPL(host_efer);
bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
+bool __read_mostly enable_apicv = true;
+EXPORT_SYMBOL_GPL(enable_apicv);
+
u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);
u64 __read_mostly supported_xss;
EXPORT_SYMBOL_GPL(supported_xss);
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("pf_fixed", pf_fixed),
- VCPU_STAT("pf_guest", pf_guest),
- VCPU_STAT("tlb_flush", tlb_flush),
- VCPU_STAT("invlpg", invlpg),
- VCPU_STAT("exits", exits),
- VCPU_STAT("io_exits", io_exits),
- VCPU_STAT("mmio_exits", mmio_exits),
- VCPU_STAT("signal_exits", signal_exits),
- VCPU_STAT("irq_window", irq_window_exits),
- VCPU_STAT("nmi_window", nmi_window_exits),
- VCPU_STAT("halt_exits", halt_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("hypercalls", hypercalls),
- VCPU_STAT("request_irq", request_irq_exits),
- VCPU_STAT("irq_exits", irq_exits),
- VCPU_STAT("host_state_reload", host_state_reload),
- VCPU_STAT("fpu_reload", fpu_reload),
- VCPU_STAT("insn_emulation", insn_emulation),
- VCPU_STAT("insn_emulation_fail", insn_emulation_fail),
- VCPU_STAT("irq_injections", irq_injections),
- VCPU_STAT("nmi_injections", nmi_injections),
- VCPU_STAT("req_event", req_event),
- VCPU_STAT("l1d_flush", l1d_flush),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("nested_run", nested_run),
- VCPU_STAT("directed_yield_attempted", directed_yield_attempted),
- VCPU_STAT("directed_yield_successful", directed_yield_successful),
- VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
- VM_STAT("mmu_pte_write", mmu_pte_write),
- VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
- VM_STAT("mmu_flooded", mmu_flooded),
- VM_STAT("mmu_recycled", mmu_recycled),
- VM_STAT("mmu_cache_miss", mmu_cache_miss),
- VM_STAT("mmu_unsync", mmu_unsync),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
- VM_STAT("largepages", lpages, .mode = 0444),
- VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
- VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
+ STATS_DESC_COUNTER(VM, mmu_pte_write),
+ STATS_DESC_COUNTER(VM, mmu_pde_zapped),
+ STATS_DESC_COUNTER(VM, mmu_flooded),
+ STATS_DESC_COUNTER(VM, mmu_recycled),
+ STATS_DESC_COUNTER(VM, mmu_cache_miss),
+ STATS_DESC_ICOUNTER(VM, mmu_unsync),
+ STATS_DESC_ICOUNTER(VM, lpages),
+ STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
+ STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, pf_fixed),
+ STATS_DESC_COUNTER(VCPU, pf_guest),
+ STATS_DESC_COUNTER(VCPU, tlb_flush),
+ STATS_DESC_COUNTER(VCPU, invlpg),
+ STATS_DESC_COUNTER(VCPU, exits),
+ STATS_DESC_COUNTER(VCPU, io_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, irq_window_exits),
+ STATS_DESC_COUNTER(VCPU, nmi_window_exits),
+ STATS_DESC_COUNTER(VCPU, l1d_flush),
+ STATS_DESC_COUNTER(VCPU, halt_exits),
+ STATS_DESC_COUNTER(VCPU, request_irq_exits),
+ STATS_DESC_COUNTER(VCPU, irq_exits),
+ STATS_DESC_COUNTER(VCPU, host_state_reload),
+ STATS_DESC_COUNTER(VCPU, fpu_reload),
+ STATS_DESC_COUNTER(VCPU, insn_emulation),
+ STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
+ STATS_DESC_COUNTER(VCPU, hypercalls),
+ STATS_DESC_COUNTER(VCPU, irq_injections),
+ STATS_DESC_COUNTER(VCPU, nmi_injections),
+ STATS_DESC_COUNTER(VCPU, req_event),
+ STATS_DESC_COUNTER(VCPU, nested_run),
+ STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
+ STATS_DESC_COUNTER(VCPU, directed_yield_successful),
+ STATS_DESC_ICOUNTER(VCPU, guest_mode)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
u64 __read_mostly host_xcr0;
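The descriptors and headers defined above feed the new binary stats interface. A hedged userspace-side sketch of reading the resulting layout, assuming the KVM_GET_STATS_FD ioctl and struct kvm_stats_header from the uapi side of this series, and a hypothetical open vCPU fd:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* vcpu_fd is assumed to be an open KVM vCPU file descriptor. */
void dump_stats_header(int vcpu_fd)
{
	struct kvm_stats_header hdr;
	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);

	if (stats_fd < 0)
		return;

	/* The header sits at offset 0; it points at the id string,
	 * descriptor array and data array that follow it. */
	if (pread(stats_fd, &hdr, sizeof(hdr), 0) == sizeof(hdr))
		printf("%u descriptors, data at offset %u\n",
		       hdr.num_desc, hdr.data_offset);

	close(stats_fd);
}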
@@ -330,23 +356,53 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
user_return_notifier_unregister(urn);
}
local_irq_restore(flags);
- for (slot = 0; slot < user_return_msrs_global.nr; ++slot) {
+ for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
values = &msrs->values[slot];
if (values->host != values->curr) {
- wrmsrl(user_return_msrs_global.msrs[slot], values->host);
+ wrmsrl(kvm_uret_msrs_list[slot], values->host);
values->curr = values->host;
}
}
}
-void kvm_define_user_return_msr(unsigned slot, u32 msr)
+static int kvm_probe_user_return_msr(u32 msr)
+{
+ u64 val;
+ int ret;
+
+ preempt_disable();
+ ret = rdmsrl_safe(msr, &val);
+ if (ret)
+ goto out;
+ ret = wrmsrl_safe(msr, val);
+out:
+ preempt_enable();
+ return ret;
+}
+
+int kvm_add_user_return_msr(u32 msr)
{
- BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
- user_return_msrs_global.msrs[slot] = msr;
- if (slot >= user_return_msrs_global.nr)
- user_return_msrs_global.nr = slot + 1;
+ BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
+
+ if (kvm_probe_user_return_msr(msr))
+ return -1;
+
+ kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
+ return kvm_nr_uret_msrs++;
}
-EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
+EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
+
+int kvm_find_user_return_msr(u32 msr)
+{
+ int i;
+
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ if (kvm_uret_msrs_list[i] == msr)
+ return i;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
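A rough sketch of how a vendor module is expected to use the registration/lookup pair above (the kvm_* functions exist in this patch; example_vendor_setup() is a hypothetical caller, and this is kernel-context pseudocode rather than a standalone program):

static void example_vendor_setup(void)
{
	int slot;

	/* Register at module init; returns the slot, or -1 if the MSR
	 * cannot be read/written on this host (kvm_probe_user_return_msr). */
	if (kvm_add_user_return_msr(MSR_TSC_AUX) < 0)
		return;

	/* Later, translate the MSR index back to its slot... */
	slot = kvm_find_user_return_msr(MSR_TSC_AUX);

	/* ...and use the existing setter, which defers restoring the host
	 * value to the user-return notifier. */
	if (slot >= 0)
		kvm_set_user_return_msr(slot, 0, -1ull);
}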
static void kvm_user_return_msr_cpu_online(void)
{
@@ -355,8 +411,8 @@ static void kvm_user_return_msr_cpu_online(void)
u64 value;
int i;
- for (i = 0; i < user_return_msrs_global.nr; ++i) {
- rdmsrl_safe(user_return_msrs_global.msrs[i], &value);
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ rdmsrl_safe(kvm_uret_msrs_list[i], &value);
msrs->values[i].host = value;
msrs->values[i].curr = value;
}
@@ -371,7 +427,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
value = (value & mask) | (msrs->values[slot].host & ~mask);
if (value == msrs->values[slot].curr)
return 0;
- err = wrmsrl_safe(user_return_msrs_global.msrs[slot], value);
+ err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
if (err)
return 1;
@@ -751,13 +807,6 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
-static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
- void *data, int offset, int len, u32 access)
-{
- return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
- data, offset, len, access);
-}
-
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
@@ -792,6 +841,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ vcpu->arch.pdptrs_from_userspace = false;
out:
@@ -799,40 +849,14 @@ out:
}
EXPORT_SYMBOL_GPL(load_pdptrs);
-bool pdptrs_changed(struct kvm_vcpu *vcpu)
-{
- u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
- int offset;
- gfn_t gfn;
- int r;
-
- if (!is_pae_paging(vcpu))
- return false;
-
- if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
- return true;
-
- gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
- offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
- r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
- PFERR_USER_MASK | PFERR_WRITE_MASK);
- if (r < 0)
- return true;
-
- return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
-}
-EXPORT_SYMBOL_GPL(pdptrs_changed);
-
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
- unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
-
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
}
- if ((cr0 ^ old_cr0) & update_bits)
+ if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
kvm_mmu_reset_context(vcpu);
if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
@@ -1011,10 +1035,7 @@ EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
- unsigned long mmu_role_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
- if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+ if (((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
}
@@ -1057,25 +1078,46 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
+static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
+{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+ unsigned long roots_to_free = 0;
+ int i;
+
+ /*
+ * If neither the current CR3 nor any of the prev_roots use the given
+ * PCID, then nothing needs to be done here because a resync will
+ * happen anyway before switching to any other CR3.
+ */
+ if (kvm_get_active_pcid(vcpu) == pcid) {
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+ kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
+}
+
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
bool skip_tlb_flush = false;
+ unsigned long pcid = 0;
#ifdef CONFIG_X86_64
bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
if (pcid_enabled) {
skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
cr3 &= ~X86_CR3_PCID_NOFLUSH;
+ pcid = cr3 & X86_CR3_PCID_MASK;
}
#endif
- if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
- if (!skip_tlb_flush) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- }
- return 0;
- }
+ /* PDPTRs are always reloaded for PAE paging. */
+ if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
+ goto handle_tlb_flush;
/*
* Do not condition the GPA check on long mode, this helper is used to
@@ -1088,10 +1130,23 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
- kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+ if (cr3 != kvm_read_cr3(vcpu))
+ kvm_mmu_new_pgd(vcpu, cr3);
+
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+handle_tlb_flush:
+ /*
+ * A load of CR3 that flushes the TLB flushes only the current PCID,
+ * even if PCID is disabled, in which case PCID=0 is flushed. It's a
+ * moot point in the end because _disabling_ PCID will flush all PCIDs,
+ * and it's impossible to use a non-zero PCID when PCID is disabled,
+ * i.e. only PCID=0 can be relevant.
+ */
+ if (!skip_tlb_flush)
+ kvm_invalidate_pcid(vcpu, pcid);
+
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
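For reference, the skip_tlb_flush path above corresponds to the architectural PCID no-flush bit. A guest-side sketch (illustrative only, and valid only with CR4.PCIDE=1):

/* Writing CR3 with bit 63 (X86_CR3_PCID_NOFLUSH) set asks the CPU, and now
 * KVM's emulation above, not to flush the new PCID's translations. */
static inline void write_cr3_noflush(unsigned long cr3)
{
	asm volatile("mov %0, %%cr3" : : "r" (cr3 | (1UL << 63)) : "memory");
}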
@@ -1149,6 +1204,9 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
fixed |= DR6_RTM;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+ fixed |= DR6_BUS_LOCK;
return fixed;
}
@@ -1615,6 +1673,30 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
* invokes 64-bit SYSENTER.
*/
data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+ break;
+ case MSR_TSC_AUX:
+ if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+ return 1;
+
+ if (!host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ return 1;
+
+ /*
+ * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
+ * incomplete and conflicting architectural behavior. Current
+ * AMD CPUs completely ignore bits 63:32, i.e. they aren't
+ * reserved and always read as zeros. Enforce Intel's reserved
+ * bits check if and only if the guest CPU is Intel, and clear
+ * the bits in all other cases. This ensures cross-vendor
+ * migration will provide consistent behavior for the guest.
+ */
+ if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
+ return 1;
+
+ data = (u32)data;
+ break;
}
msr.data = data;
@@ -1651,6 +1733,18 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
return KVM_MSR_RET_FILTERED;
+ switch (index) {
+ case MSR_TSC_AUX:
+ if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+ return 1;
+
+ if (!host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ return 1;
+ break;
+ }
+
msr.index = index;
msr.host_initiated = host_initiated;
@@ -2113,13 +2207,15 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
return v;
}
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
+
static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
u64 ratio;
/* Guest TSC same frequency as host TSC? */
if (!scale) {
- vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
return 0;
}
@@ -2145,7 +2241,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
return -1;
}
- vcpu->arch.tsc_scaling_ratio = ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
return 0;
}
@@ -2157,7 +2253,7 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
/* tsc_khz can be zero if TSC calibration fails */
if (user_tsc_khz == 0) {
/* set tsc_scaling_ratio to a safe value */
- vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
return -1;
}
@@ -2239,10 +2335,9 @@ static inline u64 __scale_tsc(u64 ratio, u64 tsc)
return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio)
{
u64 _tsc = tsc;
- u64 ratio = vcpu->arch.tsc_scaling_ratio;
if (ratio != kvm_default_tsc_scaling_ratio)
_tsc = __scale_tsc(ratio, tsc);
@@ -2251,25 +2346,86 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
}
EXPORT_SYMBOL_GPL(kvm_scale_tsc);
-static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
u64 tsc;
- tsc = kvm_scale_tsc(vcpu, rdtsc());
+ tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
return target_tsc - tsc;
}
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
- return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
+ return vcpu->arch.l1_tsc_offset +
+ kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
-static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
+{
+ u64 nested_offset;
+
+ if (l2_multiplier == kvm_default_tsc_scaling_ratio)
+ nested_offset = l1_offset;
+ else
+ nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
+ kvm_tsc_scaling_ratio_frac_bits);
+
+ nested_offset += l2_offset;
+ return nested_offset;
+}
+EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
+
+u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
{
- vcpu->arch.l1_tsc_offset = offset;
- vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
+ if (l2_multiplier != kvm_default_tsc_scaling_ratio)
+ return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
+ kvm_tsc_scaling_ratio_frac_bits);
+
+ return l1_multiplier;
+}
+EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
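A worked example of the fixed-point math used by the two helpers above, assuming Intel's 48 fractional bits for kvm_tsc_scaling_ratio_frac_bits and a non-negative L1 offset (the kernel uses mul_s64_u64_shr for the signed offset case); it builds as a standalone program with GCC/Clang's unsigned __int128:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 48

static uint64_t mul_shr(uint64_t a, uint64_t b, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * b) >> shift);
}

int main(void)
{
	uint64_t l1_mult = 3ULL << (FRAC_BITS - 1);	/* L1 runs at 1.5x host */
	uint64_t l2_mult = 2ULL << FRAC_BITS;		/* L2 runs at 2x L1   */
	uint64_t l1_off = 1000, l2_off = 500;

	/* multiplier: 1.5 * 2.0 = 3.0, i.e. 3 << 48 */
	printf("combined multiplier = %#llx\n",
	       (unsigned long long)mul_shr(l1_mult, l2_mult, FRAC_BITS));

	/* offset: l1_off scaled by the L2 multiplier, plus l2_off = 2500 */
	printf("combined offset     = %llu\n",
	       (unsigned long long)(mul_shr(l1_off, l2_mult, FRAC_BITS) + l2_off));
	return 0;
}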
+
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
+{
+ trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+ vcpu->arch.l1_tsc_offset,
+ l1_offset);
+
+ vcpu->arch.l1_tsc_offset = l1_offset;
+
+ /*
+ * If we are here because L1 chose not to trap WRMSR to TSC then
+ * according to the spec this should set L1's TSC (as opposed to
+ * setting L1's offset for L2).
+ */
+ if (is_guest_mode(vcpu))
+ vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+ l1_offset,
+ static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
+ static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+ else
+ vcpu->arch.tsc_offset = l1_offset;
+
+ static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
+}
+
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
+{
+ vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
+
+ /* Userspace is changing the multiplier while L2 is active */
+ if (is_guest_mode(vcpu))
+ vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+ l1_multiplier,
+ static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+ else
+ vcpu->arch.tsc_scaling_ratio = l1_multiplier;
+
+ if (kvm_has_tsc_control)
+ static_call(kvm_x86_write_tsc_multiplier)(
+ vcpu, vcpu->arch.tsc_scaling_ratio);
}
static inline bool kvm_check_tsc_unstable(void)
@@ -2295,7 +2451,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
bool synchronizing = false;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
- offset = kvm_compute_tsc_offset(vcpu, data);
+ offset = kvm_compute_l1_tsc_offset(vcpu, data);
ns = get_kvmclock_base_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
@@ -2334,7 +2490,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
- offset = kvm_compute_tsc_offset(vcpu, data);
+ offset = kvm_compute_l1_tsc_offset(vcpu, data);
}
matched = true;
already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
@@ -2393,9 +2549,10 @@ static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
- if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+ if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
WARN_ON(adjustment < 0);
- adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+ adjustment = kvm_scale_tsc(vcpu, (u64) adjustment,
+ vcpu->arch.l1_tsc_scaling_ratio);
adjust_tsc_offset_guest(vcpu, adjustment);
}
@@ -2778,7 +2935,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* With all the info we got, fill in the values */
if (kvm_has_tsc_control)
- tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
+ tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz,
+ v->arch.l1_tsc_scaling_ratio);
if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
@@ -3006,6 +3164,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
+
+ if (!tdp_enabled) {
+ /*
+ * A TLB flush on behalf of the guest is equivalent to
+ * INVPCID(all), toggling CR4.PGE, etc., which requires
+ * a forced sync of the shadow page tables. Unload the
+ * entire MMU here and the subsequent load will sync the
+ * shadow page tables, and also flush the TLB.
+ */
+ kvm_mmu_unload(vcpu);
+ return;
+ }
+
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
@@ -3035,10 +3206,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
* expensive IPIs.
*/
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+ u8 st_preempted = xchg(&st->preempted, 0);
+
trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
- st->preempted & KVM_VCPU_FLUSH_TLB);
- if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ st_preempted & KVM_VCPU_FLUSH_TLB);
+ if (st_preempted & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb_guest(vcpu);
+ } else {
+ st->preempted = 0;
}
vcpu->arch.st.preempted = 0;
@@ -3167,7 +3342,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (msr_info->host_initiated) {
kvm_synchronize_tsc(vcpu, data);
} else {
- u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
+ u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
adjust_tsc_offset_guest(vcpu, adj);
vcpu->arch.ia32_tsc_adjust_msr += adj;
}
@@ -3402,7 +3577,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
- case MSR_K8_SYSCFG:
+ case MSR_AMD64_SYSCFG:
case MSR_K8_TSEG_ADDR:
case MSR_K8_TSEG_MASK:
case MSR_VM_HSAVE_PA:
@@ -3469,10 +3644,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* return L1's TSC value to ensure backwards-compatible
* behavior for migration.
*/
- u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
- vcpu->arch.tsc_offset;
+ u64 offset, ratio;
- msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
+ if (msr_info->host_initiated) {
+ offset = vcpu->arch.l1_tsc_offset;
+ ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ } else {
+ offset = vcpu->arch.tsc_offset;
+ ratio = vcpu->arch.tsc_scaling_ratio;
+ }
+
+ msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset;
break;
}
case MSR_MTRRcap:
@@ -3796,6 +3978,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_TLBFLUSH:
case KVM_CAP_HYPERV_SEND_IPI:
case KVM_CAP_HYPERV_CPUID:
+ case KVM_CAP_HYPERV_ENFORCE_CPUID:
case KVM_CAP_SYS_HYPERV_CPUID:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
@@ -3826,8 +4009,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SGX_ATTRIBUTE:
#endif
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+ case KVM_CAP_SREGS2:
+ case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
r = 1;
break;
+ case KVM_CAP_EXIT_HYPERCALL:
+ r = KVM_EXIT_HYPERCALL_VALID_MASK;
+ break;
case KVM_CAP_SET_GUEST_DEBUG2:
return KVM_GUESTDBG_VALID_MASK;
#ifdef CONFIG_KVM_XEN
@@ -4055,7 +4243,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mark_tsc_unstable("KVM discovered backwards TSC");
if (kvm_check_tsc_unstable()) {
- u64 offset = kvm_compute_tsc_offset(vcpu,
+ u64 offset = kvm_compute_l1_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
@@ -4374,7 +4562,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
memset(&events->reserved, 0, sizeof(events->reserved));
}
-static void kvm_smm_changed(struct kvm_vcpu *vcpu);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
@@ -4434,13 +4622,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
- if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
- if (events->smi.smm)
- vcpu->arch.hflags |= HF_SMM_MASK;
- else
- vcpu->arch.hflags &= ~HF_SMM_MASK;
- kvm_smm_changed(vcpu);
- }
+ if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
+ kvm_smm_changed(vcpu, events->smi.smm);
vcpu->arch.smi_pending = events->smi.pending;
@@ -4724,6 +4907,9 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return static_call(kvm_x86_enable_direct_tlbflush)(vcpu);
+ case KVM_CAP_HYPERV_ENFORCE_CPUID:
+ return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
+
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
vcpu->arch.pv_cpuid.enforce = cap->args[0];
if (vcpu->arch.pv_cpuid.enforce)
@@ -4742,6 +4928,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int r;
union {
+ struct kvm_sregs2 *sregs2;
struct kvm_lapic_state *lapic;
struct kvm_xsave *xsave;
struct kvm_xcrs *xcrs;
@@ -5114,6 +5301,28 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
#endif
+ case KVM_GET_SREGS2: {
+ u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!u.sregs2)
+ goto out;
+ __get_sregs2(vcpu, u.sregs2);
+ r = -EFAULT;
+ if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_SREGS2: {
+ u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
+ if (IS_ERR(u.sregs2)) {
+ r = PTR_ERR(u.sregs2);
+ u.sregs2 = NULL;
+ goto out;
+ }
+ r = __set_sregs2(vcpu, u.sregs2);
+ break;
+ }
default:
r = -EINVAL;
}
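Userspace-side sketch of the two new ioctls wired up above; struct kvm_sregs2, KVM_GET_SREGS2/KVM_SET_SREGS2 and the PDPTRS flag come from the uapi changes elsewhere in this series, and the fd arguments are assumed to be open vCPU fds:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Copy segment/control state including cached PDPTEs; returns 0 on success. */
int copy_sregs2(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_sregs2 sregs2;

	if (ioctl(src_vcpu_fd, KVM_GET_SREGS2, &sregs2))
		return -1;

	/* If the source vCPU was using PAE paging, its cached PDPTEs travel
	 * with the state instead of being re-read from guest memory. */
	if (sregs2.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID) {
		/* sregs2.pdptrs[0..3] are valid here */
	}

	return ioctl(dst_vcpu_fd, KVM_SET_SREGS2, &sregs2);
}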
@@ -5433,6 +5642,21 @@ split_irqchip_unlock:
if (kvm_x86_ops.vm_copy_enc_context_from)
r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]);
return r;
+ case KVM_CAP_EXIT_HYPERCALL:
+ if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
+ r = -EINVAL;
+ break;
+ }
+ kvm->arch.hypercall_exit_enabled = cap->args[0];
+ r = 0;
+ break;
+ case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
+ r = -EINVAL;
+ if (cap->args[0] & ~1)
+ break;
+ kvm->arch.exit_on_emulation_error = cap->args[0];
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
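Userspace-side sketch of opting in to the two new capabilities handled above; KVM_ENABLE_CAP on a VM fd is pre-existing API, and vm_fd is a hypothetical open VM fd:

#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>

void enable_new_caps(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_EXIT_HYPERCALL,
		.args = { 1ULL << KVM_HC_MAP_GPA_RANGE },
	};

	/* Forward KVM_HC_MAP_GPA_RANGE hypercalls to userspace. */
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

	cap = (struct kvm_enable_cap) {
		.cap = KVM_CAP_EXIT_ON_EMULATION_FAILURE,
		.args = { 1 },
	};

	/* Receive instruction bytes when emulation fails (see the
	 * emulation-failure exit handling later in this patch). */
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}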
@@ -5468,14 +5692,18 @@ static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
struct kvm_msr_filter_range *user_range)
{
- struct msr_bitmap_range range;
unsigned long *bitmap = NULL;
size_t bitmap_size;
- int r;
if (!user_range->nmsrs)
return 0;
+ if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE))
+ return -EINVAL;
+
+ if (!user_range->flags)
+ return -EINVAL;
+
bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
return -EINVAL;
@@ -5484,31 +5712,15 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
- range = (struct msr_bitmap_range) {
+ msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
.flags = user_range->flags,
.base = user_range->base,
.nmsrs = user_range->nmsrs,
.bitmap = bitmap,
};
- if (range.flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) {
- r = -EINVAL;
- goto err;
- }
-
- if (!range.flags) {
- r = -EINVAL;
- goto err;
- }
-
- /* Everything ok, add this range identifier. */
- msr_filter->ranges[msr_filter->count] = range;
msr_filter->count++;
-
return 0;
-err:
- kfree(bitmap);
- return r;
}
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
@@ -5559,6 +5771,41 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
return 0;
}
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+static int kvm_arch_suspend_notifier(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i, ret = 0;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.pv_time_enabled)
+ continue;
+
+ ret = kvm_set_guest_paused(vcpu);
+ if (ret) {
+ kvm_err("Failed to pause guest VCPU%d: %d\n",
+ vcpu->vcpu_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+
+ return ret ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
+{
+ switch (state) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ return kvm_arch_suspend_notifier(kvm);
+ }
+
+ return NOTIFY_DONE;
+}
+#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -5937,7 +6184,8 @@ static void kvm_init_msr_list(void)
continue;
break;
case MSR_TSC_AUX:
- if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+ if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
+ !kvm_cpu_cap_has(X86_FEATURE_RDPID))
continue;
break;
case MSR_IA32_UMWAIT_CONTROL:
@@ -7032,20 +7280,22 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
return emul_to_vcpu(ctxt)->arch.hflags;
}
-static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
{
- emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ kvm_smm_changed(vcpu, false);
}
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
const char *smstate)
{
- return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate);
+ return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
}
-static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
{
- kvm_smm_changed(emul_to_vcpu(ctxt));
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
}
static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
@@ -7094,9 +7344,9 @@ static const struct x86_emulate_ops emulate_ops = {
.guest_has_fxsr = emulator_guest_has_fxsr,
.set_nmi_mask = emulator_set_nmi_mask,
.get_hflags = emulator_get_hflags,
- .set_hflags = emulator_set_hflags,
- .pre_leave_smm = emulator_pre_leave_smm,
- .post_leave_smm = emulator_post_leave_smm,
+ .exiting_smm = emulator_exiting_smm,
+ .leave_smm = emulator_leave_smm,
+ .triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
};
@@ -7171,6 +7421,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
+ ctxt->interruptibility = 0;
+ ctxt->have_exception = false;
+ ctxt->exception.vector = -1;
+ ctxt->perm_ok = false;
+
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
@@ -7197,8 +7452,33 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
+static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ u32 insn_size = ctxt->fetch.end - ctxt->fetch.data;
+ struct kvm_run *run = vcpu->run;
+
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ run->emulation_failure.ndata = 0;
+ run->emulation_failure.flags = 0;
+
+ if (insn_size) {
+ run->emulation_failure.ndata = 3;
+ run->emulation_failure.flags |=
+ KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
+ run->emulation_failure.insn_size = insn_size;
+ memset(run->emulation_failure.insn_bytes, 0x90,
+ sizeof(run->emulation_failure.insn_bytes));
+ memcpy(run->emulation_failure.insn_bytes,
+ ctxt->fetch.data, insn_size);
+ }
+}
+
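Userspace-side sketch of consuming the payload filled in above; run is assumed to be the vCPU's mmap'ed struct kvm_run, and the flag and field names are the ones this series adds to the uapi:

#include <stdio.h>
#include <linux/kvm.h>

void report_emulation_failure(struct kvm_run *run)
{
	__u32 i;

	if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR ||
	    run->emulation_failure.suberror != KVM_INTERNAL_ERROR_EMULATION)
		return;

	if (!(run->emulation_failure.flags &
	      KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
		return;

	fprintf(stderr, "emulation failed on:");
	for (i = 0; i < run->emulation_failure.insn_size; i++)
		fprintf(stderr, " %02x", run->emulation_failure.insn_bytes[i]);
	fputc('\n', stderr);
}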
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
+ struct kvm *kvm = vcpu->kvm;
+
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
@@ -7207,10 +7487,9 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
return 1;
}
- if (emulation_type & EMULTYPE_SKIP) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
+ if (kvm->arch.exit_on_emulation_error ||
+ (emulation_type & EMULTYPE_SKIP)) {
+ prepare_emulation_failure_exit(vcpu);
return 0;
}
@@ -7352,11 +7631,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);
-static void kvm_smm_changed(struct kvm_vcpu *vcpu)
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
- if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
- /* This is a good place to trace that we are exiting SMM. */
- trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
+ trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
+
+ if (entering_smm) {
+ vcpu->arch.hflags |= HF_SMM_MASK;
+ } else {
+ vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
/* Process a latched INIT or SMI, if any. */
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -7506,14 +7788,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
- ctxt->interruptibility = 0;
- ctxt->have_exception = false;
- ctxt->exception.vector = -1;
- ctxt->perm_ok = false;
-
- ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
- r = x86_decode_insn(ctxt, insn, insn_len);
+ r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
@@ -8040,6 +8315,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
/*
+ * Indirection to move queue_work() out of the tk_core.seq write-held
+ * region to prevent possible deadlocks against time accessors, which
+ * are invoked with work-related locks held.
+ */
+static void pvclock_irq_work_fn(struct irq_work *w)
+{
+ queue_work(system_long_wq, &pvclock_gtod_work);
+}
+
+static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
+
+/*
* Notification about pvclock gtod data update.
*/
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
@@ -8050,13 +8337,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
update_pvclock_gtod(tk);
- /* disable master clock if host does not trust, or does not
- * use, TSC based clocksource.
+ /*
+ * Disable master clock if host does not trust, or does not use,
+ * TSC-based clocksource. Delegate queue_work() to irq_work as
+ * this is invoked with tk_core.seq write held.
*/
if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
atomic_read(&kvm_guest_has_master_clock) != 0)
- queue_work(system_long_wq, &pvclock_gtod_work);
-
+ irq_work_queue(&pvclock_irq_work);
return 0;
}
@@ -8118,6 +8406,7 @@ int kvm_arch_init(void *opaque)
printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n");
goto out_free_x86_emulator_cache;
}
+ kvm_nr_uret_msrs = 0;
r = kvm_mmu_module_init();
if (r)
@@ -8168,10 +8457,13 @@ void kvm_arch_exit(void)
cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
+ irq_work_sync(&pvclock_irq_work);
+ cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
free_percpu(user_return_msrs);
+ kmem_cache_destroy(x86_emulator_cache);
kmem_cache_destroy(x86_fpu_cache);
#ifdef CONFIG_KVM_XEN
static_key_deferred_flush(&kvm_xen_enabled);
@@ -8271,16 +8563,15 @@ bool kvm_apicv_activated(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_apicv_activated);
-void kvm_apicv_init(struct kvm *kvm, bool enable)
+static void kvm_apicv_init(struct kvm *kvm)
{
- if (enable)
+ if (enable_apicv)
clear_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
else
set_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
}
-EXPORT_SYMBOL_GPL(kvm_apicv_init);
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
{
@@ -8289,6 +8580,9 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
vcpu->stat.directed_yield_attempted++;
+ if (single_task_running())
+ goto no_yield;
+
rcu_read_lock();
map = rcu_dereference(vcpu->kvm->arch.apic_map);
@@ -8313,6 +8607,17 @@ no_yield:
return;
}
+static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
+{
+ u64 ret = vcpu->run->hypercall.ret;
+
+ if (!is_64_bit_mode(vcpu))
+ ret = (u32)ret;
+ kvm_rax_write(vcpu, ret);
+ ++vcpu->stat.hypercalls;
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
@@ -8378,6 +8683,28 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
kvm_sched_yield(vcpu, a0);
ret = 0;
break;
+ case KVM_HC_MAP_GPA_RANGE: {
+ u64 gpa = a0, npages = a1, attrs = a2;
+
+ ret = -KVM_ENOSYS;
+ if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
+ break;
+
+ if (!PAGE_ALIGNED(gpa) || !npages ||
+ gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
+ ret = -KVM_EINVAL;
+ break;
+ }
+
+ vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+ vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
+ vcpu->run->hypercall.args[0] = gpa;
+ vcpu->run->hypercall.args[1] = npages;
+ vcpu->run->hypercall.args[2] = attrs;
+ vcpu->run->hypercall.longmode = op_64_bit;
+ vcpu->arch.complete_userspace_io = complete_hypercall_exit;
+ return 0;
+ }
default:
ret = -KVM_ENOSYS;
break;
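Userspace-side sketch of handling the exit generated above; the run->hypercall fields mirror the assignments in the KVM_HC_MAP_GPA_RANGE case, run->hypercall.ret is what complete_hypercall_exit() hands back to the guest, and remap_guest_range() is a hypothetical VMM helper:

#include <linux/kvm.h>
#include <linux/kvm_para.h>

extern void remap_guest_range(__u64 gpa, __u64 npages, __u64 attrs); /* hypothetical */

void handle_map_gpa_range(struct kvm_run *run)
{
	__u64 gpa, npages, attrs;

	if (run->exit_reason != KVM_EXIT_HYPERCALL ||
	    run->hypercall.nr != KVM_HC_MAP_GPA_RANGE)
		return;

	gpa    = run->hypercall.args[0];
	npages = run->hypercall.args[1];
	attrs  = run->hypercall.args[2];

	/* e.g. convert [gpa, gpa + npages * 4096) between shared and private */
	remap_guest_range(gpa, npages, attrs);

	/* complete_hypercall_exit() copies this into the guest's RAX. */
	run->hypercall.ret = 0;
}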
@@ -8461,9 +8788,6 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
int kvm_check_nested_events(struct kvm_vcpu *vcpu)
{
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
- return -EIO;
-
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
kvm_x86_ops.nested_ops->triple_fault(vcpu);
return 1;
@@ -8479,7 +8803,7 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
static_call(kvm_x86_queue_exception)(vcpu);
}
-static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
{
int r;
bool can_inject = true;
@@ -8526,7 +8850,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (is_guest_mode(vcpu)) {
r = kvm_check_nested_events(vcpu);
if (r < 0)
- goto busy;
+ goto out;
}
/* try to inject new event if pending */
@@ -8568,7 +8892,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (vcpu->arch.smi_pending) {
r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
vcpu->arch.smi_pending = false;
++vcpu->arch.smi_count;
@@ -8581,7 +8905,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (vcpu->arch.nmi_pending) {
r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
@@ -8596,7 +8920,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (kvm_cpu_has_injectable_intr(vcpu)) {
r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
static_call(kvm_x86_set_irq)(vcpu);
@@ -8612,11 +8936,14 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
*req_immediate_exit = true;
WARN_ON(vcpu->arch.exception.pending);
- return;
+ return 0;
-busy:
- *req_immediate_exit = true;
- return;
+out:
+ if (r == -EBUSY) {
+ *req_immediate_exit = true;
+ r = 0;
+ }
+ return r;
}
static void process_nmi(struct kvm_vcpu *vcpu)
@@ -8795,10 +9122,9 @@ static void enter_smm(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
struct desc_ptr dt;
+ unsigned long cr0;
char buf[512];
- u32 cr0;
- trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
memset(buf, 0, 512);
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
@@ -8808,13 +9134,13 @@ static void enter_smm(struct kvm_vcpu *vcpu)
enter_smm_save_state_32(vcpu, buf);
/*
- * Give pre_enter_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. leave guest mode) after we've saved the state into
- * the SMM state-save area.
+ * Give enter_smm() a chance to make ISA-specific changes to the vCPU
+ * state (e.g. leave guest mode) after we've saved the state into the
+ * SMM state-save area.
*/
- static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
+ static_call(kvm_x86_enter_smm)(vcpu, buf);
- vcpu->arch.hflags |= HF_SMM_MASK;
+ kvm_smm_changed(vcpu, true);
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
if (static_call(kvm_x86_get_nmi_mask)(vcpu))
@@ -8903,6 +9229,15 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
kvm_apic_update_apicv(vcpu);
static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
+
+ /*
+ * When APICv gets disabled, we may still have injected interrupts
+ * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
+ * still active when the interrupt got accepted. Make sure
+ * inject_pending_event() is called to check for that.
+ */
+ if (!vcpu->arch.apicv_active)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
@@ -9078,7 +9413,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
- if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
kvm_vcpu_flush_tlb_guest(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
@@ -9171,13 +9506,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
kvm_xen_has_interrupt(vcpu)) {
++vcpu->stat.req_event;
- kvm_apic_accept_events(vcpu);
+ r = kvm_apic_accept_events(vcpu);
+ if (r < 0) {
+ r = 0;
+ goto out;
+ }
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
r = 1;
goto out;
}
- inject_pending_event(vcpu, &req_immediate_exit);
+ r = inject_pending_event(vcpu, &req_immediate_exit);
+ if (r < 0) {
+ r = 0;
+ goto out;
+ }
if (req_int_win)
static_call(kvm_x86_enable_irq_window)(vcpu);
@@ -9379,7 +9722,8 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
return 1;
}
- kvm_apic_accept_events(vcpu);
+ if (kvm_apic_accept_events(vcpu) < 0)
+ return 0;
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
case KVM_MP_STATE_AP_RESET_HOLD:
@@ -9425,7 +9769,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (r <= 0)
break;
- kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
@@ -9603,7 +9947,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
kvm_vcpu_block(vcpu);
- kvm_apic_accept_events(vcpu);
+ if (kvm_apic_accept_events(vcpu) < 0) {
+ r = 0;
+ goto out;
+ }
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
r = -EAGAIN;
if (signal_pending(current)) {
@@ -9752,7 +10099,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
-static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct desc_ptr dt;
@@ -9785,14 +10132,36 @@ skip_protected_regs:
sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
+}
- memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
+static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ __get_sregs_common(vcpu, sregs);
+
+ if (vcpu->arch.guest_state_protected)
+ return;
if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
set_bit(vcpu->arch.interrupt.nr,
(unsigned long *)sregs->interrupt_bitmap);
}
+static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
+{
+ int i;
+
+ __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
+
+ if (vcpu->arch.guest_state_protected)
+ return;
+
+ if (is_pae_paging(vcpu)) {
+ for (i = 0 ; i < 4 ; i++)
+ sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
+ sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
+ }
+}
+
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -9805,11 +10174,17 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ int r;
+
vcpu_load(vcpu);
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
- kvm_apic_accept_events(vcpu);
+ r = kvm_apic_accept_events(vcpu);
+ if (r < 0)
+ goto out;
+ r = 0;
+
if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
vcpu->arch.pv.pv_unhalted)
@@ -9817,10 +10192,11 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
else
mp_state->mp_state = vcpu->arch.mp_state;
+out:
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
- return 0;
+ return r;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
@@ -9904,24 +10280,23 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvm_is_valid_cr4(vcpu, sregs->cr4);
}
-static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
+ int *mmu_reset_needed, bool update_pdptrs)
{
struct msr_data apic_base_msr;
- int mmu_reset_needed = 0;
- int pending_vec, max_bits, idx;
+ int idx;
struct desc_ptr dt;
- int ret = -EINVAL;
if (!kvm_is_valid_sregs(vcpu, sregs))
- goto out;
+ return -EINVAL;
apic_base_msr.data = sregs->apic_base;
apic_base_msr.host_initiated = true;
if (kvm_set_apic_base(vcpu, &apic_base_msr))
- goto out;
+ return -EINVAL;
if (vcpu->arch.guest_state_protected)
- goto skip_protected_regs;
+ return 0;
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
@@ -9931,31 +10306,30 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
static_call(kvm_x86_set_gdt)(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
- mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
+ *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
kvm_set_cr8(vcpu, sregs->cr8);
- mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
+ *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
static_call(kvm_x86_set_efer)(vcpu, sregs->efer);
- mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
+ *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
- mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
+ *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (is_pae_paging(vcpu)) {
- load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
- mmu_reset_needed = 1;
+ if (update_pdptrs) {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ if (is_pae_paging(vcpu)) {
+ load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
+ *mmu_reset_needed = 1;
+ }
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
- if (mmu_reset_needed)
- kvm_mmu_reset_context(vcpu);
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -9975,20 +10349,63 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
!is_protmode(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-skip_protected_regs:
+ return 0;
+}
+
+static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ int pending_vec, max_bits;
+ int mmu_reset_needed = 0;
+ int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
+
+ if (ret)
+ return ret;
+
+ if (mmu_reset_needed)
+ kvm_mmu_reset_context(vcpu);
+
max_bits = KVM_NR_INTERRUPTS;
pending_vec = find_first_bit(
(const unsigned long *)sregs->interrupt_bitmap, max_bits);
+
if (pending_vec < max_bits) {
kvm_queue_interrupt(vcpu, pending_vec, false);
pr_debug("Set back pending irq %d\n", pending_vec);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
+ return 0;
+}
- kvm_make_request(KVM_REQ_EVENT, vcpu);
+static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
+{
+ int mmu_reset_needed = 0;
+ bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
+ bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
+ !(sregs2->efer & EFER_LMA);
+ int i, ret;
- ret = 0;
-out:
- return ret;
+ if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
+ return -EINVAL;
+
+ if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
+ return -EINVAL;
+
+ ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
+ &mmu_reset_needed, !valid_pdptrs);
+ if (ret)
+ return ret;
+
+ if (valid_pdptrs) {
+ for (i = 0; i < 4 ; i++)
+ kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
+
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ mmu_reset_needed = 1;
+ vcpu->arch.pdptrs_from_userspace = true;
+ }
+ if (mmu_reset_needed)
+ kvm_mmu_reset_context(vcpu);
+ return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -10044,8 +10461,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
+ vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
/*
* Trigger an rflags update that will inject or remove the trace
@@ -10213,13 +10629,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct page *page;
int r;
+ vcpu->arch.last_vmentry_cpu = -1;
+
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
- kvm_set_tsc_khz(vcpu, max_tsc_khz);
-
r = kvm_mmu_create(vcpu);
if (r < 0)
return r;
@@ -10279,6 +10695,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.pending_external_vector = -1;
vcpu->arch.preempted_in_kernel = false;
+#if IS_ENABLED(CONFIG_HYPERV)
+ vcpu->arch.hv_root_tdp = INVALID_PAGE;
+#endif
+
r = static_call(kvm_x86_vcpu_create)(vcpu);
if (r)
goto free_guest_fpu;
@@ -10287,8 +10707,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
+ kvm_set_tsc_khz(vcpu, max_tsc_khz);
kvm_vcpu_reset(vcpu, false);
- kvm_init_mmu(vcpu, false);
+ kvm_init_mmu(vcpu);
vcpu_put(vcpu);
return 0;
@@ -10362,6 +10783,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
+ unsigned long old_cr0 = kvm_read_cr0(vcpu);
+
kvm_lapic_reset(vcpu, init_event);
vcpu->arch.hflags = 0;
@@ -10430,6 +10853,17 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.ia32_xss = 0;
static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
+
+ /*
+ * Reset the MMU context if paging was enabled prior to INIT (which is
+ * implied if CR0.PG=1 as CR0 will be '0' prior to RESET). Unlike the
+ * standard CR0/CR4/EFER modification paths, only CR0.PG needs to be
+ * checked because it is unconditionally cleared on INIT and all other
+ * paging related bits are ignored if paging is disabled, i.e. CR0.WP,
+ * CR4, and EFER changes are all irrelevant if CR0.PG was '0'.
+ */
+ if (old_cr0 & X86_CR0_PG)
+ kvm_mmu_reset_context(vcpu);
}
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
@@ -10547,6 +10981,9 @@ int kvm_arch_hardware_setup(void *opaque)
int r;
rdmsrl_safe(MSR_EFER, &host_efer);
+ if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) &&
+ !(host_efer & EFER_NX)))
+ return -EIO;
if (boot_cpu_has(X86_FEATURE_XSAVES))
rdmsrl(MSR_IA32_XSS, host_xss);
@@ -10662,9 +11099,15 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.guest_can_read_msr_platform_info = true;
+#if IS_ENABLED(CONFIG_HYPERV)
+ spin_lock_init(&kvm->arch.hv_root_tdp_lock);
+ kvm->arch.hv_root_tdp = INVALID_PAGE;
+#endif
+
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
+ kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm);
@@ -10825,17 +11268,23 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_hv_destroy_vm(kvm);
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+static void memslot_rmap_free(struct kvm_memory_slot *slot)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
+ }
+}
- if (i == 0)
- continue;
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ int i;
+
+ memslot_rmap_free(slot);
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
@@ -10843,11 +11292,79 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
kvm_page_track_free_memslot(slot);
}
-static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
- unsigned long npages)
+static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
+ const int sz = sizeof(*slot->arch.rmap[0]);
int i;
+ for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+ int level = i + 1;
+ int lpages = gfn_to_index(slot->base_gfn + npages - 1,
+ slot->base_gfn, level) + 1;
+
+ WARN_ON(slot->arch.rmap[i]);
+
+ slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
+ if (!slot->arch.rmap[i]) {
+ memslot_rmap_free(slot);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int alloc_all_memslots_rmaps(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
+ int r, i;
+
+ /*
+ * Check if memslots already have rmaps early before acquiring
+ * the slots_arch_lock below.
+ */
+ if (kvm_memslots_have_rmaps(kvm))
+ return 0;
+
+ mutex_lock(&kvm->slots_arch_lock);
+
+ /*
+ * Read memslots_have_rmaps again, under the slots arch lock,
+ * before allocating the rmaps
+ */
+ if (kvm_memslots_have_rmaps(kvm)) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+ }
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(slot, slots) {
+ r = memslot_rmap_alloc(slot, slot->npages);
+ if (r) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return r;
+ }
+ }
+ }
+
+ /*
+ * Ensure that memslots_have_rmaps becomes true strictly after
+ * all the rmap pointers are set.
+ */
+ smp_store_release(&kvm->arch.memslots_have_rmaps, true);
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+}
+
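alloc_all_memslots_rmaps() above follows a check / lock / re-check / publish pattern: the unlocked read is only an optimization, the re-check under slots_arch_lock is what makes the allocation race-free, and smp_store_release() orders the rmap pointer stores before the flag that advertises them (the reader side is expected to pair this with an acquire load). A generic sketch of the same pattern, with hypothetical names:

/* Generic double-checked initialization sketch; not the actual KVM code. */
struct lazy_thing {
	struct mutex lock;
	void *data;
	bool ready;			/* published with release semantics */
};

static int lazy_init(struct lazy_thing *t)
{
	if (smp_load_acquire(&t->ready))	/* unlocked fast path */
		return 0;

	mutex_lock(&t->lock);
	if (smp_load_acquire(&t->ready)) {	/* lost the race; already done */
		mutex_unlock(&t->lock);
		return 0;
	}

	t->data = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!t->data) {
		mutex_unlock(&t->lock);
		return -ENOMEM;
	}

	/* Make the data visible before the flag that advertises it. */
	smp_store_release(&t->ready, true);
	mutex_unlock(&t->lock);
	return 0;
}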
+static int kvm_alloc_memslot_metadata(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ int i, r;
+
/*
* Clear out the previous array pointers for the KVM_MR_MOVE case. The
* old arrays will be freed by __kvm_set_memory_region() if installing
@@ -10855,7 +11372,13 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
*/
memset(&slot->arch, 0, sizeof(slot->arch));
- for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+ if (kvm_memslots_have_rmaps(kvm)) {
+ r = memslot_rmap_alloc(slot, npages);
+ if (r)
+ return r;
+ }
+
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
unsigned long ugfn;
int lpages;
@@ -10864,14 +11387,6 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
lpages = gfn_to_index(slot->base_gfn + npages - 1,
slot->base_gfn, level) + 1;
- slot->arch.rmap[i] =
- kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
- GFP_KERNEL_ACCOUNT);
- if (!slot->arch.rmap[i])
- goto out_free;
- if (i == 0)
- continue;
-
linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
if (!linfo)
goto out_free;
@@ -10901,12 +11416,9 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
return 0;
out_free:
- for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- kvfree(slot->arch.rmap[i]);
- slot->arch.rmap[i] = NULL;
- if (i == 0)
- continue;
+ memslot_rmap_free(slot);
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
@@ -10935,7 +11447,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
- return kvm_alloc_memslot_metadata(memslot,
+ return kvm_alloc_memslot_metadata(kvm, memslot,
mem->memory_size >> PAGE_SHIFT);
return 0;
}
@@ -11011,36 +11523,19 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
*/
kvm_mmu_zap_collapsible_sptes(kvm, new);
} else {
- /* By default, write-protect everything to log writes. */
- int level = PG_LEVEL_4K;
+ /*
+ * Initially-all-set does not require write-protecting any page,
+ * because all pages are assumed to be dirty already.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ return;
if (kvm_x86_ops.cpu_dirty_log_size) {
- /*
- * Clear all dirty bits, unless pages are treated as
- * dirty from the get-go.
- */
- if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
- kvm_mmu_slot_leaf_clear_dirty(kvm, new);
-
- /*
- * Write-protect large pages on write so that dirty
- * logging happens at 4k granularity. No need to
- * write-protect small SPTEs since write accesses are
- * logged by the CPU via dirty bits.
- */
- level = PG_LEVEL_2M;
- } else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
- /*
- * If we're with initial-all-set, we don't need
- * to write protect any small page because
- * they're reported as dirty already. However
- * we still need to write-protect huge pages
- * so that the page split can happen lazily on
- * the first write to the huge page.
- */
- level = PG_LEVEL_2M;
+ kvm_mmu_slot_leaf_clear_dirty(kvm, new);
+ kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
+ } else {
+ kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
}
- kvm_mmu_slot_remove_write_access(kvm, new, level);
}
}
@@ -11428,7 +11923,8 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
void kvm_arch_start_assignment(struct kvm *kvm)
{
- atomic_inc(&kvm->arch.assigned_device_count);
+ if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
+ static_call_cond(kvm_x86_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
@@ -11608,8 +12104,6 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
bool pcid_enabled;
struct x86_exception e;
- unsigned i;
- unsigned long roots_to_free = 0;
struct {
u64 pcid;
u64 gla;
@@ -11643,23 +12137,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
return 1;
}
- if (kvm_get_active_pcid(vcpu) == operand.pcid) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- }
-
- for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
- == operand.pcid)
- roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
-
- kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
- /*
- * If neither the current cr3 nor any of the prev_roots use the
- * given PCID, then nothing needs to be done here because a
- * resync will happen anyway before switching to any other CR3.
- */
-
+ kvm_invalidate_pcid(vcpu, operand.pcid);
return kvm_skip_emulated_instruction(vcpu);
case INVPCID_TYPE_ALL_NON_GLOBAL:
@@ -11672,7 +12150,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
- kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
return kvm_skip_emulated_instruction(vcpu);
default:
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 521f74e5bbf2..44ae10312740 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -157,16 +157,6 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
return cs_l;
}
-static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
- return (vcpu->arch.efer & EFER_LMA) &&
- kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
-#else
- return 0;
-#endif
-}
-
static inline bool x86_exception_has_error_code(unsigned int vector)
{
static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index a67afd74232c..a1d24fdc07cf 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -1417,7 +1417,7 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
}
}
-static unsigned long insn_get_effective_ip(struct pt_regs *regs)
+static int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip)
{
unsigned long seg_base = 0;
@@ -1430,10 +1430,12 @@ static unsigned long insn_get_effective_ip(struct pt_regs *regs)
if (!user_64bit_mode(regs)) {
seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
if (seg_base == -1L)
- return 0;
+ return -EINVAL;
}
- return seg_base + regs->ip;
+ *ip = seg_base + regs->ip;
+
+ return 0;
}
/**
@@ -1446,18 +1448,17 @@ static unsigned long insn_get_effective_ip(struct pt_regs *regs)
*
* Returns:
*
- * Number of instruction bytes copied.
- *
- * 0 if nothing was copied.
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
*/
int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long ip;
int not_copied;
- ip = insn_get_effective_ip(regs);
- if (!ip)
- return 0;
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
@@ -1475,18 +1476,17 @@ int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
*
* Returns:
*
- * Number of instruction bytes copied.
- *
- * 0 if nothing was copied.
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
*/
int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long ip;
int not_copied;
- ip = insn_get_effective_ip(regs);
- if (!ip)
- return 0;
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
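With insn_get_effective_ip() now reporting failure explicitly, both fetch helpers have three distinct outcomes: a negative error when the linear RIP cannot be computed, zero when nothing could be copied, and a positive byte count otherwise. A hypothetical caller sketch (names and error codes are illustrative, not from this patch):

/* Hypothetical caller of the new return convention. */
static int demo_fetch_and_decode(struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int nr_copied;

	nr_copied = insn_fetch_from_user(regs, buf);
	if (nr_copied < 0)
		return nr_copied;	/* linear RIP could not be computed */
	if (nr_copied == 0)
		return -EFAULT;		/* nothing was copied from user space */

	/* ... decode at most nr_copied bytes from buf ... */
	return 0;
}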
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 4d32cb06ffd5..ec9922cba30a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -58,12 +58,16 @@ SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_call_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
ANNOTATE_RETPOLINE_SAFE
1: jmp *%\reg
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
+
.endm
/*
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index b93d6cd08a7f..121921b2927c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -5,7 +5,7 @@
#include <xen/xen.h>
#include <asm/fpu/internal.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 1c548ad00752..2d27932c9ac7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (si_code == SEGV_PKUERR)
force_sig_pkuerr((void __user *)address, pkey);
-
- force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ else
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
local_irq_disable();
}
@@ -1186,7 +1186,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
return;
/* kprobes don't want to hook the spurious faults: */
- if (kprobe_page_fault(regs, X86_TRAP_PF))
+ if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
@@ -1239,7 +1239,7 @@ void do_user_addr_fault(struct pt_regs *regs,
}
/* kprobes don't want to hook the spurious faults: */
- if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
+ if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 21ffb03f6c72..74b78840182d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -651,7 +651,7 @@ void __init find_low_pfn_range(void)
highmem_pfn_init();
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
@@ -677,7 +677,7 @@ void __init initmem_init(void)
setup_bootmem_allocator();
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
void __init setup_bootmem_allocator(void)
{
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e527d829e1ed..65ea58527176 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -33,6 +33,7 @@
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
+#include <linux/bootmem_info.h>
#include <asm/processor.h>
#include <asm/bios_ebda.h>
@@ -1269,7 +1270,7 @@ static struct kcore_list kcore_vsyscall;
static void __init register_page_bootmem_info(void)
{
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
int i;
for_each_online_node(i)
@@ -1623,7 +1624,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return err;
}
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long nr_pages)
{
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 12c686c65ea9..60ade7dd71bd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
if (!IS_ENABLED(CONFIG_EFI))
return;
- if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+ if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
+ (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
+ efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
desc->flags |= IORES_MAP_ENCRYPTED;
}
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 04aba7e80a36..470b20208430 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
#define AMD_SME_BIT BIT(0)
#define AMD_SEV_BIT BIT(1)
- /* Check the SEV MSR whether SEV or SME is enabled */
- sev_status = __rdmsr(MSR_AMD64_SEV);
- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
-
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
@@ -519,17 +515,22 @@ void __init sme_enable(struct boot_params *bp)
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
- if (!(eax & feature_mask))
+ /* Check whether SEV or SME is supported */
+ if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
return;
me_mask = 1UL << (ebx & 0x3f);
+ /* Check the SEV MSR whether SEV or SME is enabled */
+ sev_status = __rdmsr(MSR_AMD64_SEV);
+ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
/* Check if memory encryption is enabled */
if (feature_mask == AMD_SME_BIT) {
/*
* No SME if Hypervisor bit is set. This check is here to
* prevent a guest from trying to enable SME. For running as a
- * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
+ * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
* might be other hypervisors which emulate that MSR as non-zero
* or even pass it through to the guest.
* A malicious hypervisor can still trick a guest into this
@@ -542,8 +543,8 @@ void __init sme_enable(struct boot_params *bp)
return;
/* For SME, check the SYSCFG MSR */
- msr = __rdmsr(MSR_K8_SYSCFG);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ msr = __rdmsr(MSR_AMD64_SYSCFG);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
} else {
/* SEV state cannot be controlled by a command line option */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 5eb4dc2b97da..e94da744386f 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
/* make sure all non-reserved blocks are inside the limits */
bi->start = max(bi->start, low);
- bi->end = min(bi->end, high);
+
+ /* preserve info for non-RAM areas above 'max_pfn': */
+ if (bi->end > high) {
+ numa_add_memblk_to(bi->nid, high, bi->end,
+ &numa_reserved_meminfo);
+ bi->end = high;
+ }
/* and there's no empty block */
if (bi->start >= bi->end)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d27cf69e811d..1303ff6ef7be 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -682,6 +682,7 @@ int p4d_clear_huge(p4d_t *p4d)
}
#endif
+#if CONFIG_PGTABLE_LEVELS > 3
/**
* pud_set_huge - setup kernel PUD mapping
*
@@ -721,6 +722,23 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
}
/**
+ * pud_clear_huge - clear kernel PUD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PUD map is found).
+ */
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_large(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+/**
* pmd_set_huge - setup kernel PMD mapping
*
* See text over pud_set_huge() above.
@@ -751,21 +769,6 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
}
/**
- * pud_clear_huge - clear kernel PUD mapping when it is set
- *
- * Returns 1 on success and 0 on failure (no PUD map is found).
- */
-int pud_clear_huge(pud_t *pud)
-{
- if (pud_large(*pud)) {
- pud_clear(pud);
- return 1;
- }
-
- return 0;
-}
-
-/**
* pmd_clear_huge - clear kernel PMD mapping when it is set
*
* Returns 1 on success and 0 on failure (no PMD map is found).
@@ -779,6 +782,7 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
+#endif
#ifdef CONFIG_X86_64
/**
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index a2332eef66e9..4a67b922bce1 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -192,6 +192,10 @@ static const struct file_operations fops_init_pkru = {
static int __init create_init_pkru_value(void)
{
+ /* Do not expose the file if pkeys are not supported. */
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return 0;
+
debugfs_create_file("init_pkru", S_IRUSR | S_IWUSR,
arch_debugfs_dir, NULL, &fops_init_pkru);
return 0;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 78804680e923..cfe6b1e85fa6 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,6 +14,7 @@
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
+#include <asm/perf_event.h>
#include "mm_internal.h"
@@ -404,9 +405,14 @@ static inline void cr4_update_pce_mm(struct mm_struct *mm)
{
if (static_branch_unlikely(&rdpmc_always_available_key) ||
(!static_branch_unlikely(&rdpmc_never_available_key) &&
- atomic_read(&mm->context.perf_rdpmc_allowed)))
+ atomic_read(&mm->context.perf_rdpmc_allowed))) {
+ /*
+ * Clear the existing dirty counters to prevent
+ * leaking stale counter values to an RDPMC task.
+ */
+ perf_clear_dirty_counters();
cr4_set_bits_irqsoff(X86_CR4_PCE);
- else
+ } else
cr4_clear_bits_irqsoff(X86_CR4_PCE);
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 2a2e290fa5d8..e835164189f1 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -31,7 +31,7 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
}
#define EMIT(bytes, len) \
- do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
+ do { prog = emit_code(prog, bytes, len); } while (0)
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
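Dropping the cnt accumulator from EMIT() is safe because the advancing prog pointer already encodes how many bytes were emitted; the JIT recovers lengths as a pointer difference (prog - temp, as in the INSN_SZ_DIFF macro further down). A minimal standalone sketch of that idea:

/* Minimal sketch: emitted length falls out of the advancing buffer pointer. */
static u8 *demo_emit_byte(u8 *ptr, u8 byte)
{
	*ptr = byte;
	return ptr + 1;
}

static int demo_emit_nop3(u8 *buf)
{
	u8 *prog = buf;

	prog = demo_emit_byte(prog, 0x0f);	/* 3-byte NOP: 0f 1f 00 */
	prog = demo_emit_byte(prog, 0x1f);
	prog = demo_emit_byte(prog, 0x00);

	return prog - buf;			/* length, no separate counter */
}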
@@ -239,7 +239,6 @@ struct jit_context {
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
- int cnt = 0;
if (callee_regs_used[0])
EMIT1(0x53); /* push rbx */
@@ -255,7 +254,6 @@ static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
- int cnt = 0;
if (callee_regs_used[3])
EMIT2(0x41, 0x5F); /* pop r15 */
@@ -277,13 +275,12 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
bool tail_call_reachable, bool is_subprog)
{
u8 *prog = *pprog;
- int cnt = X86_PATCH_SIZE;
/* BPF trampoline can be made to work without these nops,
* but let's waste 5 bytes for now and optimize later
*/
- memcpy(prog, x86_nops[5], cnt);
- prog += cnt;
+ memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
+ prog += X86_PATCH_SIZE;
if (!ebpf_from_cbpf) {
if (tail_call_reachable && !is_subprog)
EMIT2(0x31, 0xC0); /* xor eax, eax */
@@ -303,7 +300,6 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
u8 *prog = *pprog;
- int cnt = 0;
s64 offset;
offset = func - (ip + X86_PATCH_SIZE);
@@ -423,7 +419,6 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
int off1 = 42;
int off2 = 31;
int off3 = 9;
- int cnt = 0;
/* count the additional bytes used for popping callee regs from stack
* that need to be taken into account for each of the offsets that
@@ -513,7 +508,6 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
int pop_bytes = 0;
int off1 = 20;
int poke_off;
- int cnt = 0;
/* count the additional bytes used for popping callee regs to stack
* that need to be taken into account for jump offset that is used for
@@ -615,7 +609,6 @@ static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
{
u8 *prog = *pprog;
u8 b1, b2, b3;
- int cnt = 0;
/*
* Optimization: if imm32 is positive, use 'mov %eax, imm32'
@@ -655,7 +648,6 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
const u32 imm32_hi, const u32 imm32_lo)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
/*
@@ -678,7 +670,6 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is64) {
/* mov dst, src */
@@ -697,7 +688,6 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is_imm8(off)) {
/* 1-byte signed displacement.
@@ -720,7 +710,6 @@ static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is64)
EMIT1(add_2mod(0x48, dst_reg, src_reg));
@@ -733,7 +722,6 @@ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
switch (size) {
case BPF_B:
@@ -764,7 +752,6 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
switch (size) {
case BPF_B:
@@ -799,7 +786,6 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
u8 *prog = *pprog;
- int cnt = 0;
EMIT1(0xF0); /* lock prefix */
@@ -869,10 +855,10 @@ static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
}
}
-static int emit_nops(u8 **pprog, int len)
+static void emit_nops(u8 **pprog, int len)
{
u8 *prog = *pprog;
- int i, noplen, cnt = 0;
+ int i, noplen;
while (len > 0) {
noplen = len;
@@ -886,8 +872,6 @@ static int emit_nops(u8 **pprog, int len)
}
*pprog = prog;
-
- return cnt;
}
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
@@ -902,7 +886,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
- int i, cnt = 0, excnt = 0;
+ int i, excnt = 0;
int ilen, proglen = 0;
u8 *prog = temp;
int err;
@@ -1297,7 +1281,7 @@ st: if (is_imm8(insn->off))
emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
struct exception_table_entry *ex;
- u8 *_insn = image + proglen;
+ u8 *_insn = image + proglen + (start_of_ldx - temp);
s64 delta;
/* populate jmp_offset for JMP above */
@@ -1576,7 +1560,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, nops);
+ emit_nops(&prog, nops);
}
EMIT2(jmp_cond, jmp_offset);
} else if (is_simm32(jmp_offset)) {
@@ -1622,7 +1606,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, nops);
+ emit_nops(&prog, nops);
}
break;
}
@@ -1647,7 +1631,7 @@ emit_jmp:
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, INSN_SZ_DIFF - 2);
+ emit_nops(&prog, INSN_SZ_DIFF - 2);
}
EMIT2(0xEB, jmp_offset);
} else if (is_simm32(jmp_offset)) {
@@ -1754,7 +1738,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
{
u8 *prog = *pprog;
u8 *jmp_insn;
- int cnt = 0;
/* arg1: mov rdi, progs[i] */
emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
@@ -1822,7 +1805,6 @@ static void emit_align(u8 **pprog, u32 align)
static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
u8 *prog = *pprog;
- int cnt = 0;
s64 offset;
offset = func - (ip + 2 + 4);
@@ -1854,7 +1836,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
u8 **branches)
{
u8 *prog = *pprog;
- int i, cnt = 0;
+ int i;
/* The first fmod_ret program will receive a garbage return value.
* Set this to 0 to avoid confusing the program.
@@ -1950,7 +1932,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
struct bpf_tramp_progs *tprogs,
void *orig_call)
{
- int ret, i, cnt = 0, nr_args = m->nr_args;
+ int ret, i, nr_args = m->nr_args;
int stack_size = nr_args * 8;
struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
@@ -2095,8 +2077,6 @@ static int emit_fallback_jump(u8 **pprog)
*/
err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
- int cnt = 0;
-
EMIT2(0xFF, 0xE2); /* jmp rdx */
#endif
*pprog = prog;
@@ -2106,7 +2086,7 @@ static int emit_fallback_jump(u8 **pprog)
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
u8 *jg_reloc, *prog = *pprog;
- int pivot, err, jg_bytes = 1, cnt = 0;
+ int pivot, err, jg_bytes = 1;
s64 jg_offset;
if (a == b) {
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index ae744b6a0785..dd40d3fea74e 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -284,7 +284,7 @@ static int __init early_root_info_init(void)
/* need to take out [4G, TOM2) for RAM*/
/* SYS_CFG */
- address = MSR_K8_SYSCFG;
+ address = MSR_AMD64_SYSCFG;
rdmsrl(address, val);
/* TOP_MEM2 is enabled? */
if (val & (1<<21)) {
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 02dc64625e64..2edd86649468 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+#define RS690_LOWER_TOP_OF_DRAM2 0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2 0x31
+#define RS690_HTIU_NB_INDEX 0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
+#define RS690_HTIU_NB_DATA 0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+ u32 val = 0;
+ phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+ if (top_of_dram <= (1ULL << 32))
+ return;
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2);
+ pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+ if (val)
+ return;
+
+ pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+ top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
#endif
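The RS690 top-of-DRAM registers above are not directly addressable; they sit behind an index/data pair in PCI config space (write the register number to RS690_HTIU_NB_INDEX, then access it through RS690_HTIU_NB_DATA). A hypothetical read helper mirroring the write sequence used by the fixup:

/* Hypothetical helper: read an RS690 HTIU register via the index/data pair. */
static u32 rs690_htiu_read(struct pci_dev *pdev, u32 index)
{
	u32 val = 0;

	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX, index);
	pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
	return val;
}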
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 8a26e705cb06..147c30a81f15 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -468,7 +468,7 @@ void __init efi_init(void)
*/
if (!efi_runtime_supported())
- pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+ pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");
if (!efi_runtime_supported() || efi_runtime_disabled()) {
efi_memmap_unmap();
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index df7b5477fc4f..7515e78ef898 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -47,7 +47,7 @@
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
/*
* We allocate runtime services regions top-down, starting from -4G, i.e.
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 7850111008a8..b15ebfe40a73 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -450,6 +450,18 @@ void __init efi_free_boot_services(void)
size -= rm_size;
}
+ /*
+ * Don't free memory under 1M for two reasons:
+ * - BIOS might clobber it
+ * - Crash kernel needs it to be reserved
+ */
+ if (start + size < SZ_1M)
+ continue;
+ if (start < SZ_1M) {
+ size -= (SZ_1M - start);
+ start = SZ_1M;
+ }
+
memblock_free_late(start, size);
}
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index f03b64d9cb51..7558139920f8 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -9,6 +9,8 @@
*/
#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
#include <crypto/sha2.h>
#include <asm/purgatory.h>
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 6b1f3a4eeb44..a0b491ae2de8 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -10,7 +10,6 @@
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 1be71ef5e4c4..6534c92d0f83 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -9,7 +9,7 @@
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
@@ -29,14 +29,16 @@ void __init reserve_real_mode(void)
/* Has to be under 1M so we can execute real-mode AP code. */
mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (!mem) {
+ if (!mem)
pr_info("No sub-1M memory is available for the trampoline\n");
- return;
- }
+ else
+ set_real_mode_mem(mem);
- memblock_reserve(mem, size);
- set_real_mode_mem(mem);
- crash_reserve_low_1M();
+ /*
+ * Unconditionally reserve the entire first 1M, see comment in
+ * setup_arch().
+ */
+ memblock_reserve(0, SZ_1M);
}
static void sme_sev_setup_real_mode(struct trampoline_header *th)
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 84c5d1b33d10..cc8391f86cdb 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -123,9 +123,9 @@ SYM_CODE_START(startup_32)
*/
btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
jnc .Ldone
- movl $MSR_K8_SYSCFG, %ecx
+ movl $MSR_AMD64_SYSCFG, %ecx
rdmsr
- bts $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
+ bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
jc .Ldone
/*
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 2ed81e581755..0575decb5e54 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -26,20 +25,17 @@
#define old_mmap sys_old_mmap
-#define __SYSCALL_I386(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
+#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
-#define __SYSCALL_I386(nr, sym) [ nr ] = sym,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_32.h>
};
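The rewritten table above relies on a double expansion of the generated syscalls_32.h header: __SYSCALL is first defined to emit extern declarations, then redefined to emit initializers, which assumes the generated header lists entries densely and in syscall-number order. A self-contained miniature of the same trick, with a macro list standing in for the generated header (all names hypothetical):

/* Miniature of the double-expansion trick; names are illustrative only. */
#define DEMO_SYSCALLS(X)	\
	X(0, demo_read)		\
	X(1, demo_write)

/* First expansion: definitions (the kernel emits extern declarations here). */
#define X_DECL(nr, sym) static long sym(void) { return nr; }
DEMO_SYSCALLS(X_DECL)
#undef X_DECL

/* Second expansion: table initializers, in list order. */
#define X_ENTRY(nr, sym) sym,
static long (*const demo_table[])(void) = {
	DEMO_SYSCALLS(X_ENTRY)
};
#undef X_ENTRY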
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 2e8544dafbb0..95725b5a41ac 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -36,23 +35,15 @@
#define stub_execveat sys_execveat
#define stub_rt_sigreturn sys_rt_sigreturn
-#define __SYSCALL_X32(nr, sym)
-#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
-
-#define __SYSCALL_64(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_64.h>
-#undef __SYSCALL_64
-#define __SYSCALL_64(nr, sym) [ nr ] = sym,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_64.h>
};
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index aa9f50fccc5d..c79bd0af2e8c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/slab.h>
+#include <linux/panic_notifier.h>
#include <xen/xen.h>
#include <xen/features.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 17503fed2017..03149422dce2 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
+ instrumentation_begin();
pr_err("Unknown trap in Xen PV mode.");
BUG();
+ instrumentation_end();
}
#ifdef CONFIG_X86_MCE
@@ -1273,16 +1275,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
+ /* Work out if we support NX */
+ get_cpu_cap(&boot_cpu_data);
+ x86_configure_nx();
+
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
xen_setup_gdt(0);
- /* Work out if we support NX */
- get_cpu_cap(&boot_cpu_data);
- x86_configure_nx();
-
/* Determine virtual and physical address sizes */
get_cpu_address_sizes(&boot_cpu_data);
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 744c2f463845..4361fe4247e3 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -43,7 +43,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -52,11 +52,11 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if XCHAL_HAVE_EXCLUSIVE
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -74,7 +74,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -95,7 +95,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -116,7 +116,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -135,7 +135,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -157,7 +157,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -180,7 +180,7 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
#else /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -198,7 +198,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -218,7 +218,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned int tmp, vval; \
\
@@ -257,7 +257,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* _XTENSA_ATOMIC_H */
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index a175f8aec3fb..3699e2818efb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -80,7 +80,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
}
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
@@ -97,7 +97,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
return __cmpxchg_u32(ptr, old, new);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
@@ -107,11 +107,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
/*
* xchg_u32
@@ -169,7 +169,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#endif
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 37ce25ef92d6..493eb7083b1a 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -192,10 +192,6 @@ static inline unsigned long ___pa(unsigned long va)
#define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-#ifdef CONFIG_DISCONTIGMEM
-# error CONFIG_DISCONTIGMEM not supported
-#endif
-
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index d3a22da4d2c9..eeb2de3a89e5 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -25,7 +25,6 @@
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
@@ -63,7 +62,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
return page;
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
#endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index d7fc45c920c2..bd5aeb795567 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -59,7 +59,6 @@
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
#ifdef CONFIG_MMU
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 856e2da2e397..573df8cea200 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -26,8 +26,8 @@
*
* - flush_tlb_all() flushes all processes TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLB entries
- * - flush_tlb_page(mm, vmaddr) flushes a single page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_page(vma, page) flushes a single page
+ * - flush_tlb_range(vma, vmaddr, end) flushes a range of pages
*/
void local_flush_tlb_all(void);
diff --git a/arch/xtensa/include/asm/unaligned.h b/arch/xtensa/include/asm/unaligned.h
deleted file mode 100644
index 8e7ed046bfed..000000000000
--- a/arch/xtensa/include/asm/unaligned.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Xtensa doesn't handle unaligned accesses efficiently.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _ASM_XTENSA_UNALIGNED_H
-#define _ASM_XTENSA_UNALIGNED_H
-
-#include <asm/byteorder.h>
-
-#ifdef __LITTLE_ENDIAN
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#else
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* _ASM_XTENSA_UNALIGNED_H */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index e5e643752947..b3a22095371b 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -106,6 +106,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9534ef515d74..060165340612 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -304,7 +304,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page = (unsigned long) task_stack_page(p);
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
sp = p->thread.sp;
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index cd85a7a2722b..1254da07ead1 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
cpumask_set_cpu(cpu, mm_cpumask(mm));
enter_lazy_tlb(mm, current);
- preempt_disable();
trace_hardirqs_off();
calibrate_delay();
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 9d76d433d3d6..235d67d6ceb4 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -413,7 +413,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index a3dda25a4e45..21184488c277 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -100,18 +100,12 @@ static void rs_flush_chars(struct tty_struct *tty)
{
}
-static int rs_write_room(struct tty_struct *tty)
+static unsigned int rs_write_room(struct tty_struct *tty)
{
/* Let's say iss can always accept 2K characters.. */
return 2 * 1024;
}
-static int rs_chars_in_buffer(struct tty_struct *tty)
-{
- /* the iss doesn't buffer characters */
- return 0;
-}
-
static void rs_hangup(struct tty_struct *tty)
{
/* Stub, once again.. */
@@ -135,7 +129,6 @@ static const struct tty_operations serial_ops = {
.put_char = rs_put_char,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
- .chars_in_buffer = rs_chars_in_buffer,
.hangup = rs_hangup,
.wait_until_sent = rs_wait_until_sent,
.proc_show = rs_proc_show,
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index ed519aee0ec8..d3433e1bb94e 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/printk.h>
#include <linux/string.h>
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index fc09be7b1347..3cdfa00738e0 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -27,7 +27,6 @@
struct simdisk {
const char *filename;
spinlock_t lock;
- struct request_queue *queue;
struct gendisk *gd;
struct proc_dir_entry *procfile;
int users;
@@ -266,21 +265,13 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
spin_lock_init(&dev->lock);
dev->users = 0;
- dev->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (dev->queue == NULL) {
- pr_err("blk_alloc_queue failed\n");
- goto out_alloc_queue;
- }
-
- dev->gd = alloc_disk(SIMDISK_MINORS);
- if (dev->gd == NULL) {
- pr_err("alloc_disk failed\n");
- goto out_alloc_disk;
- }
+ dev->gd = blk_alloc_disk(NUMA_NO_NODE);
+ if (!dev->gd)
+ return -ENOMEM;
dev->gd->major = simdisk_major;
dev->gd->first_minor = which;
+ dev->gd->minors = SIMDISK_MINORS;
dev->gd->fops = &simdisk_ops;
- dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
set_capacity(dev->gd, 0);
@@ -288,12 +279,6 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
dev->procfile = proc_create_data(tmp, 0644, procdir, &simdisk_proc_ops, dev);
return 0;
-
-out_alloc_disk:
- blk_cleanup_queue(dev->queue);
- dev->queue = NULL;
-out_alloc_queue:
- return -ENOMEM;
}
static int __init simdisk_init(void)
@@ -343,10 +328,10 @@ static void simdisk_teardown(struct simdisk *dev, int which,
char tmp[2] = { '0' + which, 0 };
simdisk_detach(dev);
- if (dev->gd)
+ if (dev->gd) {
del_gendisk(dev->gd);
- if (dev->queue)
- blk_cleanup_queue(dev->queue);
+ blk_cleanup_disk(dev->gd);
+ }
remove_proc_entry(tmp, procdir);
}