Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 12
-rw-r--r--  arch/alpha/Kconfig | 6
-rw-r--r--  arch/alpha/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 6
-rw-r--r--  arch/alpha/kernel/srmcons.c | 18
-rw-r--r--  arch/arm/Kconfig | 37
-rw-r--r--  arch/arm/Kconfig.debug | 2
-rw-r--r--  arch/arm/Makefile | 1
-rw-r--r--  arch/arm/boot/dts/dbx5x0.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/exynos5250-smdk5250.dts | 26
-rw-r--r--  arch/arm/boot/dts/exynos5250.dtsi | 44
-rw-r--r--  arch/arm/boot/dts/highbank.dts | 10
-rw-r--r--  arch/arm/boot/dts/imx23.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/prima2.dtsi | 31
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 30
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-olinuxino.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun5i-a13.dtsi | 23
-rw-r--r--  arch/arm/boot/dts/vt8500.dtsi | 40
-rw-r--r--  arch/arm/boot/dts/wm8505.dtsi | 60
-rw-r--r--  arch/arm/boot/dts/wm8650.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/zynq-7000.dtsi | 4
-rw-r--r--  arch/arm/common/gic.c | 25
-rw-r--r--  arch/arm/crypto/aes-armv4.S | 64
-rw-r--r--  arch/arm/crypto/sha1-armv4-large.S | 24
-rw-r--r--  arch/arm/include/asm/assembler.h | 10
-rw-r--r--  arch/arm/include/asm/cputype.h | 33
-rw-r--r--  arch/arm/include/asm/cti.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h | 6
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 3
-rw-r--r--  arch/arm/include/asm/idmap.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 214
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 82
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h | 47
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 72
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 161
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 56
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 50
-rw-r--r--  arch/arm/include/asm/kvm_psci.h | 23
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 1
-rw-r--r--  arch/arm/include/asm/memory.h | 10
-rw-r--r--  arch/arm/include/asm/opcodes-sec.h | 24
-rw-r--r--  arch/arm/include/asm/opcodes.h | 1
-rw-r--r--  arch/arm/include/asm/outercache.h | 1
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h | 5
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 18
-rw-r--r--  arch/arm/include/asm/pgtable.h | 7
-rw-r--r--  arch/arm/include/asm/psci.h | 36
-rw-r--r--  arch/arm/include/asm/spinlock.h | 16
-rw-r--r--  arch/arm/include/asm/virt.h | 4
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 164
-rw-r--r--  arch/arm/kernel/Makefile | 1
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 25
-rw-r--r--  arch/arm/kernel/bios32.c | 9
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 61
-rw-r--r--  arch/arm/kernel/perf_event.c | 16
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 51
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 18
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 2
-rw-r--r--  arch/arm/kernel/process.c | 13
-rw-r--r--  arch/arm/kernel/psci.c | 211
-rw-r--r--  arch/arm/kernel/sched_clock.c | 4
-rw-r--r--  arch/arm/kernel/smp.c | 31
-rw-r--r--  arch/arm/kernel/smp_twd.c | 53
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/arm/kvm/Kconfig | 56
-rw-r--r--  arch/arm/kvm/Makefile | 21
-rw-r--r--  arch/arm/kvm/arm.c | 1015
-rw-r--r--  arch/arm/kvm/coproc.c | 1046
-rw-r--r--  arch/arm/kvm/coproc.h | 153
-rw-r--r--  arch/arm/kvm/coproc_a15.c | 162
-rw-r--r--  arch/arm/kvm/emulate.c | 373
-rw-r--r--  arch/arm/kvm/guest.c | 222
-rw-r--r--  arch/arm/kvm/init.S | 114
-rw-r--r--  arch/arm/kvm/interrupts.S | 478
-rw-r--r--  arch/arm/kvm/interrupts_head.S | 441
-rw-r--r--  arch/arm/kvm/mmio.c | 153
-rw-r--r--  arch/arm/kvm/mmu.c | 787
-rw-r--r--  arch/arm/kvm/psci.c | 108
-rw-r--r--  arch/arm/kvm/reset.c | 74
-rw-r--r--  arch/arm/kvm/trace.h | 235
-rw-r--r--  arch/arm/mach-davinci/cpuidle.c | 84
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 2
-rw-r--r--  arch/arm/mach-exynos/include/mach/cpufreq.h | 19
-rw-r--r--  arch/arm/mach-exynos/mach-exynos5-dt.c | 6
-rw-r--r--  arch/arm/mach-highbank/Kconfig | 4
-rw-r--r--  arch/arm/mach-highbank/core.h | 1
-rw-r--r--  arch/arm/mach-highbank/highbank.c | 3
-rw-r--r--  arch/arm/mach-omap2/board-2430sdp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-3430sdp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-4430sdp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-cm-t35.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-devkit8000.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-igep0020.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3beagle.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3evm.c | 12
-rw-r--r--  arch/arm/mach-omap2/board-omap3logic.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3pandora.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3stalker.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3touchbook.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap4panda.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-overo.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-rm680.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-zoom-peripherals.c | 2
-rw-r--r--  arch/arm/mach-omap2/devices.c | 45
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 8
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 13
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 2
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 3
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 20
-rw-r--r--  arch/arm/mach-realview/include/mach/irqs-eb.h | 2
-rw-r--r--  arch/arm/mach-s3c24xx/Kconfig | 4
-rw-r--r--  arch/arm/mach-sa1100/assabet.c | 1
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c | 42
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 34
-rw-r--r--  arch/arm/mach-shmobile/board-kzm9g.c | 20
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 42
-rw-r--r--  arch/arm/mach-sunxi/Kconfig | 1
-rw-r--r--  arch/arm/mach-tegra/cpu-tegra.c | 3
-rw-r--r--  arch/arm/mach-tegra/tegra2_emc.c | 8
-rw-r--r--  arch/arm/mach-ux500/Kconfig | 6
-rw-r--r--  arch/arm/mach-ux500/board-mop500.c | 20
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 2
-rw-r--r--  arch/arm/mach-ux500/include/mach/irqs-board-mop500.h | 10
-rw-r--r--  arch/arm/mach-versatile/core.c | 15
-rw-r--r--  arch/arm/mach-versatile/pci.c | 11
-rw-r--r--  arch/arm/mm/Kconfig | 10
-rw-r--r--  arch/arm/mm/Makefile | 2
-rw-r--r--  arch/arm/mm/context.c | 3
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/mm/idmap.c | 55
-rw-r--r--  arch/arm/mm/ioremap.c | 135
-rw-r--r--  arch/arm/mm/mm.h | 12
-rw-r--r--  arch/arm/mm/mmu.c | 58
-rw-r--r--  arch/arm/mm/proc-macros.S | 5
-rw-r--r--  arch/arm/mm/proc-v6.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 2
-rw-r--r--  arch/arm/mm/vmregion.c | 205
-rw-r--r--  arch/arm/mm/vmregion.h | 31
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 15
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 8
-rw-r--r--  arch/arm/plat-samsung/adc.c | 8
-rw-r--r--  arch/arm/plat-samsung/dma-ops.c | 10
-rw-r--r--  arch/arm/plat-samsung/include/plat/adc.h | 1
-rw-r--r--  arch/arm/plat-samsung/include/plat/dma-ops.h | 3
-rw-r--r--  arch/arm/plat-samsung/s3c-dma-ops.c | 3
-rw-r--r--  arch/arm64/Kconfig | 2
-rw-r--r--  arch/arm64/Kconfig.debug | 17
-rw-r--r--  arch/arm64/include/asm/Kbuild | 2
-rw-r--r--  arch/arm64/include/asm/atomic.h | 132
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 74
-rw-r--r--  arch/arm64/include/asm/futex.h | 2
-rw-r--r--  arch/arm64/include/asm/io.h | 3
-rw-r--r--  arch/arm64/include/asm/memory.h | 1
-rw-r--r--  arch/arm64/include/asm/mmu.h | 1
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 15
-rw-r--r--  arch/arm64/include/asm/perf_event.h | 7
-rw-r--r--  arch/arm64/include/asm/psci.h | 38
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 10
-rw-r--r--  arch/arm64/include/asm/smp.h | 11
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 78
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild | 3
-rw-r--r--  arch/arm64/kernel/Makefile | 5
-rw-r--r--  arch/arm64/kernel/early_printk.c | 118
-rw-r--r--  arch/arm64/kernel/head.S | 12
-rw-r--r--  arch/arm64/kernel/perf_event.c | 37
-rw-r--r--  arch/arm64/kernel/process.c | 17
-rw-r--r--  arch/arm64/kernel/psci.c | 211
-rw-r--r--  arch/arm64/kernel/setup.c | 11
-rw-r--r--  arch/arm64/kernel/signal32.c | 4
-rw-r--r--  arch/arm64/kernel/smp.c | 69
-rw-r--r--  arch/arm64/kernel/smp_psci.c | 52
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 66
-rw-r--r--  arch/arm64/mm/mmu.c | 42
-rw-r--r--  arch/avr32/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/avr32/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/blackfin/Kconfig | 8
-rw-r--r--  arch/blackfin/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/blackfin/kernel/process.c | 7
-rw-r--r--  arch/c6x/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/cris/arch-v32/drivers/Kconfig | 4
-rw-r--r--  arch/cris/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/cris/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/cris/kernel/process.c | 11
-rw-r--r--  arch/frv/Kconfig | 1
-rw-r--r--  arch/frv/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/frv/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/h8300/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/hexagon/Kconfig | 2
-rw-r--r--  arch/ia64/Kconfig | 9
-rw-r--r--  arch/ia64/hp/common/aml_nfw.c | 2
-rw-r--r--  arch/ia64/hp/sim/Kconfig | 1
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 21
-rw-r--r--  arch/ia64/include/asm/acpi.h | 4
-rw-r--r--  arch/ia64/include/asm/cputime.h | 92
-rw-r--r--  arch/ia64/include/asm/thread_info.h | 4
-rw-r--r--  arch/ia64/include/asm/xen/minstate.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/ia64/kernel/asm-offsets.c | 2
-rw-r--r--  arch/ia64/kernel/entry.S | 16
-rw-r--r--  arch/ia64/kernel/fsys.S | 4
-rw-r--r--  arch/ia64/kernel/head.S | 4
-rw-r--r--  arch/ia64/kernel/ivt.S | 8
-rw-r--r--  arch/ia64/kernel/minstate.h | 2
-rw-r--r--  arch/ia64/kernel/process.c | 3
-rw-r--r--  arch/ia64/kernel/setup.c | 1
-rw-r--r--  arch/ia64/kernel/time.c | 5
-rw-r--r--  arch/ia64/kvm/Kconfig | 2
-rw-r--r--  arch/ia64/xen/Kconfig | 2
-rw-r--r--  arch/m32r/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/m32r/kernel/process.c | 51
-rw-r--r--  arch/m68k/Kconfig.devices | 2
-rw-r--r--  arch/m68k/include/asm/dma-mapping.h | 13
-rw-r--r--  arch/m68k/include/asm/processor.h | 1
-rw-r--r--  arch/m68k/kernel/Makefile | 4
-rw-r--r--  arch/microblaze/kernel/process.c | 3
-rw-r--r--  arch/microblaze/platform/Kconfig.platform | 2
-rw-r--r--  arch/mips/Kconfig | 29
-rw-r--r--  arch/mips/bcm47xx/serial.c | 2
-rw-r--r--  arch/mips/include/uapi/asm/socket.h | 5
-rw-r--r--  arch/mips/jazz/Kconfig | 6
-rw-r--r--  arch/mips/lantiq/xway/dma.c | 5
-rw-r--r--  arch/mips/lantiq/xway/gptu.c | 8
-rw-r--r--  arch/mips/pci/pci-lantiq.c | 12
-rw-r--r--  arch/mips/sgi-ip27/Kconfig | 1
-rw-r--r--  arch/mips/sni/a20r.c | 2
-rw-r--r--  arch/mn10300/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/mn10300/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/mn10300/kernel/mn10300-serial.c | 20
-rw-r--r--  arch/mn10300/kernel/process.c | 7
-rw-r--r--  arch/openrisc/kernel/idle.c | 5
-rw-r--r--  arch/parisc/Kconfig | 11
-rw-r--r--  arch/parisc/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/parisc/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/parisc/kernel/pdc_cons.c | 10
-rw-r--r--  arch/powerpc/Kconfig | 16
-rw-r--r--  arch/powerpc/configs/chroma_defconfig | 2
-rw-r--r--  arch/powerpc/configs/corenet64_smp_defconfig | 2
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 2
-rw-r--r--  arch/powerpc/include/asm/cputime.h | 6
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 2
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 26
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 4
-rw-r--r--  arch/powerpc/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 7
-rw-r--r--  arch/powerpc/kvm/Kconfig | 10
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 62
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 12
-rw-r--r--  arch/powerpc/perf/power7-pmu.c | 80
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 1
-rw-r--r--  arch/powerpc/platforms/ps3/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 6
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c | 2
-rw-r--r--  arch/s390/Kconfig | 5
-rw-r--r--  arch/s390/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/s390/kernel/time.c | 3
-rw-r--r--  arch/s390/kernel/vtime.c | 6
-rw-r--r--  arch/s390/kvm/Kconfig | 2
-rw-r--r--  arch/sh/Kconfig | 17
-rw-r--r--  arch/sh/Kconfig.cpu | 3
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c | 27
-rw-r--r--  arch/sh/boards/mach-se/7724/setup.c | 29
-rw-r--r--  arch/sh/kernel/idle.c | 12
-rw-r--r--  arch/sh/mm/Kconfig | 4
-rw-r--r--  arch/sparc/Kconfig | 2
-rw-r--r--  arch/sparc/include/asm/hugetlb.h | 1
-rw-r--r--  arch/sparc/include/asm/page_64.h | 4
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 14
-rw-r--r--  arch/sparc/include/asm/processor_32.h | 1
-rw-r--r--  arch/sparc/include/asm/tsb.h | 28
-rw-r--r--  arch/sparc/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/sparc/kernel/apc.c | 3
-rw-r--r--  arch/sparc/kernel/kernel.h | 12
-rw-r--r--  arch/sparc/kernel/kgdb_32.c | 1
-rw-r--r--  arch/sparc/kernel/leon_pmc.c | 5
-rw-r--r--  arch/sparc/kernel/leon_smp.c | 33
-rw-r--r--  arch/sparc/kernel/pmc.c | 3
-rw-r--r--  arch/sparc/kernel/process_32.c | 7
-rw-r--r--  arch/sparc/kernel/prom_common.c | 4
-rw-r--r--  arch/sparc/kernel/sbus.c | 6
-rw-r--r--  arch/sparc/kernel/smp_32.c | 86
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 29
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 33
-rw-r--r--  arch/sparc/kernel/trampoline_32.S | 17
-rw-r--r--  arch/sparc/kernel/tsb.S | 39
-rw-r--r--  arch/sparc/mm/fault_64.c | 9
-rw-r--r--  arch/sparc/mm/gup.c | 59
-rw-r--r--  arch/sparc/mm/init_64.c | 62
-rw-r--r--  arch/sparc/mm/tlb.c | 11
-rw-r--r--  arch/sparc/mm/tsb.c | 2
-rw-r--r--  arch/tile/Kconfig | 3
-rw-r--r--  arch/tile/include/asm/io.h | 6
-rw-r--r--  arch/tile/include/asm/irqflags.h | 32
-rw-r--r--  arch/tile/include/uapi/arch/interrupts_32.h | 394
-rw-r--r--  arch/tile/include/uapi/arch/interrupts_64.h | 346
-rw-r--r--  arch/tile/kernel/intvec_64.S | 4
-rw-r--r--  arch/tile/kernel/process.c | 2
-rw-r--r--  arch/tile/kernel/reboot.c | 2
-rw-r--r--  arch/tile/kernel/setup.c | 5
-rw-r--r--  arch/tile/kernel/stack.c | 3
-rw-r--r--  arch/tile/kvm/Kconfig | 2
-rw-r--r--  arch/tile/lib/cacheflush.c | 2
-rw-r--r--  arch/tile/lib/cpumask.c | 2
-rw-r--r--  arch/tile/lib/exports.c | 2
-rw-r--r--  arch/tile/mm/homecache.c | 1
-rw-r--r--  arch/um/Kconfig.common | 1
-rw-r--r--  arch/um/Kconfig.net | 2
-rw-r--r--  arch/um/Kconfig.um | 8
-rw-r--r--  arch/um/drivers/chan.h | 3
-rw-r--r--  arch/um/drivers/chan_kern.c | 25
-rw-r--r--  arch/um/drivers/line.c | 7
-rw-r--r--  arch/um/drivers/net_kern.c | 26
-rw-r--r--  arch/um/include/shared/net_kern.h | 1
-rw-r--r--  arch/unicore32/kernel/process.c | 5
-rw-r--r--  arch/x86/Kconfig | 61
-rw-r--r--  arch/x86/Makefile | 4
-rw-r--r--  arch/x86/boot/compressed/misc.c | 2
-rw-r--r--  arch/x86/boot/compressed/misc.h | 1
-rw-r--r--  arch/x86/configs/i386_defconfig | 1
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 37
-rw-r--r--  arch/x86/ia32/ia32entry.S | 4
-rw-r--r--  arch/x86/include/asm/acpi.h | 4
-rw-r--r--  arch/x86/include/asm/amd_nb.h | 17
-rw-r--r--  arch/x86/include/asm/bootparam_utils.h | 38
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/include/asm/ftrace.h | 1
-rw-r--r--  arch/x86/include/asm/hpet.h | 5
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 13
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 13
-rw-r--r--  arch/x86/include/asm/io_apic.h | 28
-rw-r--r--  arch/x86/include/asm/irq_remapping.h | 40
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 4
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 8
-rw-r--r--  arch/x86/include/asm/linkage.h | 18
-rw-r--r--  arch/x86/include/asm/mce.h | 84
-rw-r--r--  arch/x86/include/asm/mshyperv.h | 4
-rw-r--r--  arch/x86/include/asm/mwait.h | 3
-rw-r--r--  arch/x86/include/asm/pci.h | 3
-rw-r--r--  arch/x86/include/asm/perf_event.h | 13
-rw-r--r--  arch/x86/include/asm/pgtable.h | 17
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 7
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 3
-rw-r--r--  arch/x86/include/asm/processor.h | 20
-rw-r--r--  arch/x86/include/asm/required-features.h | 8
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 44
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 1496
-rw-r--r--  arch/x86/include/asm/x86_init.h | 27
-rw-r--r--  arch/x86/include/asm/xor.h | 491
-rw-r--r--  arch/x86/include/asm/xor_32.h | 309
-rw-r--r--  arch/x86/include/asm/xor_64.h | 305
-rw-r--r--  arch/x86/include/uapi/asm/mce.h | 87
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 5
-rw-r--r--  arch/x86/kernel/Makefile | 3
-rw-r--r--  arch/x86/kernel/apb_timer.c | 10
-rw-r--r--  arch/x86/kernel/apic/apic.c | 28
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 457
-rw-r--r--  arch/x86/kernel/apic/ipi.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c | 21
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 206
-rw-r--r--  arch/x86/kernel/apm_32.c | 68
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 4
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 27
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c | 7
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 9
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 14
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c | 54
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 15
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 25
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 322
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c | 2
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 2
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 13
-rw-r--r--  arch/x86/kernel/entry_32.S | 9
-rw-r--r--  arch/x86/kernel/entry_64.S | 7
-rw-r--r--  arch/x86/kernel/head32.c | 3
-rw-r--r--  arch/x86/kernel/head64.c | 2
-rw-r--r--  arch/x86/kernel/head_32.S | 93
-rw-r--r--  arch/x86/kernel/hpet.c | 2
-rw-r--r--  arch/x86/kernel/kprobes/Makefile | 7
-rw-r--r--  arch/x86/kernel/kprobes/common.h (renamed from arch/x86/kernel/kprobes-common.h) | 11
-rw-r--r--  arch/x86/kernel/kprobes/core.c (renamed from arch/x86/kernel/kprobes.c) | 76
-rw-r--r--  arch/x86/kernel/kprobes/ftrace.c | 93
-rw-r--r--  arch/x86/kernel/kprobes/opt.c (renamed from arch/x86/kernel/kprobes-opt.c) | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 1
-rw-r--r--  arch/x86/kernel/process.c | 122
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/kernel/rtc.c | 1
-rw-r--r--  arch/x86/kernel/smpboot.c | 2
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 3
-rw-r--r--  arch/x86/kernel/uprobes.c | 4
-rw-r--r--  arch/x86/kernel/x86_init.c | 24
-rw-r--r--  arch/x86/lguest/Kconfig | 1
-rw-r--r--  arch/x86/mm/fault.c | 8
-rw-r--r--  arch/x86/mm/init_64.c | 7
-rw-r--r--  arch/x86/mm/memtest.c | 10
-rw-r--r--  arch/x86/mm/srat.c | 29
-rw-r--r--  arch/x86/mm/tlb.c | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 40
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 3
-rw-r--r--  arch/x86/platform/Makefile | 2
-rw-r--r--  arch/x86/platform/efi/efi-bgrt.c | 7
-rw-r--r--  arch/x86/platform/efi/efi.c | 2
-rw-r--r--  arch/x86/platform/goldfish/Makefile | 1
-rw-r--r--  arch/x86/platform/goldfish/goldfish.c | 51
-rw-r--r--  arch/x86/platform/olpc/olpc-xo15-sci.c | 2
-rw-r--r--  arch/x86/platform/sfi/sfi.c | 2
-rw-r--r--  arch/x86/platform/ts5500/Makefile | 1
-rw-r--r--  arch/x86/platform/ts5500/ts5500.c | 339
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 4
-rw-r--r--  arch/x86/platform/uv/uv_time.c | 13
-rw-r--r--  arch/x86/tools/insn_sanity.c | 10
-rw-r--r--  arch/x86/um/Kconfig | 3
-rw-r--r--  arch/x86/um/fault.c | 2
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 78
-rw-r--r--  arch/x86/xen/setup.c | 5
-rw-r--r--  arch/x86/xen/suspend.c | 2
-rw-r--r--  arch/x86/xen/xen-asm_32.S | 14
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
-rw-r--r--  arch/xtensa/Kconfig | 1
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/xtensa/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/xtensa/platforms/iss/console.c | 10
432 files changed, 13373 insertions, 4140 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 7f8f281f2585..97fb7d0365d1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -76,6 +76,15 @@ config OPTPROBES
depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
+config KPROBES_ON_FTRACE
+ def_bool y
+ depends on KPROBES && HAVE_KPROBES_ON_FTRACE
+ depends on DYNAMIC_FTRACE_WITH_REGS
+ help
+ If function tracer is enabled and the arch supports full
+ passing of pt_regs to function tracing, then kprobes can
+ optimize on top of function tracing.
+
config UPROBES
bool "Transparent user-space probes (EXPERIMENTAL)"
depends on UPROBE_EVENT && PERF_EVENTS
@@ -158,6 +167,9 @@ config HAVE_KRETPROBES
config HAVE_OPTPROBES
bool
+config HAVE_KPROBES_ON_FTRACE
+ bool
+
config HAVE_NMI_WATCHDOG
bool
#
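(Note: with KPROBES_ON_FTRACE, a kprobe placed exactly at a function's ftrace entry point is armed through the ftrace call site rather than a breakpoint instruction. A minimal module sketch of such a probe — the probed symbol is only an example:)

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/ptrace.h>
	#include <linux/kprobes.h>

	/* At symbol+0 the probe can ride the ftrace entry when
	 * CONFIG_KPROBES_ON_FTRACE=y, so no trap needs to be patched in. */
	static int pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("hit %s at ip=%lx\n", p->symbol_name,
			instruction_pointer(regs));
		return 0;
	}

	static struct kprobe kp = {
		.symbol_name	= "do_fork",	/* example symbol */
		.pre_handler	= pre_handler,
	};

	static int __init kp_init(void)  { return register_kprobe(&kp); }
	static void __exit kp_exit(void) { unregister_kprobe(&kp); }
	module_init(kp_init);
	module_exit(kp_exit);
	MODULE_LICENSE("GPL");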
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9d5904cc7712..dabc93649495 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -5,7 +5,6 @@ config ALPHA
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS
- select HAVE_IRQ_WORK
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
@@ -125,6 +124,7 @@ choice
config ALPHA_GENERIC
bool "Generic"
+ depends on TTY
help
A generic kernel will run on all supported Alpha hardware.
@@ -491,6 +491,7 @@ config VGA_HOSE
config ALPHA_SRM
bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
+ depends on TTY
default y if ALPHA_JENSEN || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
---help---
There are two different types of booting firmware on Alphas: SRM,
@@ -556,8 +557,7 @@ config NR_CPUS
with working support have a maximum of 4 CPUs.
config ARCH_DISCONTIGMEM_ENABLE
- bool "Discontiguous Memory Support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Discontiguous Memory Support"
help
Say Y to support efficient handling of discontiguous physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 097c1577735a..c5195524d1ef 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -19,7 +19,7 @@
#define SO_BROADCAST 0x0020
#define SO_LINGER 0x0080
#define SO_OOBINLINE 0x0100
-/* To add :#define SO_REUSEPORT 0x0200 */
+#define SO_REUSEPORT 0x0200
#define SO_TYPE 0x1008
#define SO_ERROR 0x1007
@@ -77,5 +77,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
#endif /* _UAPI_ASM_SOCKET_H */
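(Note: SO_REUSEPORT lets several sockets bind the same address/port pair, with the kernel balancing incoming connections between them. A userspace sketch, assuming libc headers that already expose SO_REUSEPORT:)

	#include <string.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>

	static int reuseport_listener(unsigned short port)
	{
		struct sockaddr_in sa;
		int one = 1;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		/* several processes can each do this for the same port */
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		memset(&sa, 0, sizeof(sa));
		sa.sin_family = AF_INET;
		sa.sin_addr.s_addr = htonl(INADDR_ANY);
		sa.sin_port = htons(port);
		if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return -1;
		return listen(fd, 16) ? -1 : fd;
	}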
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 14db93e4c8a8..dbc1760f418b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1139,6 +1139,7 @@ struct rusage32 {
SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
{
struct rusage32 r;
+ cputime_t utime, stime;
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
return -EINVAL;
@@ -1146,8 +1147,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
memset(&r, 0, sizeof(r));
switch (who) {
case RUSAGE_SELF:
- jiffies_to_timeval32(current->utime, &r.ru_utime);
- jiffies_to_timeval32(current->stime, &r.ru_stime);
+ task_cputime(current, &utime, &stime);
+ jiffies_to_timeval32(utime, &r.ru_utime);
+ jiffies_to_timeval32(stime, &r.ru_stime);
r.ru_minflt = current->min_flt;
r.ru_majflt = current->maj_flt;
break;
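(Note: task_cputime(), used above, is the accessor introduced in this series so callers no longer read tsk->utime/tsk->stime directly — those fields may be maintained lazily under full dynticks cputime accounting. A rough in-kernel usage sketch:)

	#include <linux/kernel.h>
	#include <linux/sched.h>

	static void show_times(struct task_struct *tsk)
	{
		cputime_t utime, stime;

		task_cputime(tsk, &utime, &stime);
		pr_info("%s: utime=%lu stime=%lu\n", tsk->comm,
			(unsigned long)utime, (unsigned long)stime);
	}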
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 59b7bbad8394..6f01d9ad7b81 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -44,7 +44,7 @@ typedef union _srmcons_result {
/* called with callback_lock held */
static int
-srmcons_do_receive_chars(struct tty_struct *tty)
+srmcons_do_receive_chars(struct tty_port *port)
{
srmcons_result result;
int count = 0, loops = 0;
@@ -52,13 +52,13 @@ srmcons_do_receive_chars(struct tty_struct *tty)
do {
result.as_long = callback_getc(0);
if (result.bits.status < 2) {
- tty_insert_flip_char(tty, (char)result.bits.c, 0);
+ tty_insert_flip_char(port, (char)result.bits.c, 0);
count++;
}
} while((result.bits.status & 1) && (++loops < 10));
if (count)
- tty_schedule_flip(tty);
+ tty_schedule_flip(port);
return count;
}
@@ -73,7 +73,7 @@ srmcons_receive_chars(unsigned long data)
local_irq_save(flags);
if (spin_trylock(&srmcons_callback_lock)) {
- if (!srmcons_do_receive_chars(port->tty))
+ if (!srmcons_do_receive_chars(port))
incr = 100;
spin_unlock(&srmcons_callback_lock);
}
@@ -88,7 +88,7 @@ srmcons_receive_chars(unsigned long data)
/* called with callback_lock held */
static int
-srmcons_do_write(struct tty_struct *tty, const char *buf, int count)
+srmcons_do_write(struct tty_port *port, const char *buf, int count)
{
static char str_cr[1] = "\r";
long c, remaining = count;
@@ -113,10 +113,10 @@ srmcons_do_write(struct tty_struct *tty, const char *buf, int count)
cur += result.bits.c;
/*
- * Check for pending input iff a tty was provided
+ * Check for pending input iff a tty port was provided
*/
- if (tty)
- srmcons_do_receive_chars(tty);
+ if (port)
+ srmcons_do_receive_chars(port);
}
while (need_cr) {
@@ -135,7 +135,7 @@ srmcons_write(struct tty_struct *tty,
unsigned long flags;
spin_lock_irqsave(&srmcons_callback_lock, flags);
- srmcons_do_write(tty, (const char *) buf, count);
+ srmcons_do_write(tty->port, (const char *) buf, count);
spin_unlock_irqrestore(&srmcons_callback_lock, flags);
return count;
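(Note: after this conversion the flip buffer hangs off the tty_port, so the driver can queue input even when no tty_struct is open. The resulting pattern, as a self-contained sketch:)

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static void srm_push_byte(struct tty_port *port, unsigned char ch)
	{
		tty_insert_flip_char(port, ch, TTY_NORMAL);	/* queue into the port buffer */
		tty_schedule_flip(port);			/* hand off to the ldisc later */
	}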
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b934d90a61e4..09238c83e6d6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -36,7 +36,6 @@ config ARM
select HAVE_GENERIC_HARDIRQS
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_IDE if PCI || ISA || PCMCIA
- select HAVE_IRQ_WORK
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
@@ -1536,7 +1535,6 @@ config SMP
config SMP_ON_UP
bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
- depends on EXPERIMENTAL
depends on SMP && !XIP_KERNEL
default y
help
@@ -1625,6 +1623,16 @@ config HOTPLUG_CPU
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
+config ARM_PSCI
+ bool "Support for the ARM Power State Coordination Interface (PSCI)"
+ depends on CPU_V7
+ help
+ Say Y here if you want Linux to communicate with system firmware
+ implementing the PSCI specification for CPU-centric power
+ management operations described in ARM document number ARM DEN
+ 0022A ("Power State Coordination Interface System Software on
+ ARM processors").
+
config LOCAL_TIMERS
bool "Use local timer interrupts"
depends on SMP
@@ -1642,7 +1650,7 @@ config ARCH_NR_GPIO
default 355 if ARCH_U8500
default 264 if MACH_H4700
default 512 if SOC_OMAP5
- default 288 if ARCH_VT8500
+ default 288 if ARCH_VT8500 || ARCH_SUNXI
default 0
help
Maximum number of GPIOs in the system.
@@ -1660,6 +1668,9 @@ config HZ
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
default 100
+config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
+
config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode"
depends on CPU_V7 && !CPU_V6 && !CPU_V6K
@@ -1724,7 +1735,7 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
- depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
+ depends on AEABI && !THUMB2_KERNEL
default y
help
This option preserves the old syscall interface along with the
@@ -1848,7 +1859,6 @@ config SECCOMP
config CC_STACKPROTECTOR
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
- depends on EXPERIMENTAL
help
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
@@ -1865,7 +1875,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ARM && OF
+ depends on ARM && OF
depends on CPU_V7 && !CPU_V6
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
@@ -1934,7 +1944,7 @@ config ZBOOT_ROM
choice
prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
- depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+ depends on ZBOOT_ROM && ARCH_SH7372
default ZBOOT_ROM_NONE
help
Include experimental SD/MMC loading code in the ROM-able zImage.
@@ -1963,7 +1973,7 @@ endchoice
config ARM_APPENDED_DTB
bool "Use appended device tree blob to zImage (EXPERIMENTAL)"
- depends on OF && !ZBOOT_ROM && EXPERIMENTAL
+ depends on OF && !ZBOOT_ROM
help
With this option, the boot code will look for a device tree binary
(DTB) appended to zImage
@@ -2081,7 +2091,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU)
+ depends on (!SMP || HOTPLUG_CPU)
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
@@ -2103,7 +2113,6 @@ config ATAGS_PROC
config CRASH_DUMP
bool "Build kdump crash kernel (EXPERIMENTAL)"
- depends on EXPERIMENTAL
help
Generate crash dump after being started by kexec. This should
be normally only set in special crash dump kernels which are
@@ -2170,7 +2179,7 @@ config CPU_FREQ_S3C
config CPU_FREQ_S3C24XX
bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
- depends on ARCH_S3C24XX && CPU_FREQ && EXPERIMENTAL
+ depends on ARCH_S3C24XX && CPU_FREQ
select CPU_FREQ_S3C
help
This enables the CPUfreq driver for the Samsung S3C24XX family
@@ -2182,7 +2191,7 @@ config CPU_FREQ_S3C24XX
config CPU_FREQ_S3C24XX_PLL
bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
- depends on CPU_FREQ_S3C24XX && EXPERIMENTAL
+ depends on CPU_FREQ_S3C24XX
help
Compile in support for changing the PLL frequency from the
S3C24XX series CPUfreq driver. The PLL takes time to settle
@@ -2245,7 +2254,7 @@ config FPE_NWFPE_XP
config FPE_FASTFPE
bool "FastFPE math emulation (EXPERIMENTAL)"
- depends on (!AEABI || OABI_COMPAT) && !CPU_32v3 && EXPERIMENTAL
+ depends on (!AEABI || OABI_COMPAT) && !CPU_32v3
---help---
Say Y here to include the FAST floating point emulator in the kernel.
This is an experimental much faster emulator which now also has full
@@ -2327,3 +2336,5 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+source "arch/arm/kvm/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 661030d6bc6c..fc2a591e1676 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -32,7 +32,7 @@ config FRAME_POINTER
config ARM_UNWIND
bool "Enable stack unwinding support (EXPERIMENTAL)"
- depends on AEABI && EXPERIMENTAL
+ depends on AEABI
default y
help
This option enables stack unwinding support in the kernel
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30c443c406f3..4bcd2d6b0535 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ)
core-$(CONFIG_VFP) += arch/arm/vfp/
core-$(CONFIG_XEN) += arch/arm/xen/
+core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
# If we have a machine-specific directory, then include it in the build.
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 63f2fbcfe819..69140ba99f46 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -170,10 +170,9 @@
gpio-bank = <8>;
};
- pinctrl@80157000 {
- // This is actually the PRCMU base address
- reg = <0x80157000 0x2000>;
- compatible = "stericsson,nmk_pinctrl";
+ pinctrl {
+ compatible = "stericsson,nmk-pinctrl";
+ prcm = <&prcmu>;
};
usb@a03e0000 {
@@ -190,9 +189,10 @@
interrupts = <0 25 0x4>;
};
- prcmu@80157000 {
+ prcmu: prcmu@80157000 {
compatible = "stericsson,db8500-prcmu";
reg = <0x80157000 0x1000>;
+ reg-names = "prcmu";
interrupts = <0 47 0x4>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index e05b18f3c33d..4db9db0a8443 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -49,6 +49,11 @@
compatible = "samsung,s524ad0xd1";
reg = <0x51>;
};
+
+ wm8994: wm8994@1a {
+ compatible = "wlf,wm8994";
+ reg = <0x1a>;
+ };
};
i2c@121D0000 {
@@ -204,4 +209,25 @@
samsung,mfc-r = <0x43000000 0x800000>;
samsung,mfc-l = <0x51000000 0x800000>;
};
+
+ i2s0: i2s@03830000 {
+ gpios = <&gpz 0 2 0 0>, <&gpz 1 2 0 0>, <&gpz 2 2 0 0>,
+ <&gpz 3 2 0 0>, <&gpz 4 2 0 0>, <&gpz 5 2 0 0>,
+ <&gpz 6 2 0 0>;
+ };
+
+ i2s1: i2s@12D60000 {
+ status = "disabled";
+ };
+
+ i2s2: i2s@12D70000 {
+ status = "disabled";
+ };
+
+ sound {
+ compatible = "samsung,smdk-wm8994";
+
+ samsung,i2s-controller = <&i2s0>;
+ samsung,audio-codec = <&wm8994>;
+ };
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 3acf594ea60b..f50b4e854355 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -211,8 +211,9 @@
compatible = "samsung,exynos4210-spi";
reg = <0x12d20000 0x100>;
interrupts = <0 66 0>;
- tx-dma-channel = <&pdma0 5>; /* preliminary */
- rx-dma-channel = <&pdma0 4>; /* preliminary */
+ dmas = <&pdma0 5
+ &pdma0 4>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
};
@@ -221,8 +222,9 @@
compatible = "samsung,exynos4210-spi";
reg = <0x12d30000 0x100>;
interrupts = <0 67 0>;
- tx-dma-channel = <&pdma1 5>; /* preliminary */
- rx-dma-channel = <&pdma1 4>; /* preliminary */
+ dmas = <&pdma1 5
+ &pdma1 4>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
};
@@ -231,8 +233,9 @@
compatible = "samsung,exynos4210-spi";
reg = <0x12d40000 0x100>;
interrupts = <0 68 0>;
- tx-dma-channel = <&pdma0 7>; /* preliminary */
- rx-dma-channel = <&pdma0 6>; /* preliminary */
+ dmas = <&pdma0 7
+ &pdma0 6>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
};
@@ -269,6 +272,35 @@
#size-cells = <0>;
};
+ i2s0: i2s@03830000 {
+ compatible = "samsung,i2s-v5";
+ reg = <0x03830000 0x100>;
+ dmas = <&pdma0 10
+ &pdma0 9
+ &pdma0 8>;
+ dma-names = "tx", "rx", "tx-sec";
+ samsung,supports-6ch;
+ samsung,supports-rstclr;
+ samsung,supports-secdai;
+ samsung,idma-addr = <0x03000000>;
+ };
+
+ i2s1: i2s@12D60000 {
+ compatible = "samsung,i2s-v5";
+ reg = <0x12D60000 0x100>;
+ dmas = <&pdma1 12
+ &pdma1 11>;
+ dma-names = "tx", "rx";
+ };
+
+ i2s2: i2s@12D70000 {
+ compatible = "samsung,i2s-v5";
+ reg = <0x12D70000 0x100>;
+ dmas = <&pdma0 12
+ &pdma0 11>;
+ dma-names = "tx", "rx";
+ };
+
amba {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 5927a8df5625..6aad34ad9517 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -37,6 +37,16 @@
next-level-cache = <&L2>;
clocks = <&a9pll>;
clock-names = "cpu";
+ operating-points = <
+ /* kHz ignored */
+ 1300000 1000000
+ 1200000 1000000
+ 1100000 1000000
+ 800000 1000000
+ 400000 1000000
+ 200000 1000000
+ >;
+ clock-latency = <100000>;
};
cpu@901 {
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 65415c598a5e..56afcf41aae0 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -391,7 +391,9 @@
};
lradc@80050000 {
+ compatible = "fsl,imx23-lradc";
reg = <0x80050000 0x2000>;
+ interrupts = <36 37 38 39 40 41 42 43 44>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 055fca542120..3329719a9412 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -58,10 +58,11 @@
#size-cells = <1>;
ranges = <0x88000000 0x88000000 0x40000>;
- clock-controller@88000000 {
+ clks: clock-controller@88000000 {
compatible = "sirf,prima2-clkc";
reg = <0x88000000 0x1000>;
interrupts = <3>;
+ #clock-cells = <1>;
};
reset-controller@88010000 {
@@ -85,6 +86,7 @@
compatible = "sirf,prima2-memc";
reg = <0x90000000 0x10000>;
interrupts = <27>;
+ clocks = <&clks 5>;
};
};
@@ -104,6 +106,7 @@
compatible = "sirf,prima2-vpp";
reg = <0x90020000 0x10000>;
interrupts = <31>;
+ clocks = <&clks 35>;
};
};
@@ -117,6 +120,7 @@
compatible = "powervr,sgx531";
reg = <0x98000000 0x8000000>;
interrupts = <6>;
+ clocks = <&clks 32>;
};
};
@@ -130,6 +134,7 @@
compatible = "sirf,prima2-video-codec";
reg = <0xa0000000 0x8000000>;
interrupts = <5>;
+ clocks = <&clks 33>;
};
};
@@ -149,12 +154,14 @@
compatible = "sirf,prima2-gps";
reg = <0xa8010000 0x10000>;
interrupts = <7>;
+ clocks = <&clks 9>;
};
dsp@a9000000 {
compatible = "sirf,prima2-dsp";
reg = <0xa9000000 0x1000000>;
interrupts = <8>;
+ clocks = <&clks 8>;
};
};
@@ -174,12 +181,14 @@
compatible = "sirf,prima2-nand";
reg = <0xb0030000 0x10000>;
interrupts = <41>;
+ clocks = <&clks 26>;
};
audio@b0040000 {
compatible = "sirf,prima2-audio";
reg = <0xb0040000 0x10000>;
interrupts = <35>;
+ clocks = <&clks 27>;
};
uart0: uart@b0050000 {
@@ -187,6 +196,7 @@
compatible = "sirf,prima2-uart";
reg = <0xb0050000 0x10000>;
interrupts = <17>;
+ clocks = <&clks 13>;
};
uart1: uart@b0060000 {
@@ -194,6 +204,7 @@
compatible = "sirf,prima2-uart";
reg = <0xb0060000 0x10000>;
interrupts = <18>;
+ clocks = <&clks 14>;
};
uart2: uart@b0070000 {
@@ -201,6 +212,7 @@
compatible = "sirf,prima2-uart";
reg = <0xb0070000 0x10000>;
interrupts = <19>;
+ clocks = <&clks 15>;
};
usp0: usp@b0080000 {
@@ -208,6 +220,7 @@
compatible = "sirf,prima2-usp";
reg = <0xb0080000 0x10000>;
interrupts = <20>;
+ clocks = <&clks 28>;
};
usp1: usp@b0090000 {
@@ -215,6 +228,7 @@
compatible = "sirf,prima2-usp";
reg = <0xb0090000 0x10000>;
interrupts = <21>;
+ clocks = <&clks 29>;
};
usp2: usp@b00a0000 {
@@ -222,6 +236,7 @@
compatible = "sirf,prima2-usp";
reg = <0xb00a0000 0x10000>;
interrupts = <22>;
+ clocks = <&clks 30>;
};
dmac0: dma-controller@b00b0000 {
@@ -229,6 +244,7 @@
compatible = "sirf,prima2-dmac";
reg = <0xb00b0000 0x10000>;
interrupts = <12>;
+ clocks = <&clks 24>;
};
dmac1: dma-controller@b0160000 {
@@ -236,11 +252,13 @@
compatible = "sirf,prima2-dmac";
reg = <0xb0160000 0x10000>;
interrupts = <13>;
+ clocks = <&clks 25>;
};
vip@b00C0000 {
compatible = "sirf,prima2-vip";
reg = <0xb00C0000 0x10000>;
+ clocks = <&clks 31>;
};
spi0: spi@b00d0000 {
@@ -248,6 +266,7 @@
compatible = "sirf,prima2-spi";
reg = <0xb00d0000 0x10000>;
interrupts = <15>;
+ clocks = <&clks 19>;
};
spi1: spi@b0170000 {
@@ -255,6 +274,7 @@
compatible = "sirf,prima2-spi";
reg = <0xb0170000 0x10000>;
interrupts = <16>;
+ clocks = <&clks 20>;
};
i2c0: i2c@b00e0000 {
@@ -262,6 +282,7 @@
compatible = "sirf,prima2-i2c";
reg = <0xb00e0000 0x10000>;
interrupts = <24>;
+ clocks = <&clks 17>;
};
i2c1: i2c@b00f0000 {
@@ -269,12 +290,14 @@
compatible = "sirf,prima2-i2c";
reg = <0xb00f0000 0x10000>;
interrupts = <25>;
+ clocks = <&clks 18>;
};
tsc@b0110000 {
compatible = "sirf,prima2-tsc";
reg = <0xb0110000 0x10000>;
interrupts = <33>;
+ clocks = <&clks 16>;
};
gpio: pinctrl@b0120000 {
@@ -507,17 +530,20 @@
pwm@b0130000 {
compatible = "sirf,prima2-pwm";
reg = <0xb0130000 0x10000>;
+ clocks = <&clks 21>;
};
efusesys@b0140000 {
compatible = "sirf,prima2-efuse";
reg = <0xb0140000 0x10000>;
+ clocks = <&clks 22>;
};
pulsec@b0150000 {
compatible = "sirf,prima2-pulsec";
reg = <0xb0150000 0x10000>;
interrupts = <48>;
+ clocks = <&clks 23>;
};
pci-iobg {
@@ -616,12 +642,14 @@
compatible = "chipidea,ci13611a-prima2";
reg = <0xb8000000 0x10000>;
interrupts = <10>;
+ clocks = <&clks 40>;
};
usb1: usb@b00f0000 {
compatible = "chipidea,ci13611a-prima2";
reg = <0xb8010000 0x10000>;
interrupts = <11>;
+ clocks = <&clks 41>;
};
sata@b00f0000 {
@@ -634,6 +662,7 @@
compatible = "sirf,prima2-security";
reg = <0xb8030000 0x10000>;
interrupts = <42>;
+ clocks = <&clks 7>;
};
};
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e61fdd47bd01..f99f60dadf5d 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -16,4 +16,34 @@
memory {
reg = <0x40000000 0x80000000>;
};
+
+ soc {
+ pinctrl@01c20800 {
+ compatible = "allwinner,sun4i-a10-pinctrl";
+ reg = <0x01c20800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ uart0_pins_a: uart0@0 {
+ allwinner,pins = "PB22", "PB23";
+ allwinner,function = "uart0";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+
+ uart0_pins_b: uart0@1 {
+ allwinner,pins = "PF2", "PF4";
+ allwinner,function = "uart0";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+
+ uart1_pins_a: uart1@0 {
+ allwinner,pins = "PA10", "PA11";
+ allwinner,function = "uart1";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 498a091a4ea2..4a1e45d4aace 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -24,6 +24,8 @@
soc {
uart1: uart@01c28400 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins_b>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 59a2d265a98e..e1121890fb29 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -17,4 +17,27 @@
memory {
reg = <0x40000000 0x20000000>;
};
+
+ soc {
+ pinctrl@01c20800 {
+ compatible = "allwinner,sun5i-a13-pinctrl";
+ reg = <0x01c20800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ uart1_pins_a: uart1@0 {
+ allwinner,pins = "PE10", "PE11";
+ allwinner,function = "uart1";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+
+ uart1_pins_b: uart1@1 {
+ allwinner,pins = "PG3", "PG4";
+ allwinner,function = "uart1";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index d8645e990b21..cf31ced46602 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -45,6 +45,38 @@
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
+
+ clkuart0: uart0 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <1>;
+ };
+
+ clkuart1: uart1 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <2>;
+ };
+
+ clkuart2: uart2 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <3>;
+ };
+
+ clkuart3: uart3 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <4>;
+ };
};
};
@@ -83,28 +115,28 @@
compatible = "via,vt8500-uart";
reg = <0xd8200000 0x1040>;
interrupts = <32>;
- clocks = <&ref24>;
+ clocks = <&clkuart0>;
};
uart@d82b0000 {
compatible = "via,vt8500-uart";
reg = <0xd82b0000 0x1040>;
interrupts = <33>;
- clocks = <&ref24>;
+ clocks = <&clkuart1>;
};
uart@d8210000 {
compatible = "via,vt8500-uart";
reg = <0xd8210000 0x1040>;
interrupts = <47>;
- clocks = <&ref24>;
+ clocks = <&clkuart2>;
};
uart@d82c0000 {
compatible = "via,vt8500-uart";
reg = <0xd82c0000 0x1040>;
interrupts = <50>;
- clocks = <&ref24>;
+ clocks = <&clkuart3>;
};
rtc@d8100000 {
diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi
index 330f833ac3b0..e74a1c0fb9a2 100644
--- a/arch/arm/boot/dts/wm8505.dtsi
+++ b/arch/arm/boot/dts/wm8505.dtsi
@@ -59,6 +59,54 @@
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
+
+ clkuart0: uart0 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <1>;
+ };
+
+ clkuart1: uart1 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <2>;
+ };
+
+ clkuart2: uart2 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <3>;
+ };
+
+ clkuart3: uart3 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <4>;
+ };
+
+ clkuart4: uart4 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <22>;
+ };
+
+ clkuart5: uart5 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <23>;
+ };
};
};
@@ -96,42 +144,42 @@
compatible = "via,vt8500-uart";
reg = <0xd8200000 0x1040>;
interrupts = <32>;
- clocks = <&ref24>;
+ clocks = <&clkuart0>;
};
uart@d82b0000 {
compatible = "via,vt8500-uart";
reg = <0xd82b0000 0x1040>;
interrupts = <33>;
- clocks = <&ref24>;
+ clocks = <&clkuart1>;
};
uart@d8210000 {
compatible = "via,vt8500-uart";
reg = <0xd8210000 0x1040>;
interrupts = <47>;
- clocks = <&ref24>;
+ clocks = <&clkuart2>;
};
uart@d82c0000 {
compatible = "via,vt8500-uart";
reg = <0xd82c0000 0x1040>;
interrupts = <50>;
- clocks = <&ref24>;
+ clocks = <&clkuart3>;
};
uart@d8370000 {
compatible = "via,vt8500-uart";
reg = <0xd8370000 0x1040>;
interrupts = <31>;
- clocks = <&ref24>;
+ clocks = <&clkuart4>;
};
uart@d8380000 {
compatible = "via,vt8500-uart";
reg = <0xd8380000 0x1040>;
interrupts = <30>;
- clocks = <&ref24>;
+ clocks = <&clkuart5>;
};
rtc@d8100000 {
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index 83b9467559bb..db3c0a12e052 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -75,6 +75,22 @@
reg = <0x204>;
};
+ clkuart0: uart0 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <1>;
+ };
+
+ clkuart1: uart1 {
+ #clock-cells = <0>;
+ compatible = "via,vt8500-device-clock";
+ clocks = <&ref24>;
+ enable-reg = <0x250>;
+ enable-bit = <2>;
+ };
+
arm: arm {
#clock-cells = <0>;
compatible = "via,vt8500-device-clock";
@@ -128,14 +144,14 @@
compatible = "via,vt8500-uart";
reg = <0xd8200000 0x1040>;
interrupts = <32>;
- clocks = <&ref24>;
+ clocks = <&clkuart0>;
};
uart@d82b0000 {
compatible = "via,vt8500-uart";
reg = <0xd82b0000 0x1040>;
interrupts = <33>;
- clocks = <&ref24>;
+ clocks = <&clkuart1>;
};
rtc@d8100000 {
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index 401c1262d4ed..5914b5654591 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -44,14 +44,14 @@
compatible = "xlnx,xuartps";
reg = <0xE0000000 0x1000>;
interrupts = <0 27 4>;
- clock = <50000000>;
+ clocks = <&uart_clk 0>;
};
uart1: uart@e0001000 {
compatible = "xlnx,xuartps";
reg = <0xE0001000 0x1000>;
interrupts = <0 50 4>;
- clock = <50000000>;
+ clocks = <&uart_clk 1>;
};
slcr: slcr@f8000000 {
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 36ae03a3f5d1..87dfa9026c5b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+ void __iomem *base = gic_data_dist_base(gic);
+ u32 mask, i;
+
+ for (i = mask = 0; i < 32; i += 4) {
+ mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+ mask |= mask >> 16;
+ mask |= mask >> 8;
+ if (mask)
+ break;
+ }
+
+ if (!mask)
+ pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+ return mask;
+}
+
static void __init gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
/*
* Set all global interrupts to this CPU only.
*/
- cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+ cpumask = gic_get_cpumask(gic);
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
for (i = 32; i < gic_irqs; i += 4)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
* Get what the GIC says our CPU mask is.
*/
BUG_ON(cpu >= NR_GIC_CPU_IF);
- cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+ cpu_mask = gic_get_cpumask(gic);
gic_cpu_map[cpu] = cpu_mask;
/*
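(Worked example of the mask replication above, illustration only: each GIC_DIST_TARGET word holds one target byte for each of four interrupts, so a per-CPU mask of 0x01 is fanned out before the loop writes it:)

	u32 cpumask = 0x01;		/* gic_get_cpumask(): CPU interface 0 */
	cpumask |= cpumask << 8;	/* 0x00000101 */
	cpumask |= cpumask << 16;	/* 0x01010101: all four IRQs in the word target CPU0 */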
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index e59b1d505d6c..19d6cd6f29f9 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -34,8 +34,9 @@
@ A little glue here to select the correct code below for the ARM CPU
@ that is being targetted.
+#include <linux/linkage.h>
+
.text
-.code 32
.type AES_Te,%object
.align 5
@@ -145,10 +146,8 @@ AES_Te:
@ void AES_encrypt(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
-.global AES_encrypt
-.type AES_encrypt,%function
.align 5
-AES_encrypt:
+ENTRY(AES_encrypt)
sub r3,pc,#8 @ AES_encrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
@@ -239,15 +238,8 @@ AES_encrypt:
strb r6,[r12,#14]
strb r3,[r12,#15]
#endif
-#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
.type _armv4_AES_encrypt,%function
.align 2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
ldr pc,[sp],#4 @ pop and return
.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
-.global private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,%function
.align 5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
_armv4_AES_set_encrypt_key:
sub r3,pc,#8 @ AES_set_encrypt_key
teq r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
.Ldone: mov r0,#0
ldmia sp!,{r4-r12,lr}
-.Labrt: tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt: mov pc,lr
+ENDPROC(private_AES_set_encrypt_key)
-.global private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,%function
.align 5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
str lr,[sp,#-4]! @ push lr
#if 0
@ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
bne .Lmix
mov r0,#0
-#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
.type AES_Td,%object
.align 5
@@ -862,10 +841,8 @@ AES_Td:
@ void AES_decrypt(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
-.global AES_decrypt
-.type AES_decrypt,%function
.align 5
-AES_decrypt:
+ENTRY(AES_decrypt)
sub r3,pc,#8 @ AES_decrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
@@ -956,15 +933,8 @@ AES_decrypt:
strb r6,[r12,#14]
strb r3,[r12,#15]
#endif
-#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
.type _armv4_AES_decrypt,%function
.align 2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
and r9,lr,r1,lsr#8
ldrb r7,[r10,r7] @ Td4[s1>>0]
- ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24]
+ ARM( ldrb r1,[r10,r1,lsr#24] ) @ Td4[s1>>24]
+ THUMB( add r1,r10,r1,lsr#24 ) @ Td4[s1>>24]
+ THUMB( ldrb r1,[r1] )
ldrb r8,[r10,r8] @ Td4[s1>>16]
eor r0,r7,r0,lsl#24
ldrb r9,[r10,r9] @ Td4[s1>>8]
@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
ldrb r8,[r10,r8] @ Td4[s2>>0]
and r9,lr,r2,lsr#16
- ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24]
+ ARM( ldrb r2,[r10,r2,lsr#24] ) @ Td4[s2>>24]
+ THUMB( add r2,r10,r2,lsr#24 ) @ Td4[s2>>24]
+ THUMB( ldrb r2,[r2] )
eor r0,r0,r7,lsl#8
ldrb r9,[r10,r9] @ Td4[s2>>16]
eor r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
and r9,lr,r3 @ i2
ldrb r9,[r10,r9] @ Td4[s3>>0]
- ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24]
+ ARM( ldrb r3,[r10,r3,lsr#24] ) @ Td4[s3>>24]
+ THUMB( add r3,r10,r3,lsr#24 ) @ Td4[s3>>24]
+ THUMB( ldrb r3,[r3] )
eor r0,r0,r7,lsl#16
ldr r7,[r11,#0]
eor r1,r1,r8,lsl#8
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 7050ab133b9d..92c6eed7aac9 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -51,13 +51,12 @@
@ Profiler-assisted and platform-specific optimization resulted in 10%
@ improvement on Cortex A8 core and 12.2 cycles per byte.
-.text
+#include <linux/linkage.h>
-.global sha1_block_data_order
-.type sha1_block_data_order,%function
+.text
.align 2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
stmdb sp!,{r4-r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
- teq r14,sp
+ cmp r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
- teq r14,sp @ preserve carry
+ ARM( teq r14,sp ) @ preserve carry
+ THUMB( mov r11,sp )
+ THUMB( teq r14,r11 ) @ preserve carry
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
@@ -466,7 +467,7 @@ sha1_block_data_order:
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
- teq r14,sp
+ cmp r14,sp
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
teq r1,r2
bne .Lloop @ [+18], total 1307
-#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
.align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
-.size sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align 2
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200aa4b5..05ee9eebad6b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -246,18 +246,14 @@
*
* This macro is intended for forcing the CPU into SVC mode at boot time.
* you cannot return to the original mode.
- *
- * Beware, it also clobers LR.
*/
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
mrs \reg , cpsr
- mov lr , \reg
- and lr , lr , #MODE_MASK
- cmp lr , #HYP_MODE
- orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT
+ eor \reg, \reg, #HYP_MODE
+ tst \reg, #MODE_MASK
bic \reg , \reg , #MODE_MASK
- orr \reg , \reg , #SVC_MODE
+ orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB( orr \reg , \reg , #PSR_T_BIT )
bne 1f
orr \reg, \reg, #PSR_A_BIT
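(In C terms the reworked sequence computes roughly the following — HYP_MODE lies entirely within MODE_MASK, so the eor only disturbs mode bits that the bic clears anyway; an illustrative model, not part of the patch:)

	static u32 to_svc(u32 cpsr)		/* cpsr as read by "mrs \reg, cpsr" */
	{
		u32 reg = cpsr ^ HYP_MODE;	/* mode field becomes 0 iff we were in HYP */
		int was_hyp = !(reg & MODE_MASK);	/* what "tst" leaves in the Z flag */

		reg = (reg & ~MODE_MASK) | PSR_I_BIT | PSR_F_BIT | SVC_MODE;
		if (was_hyp)			/* "bne 1f" skips this when Z is clear */
			reg |= PSR_A_BIT;
		return reg;
	}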
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index a59dcb5ab5fc..ad41ec2471e8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -64,6 +64,24 @@ extern unsigned int processor_id;
#define read_cpuid_ext(reg) 0
#endif
+#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_INTEL 0x69
+
+#define ARM_CPU_PART_ARM1136 0xB360
+#define ARM_CPU_PART_ARM1156 0xB560
+#define ARM_CPU_PART_ARM1176 0xB760
+#define ARM_CPU_PART_ARM11MPCORE 0xB020
+#define ARM_CPU_PART_CORTEX_A8 0xC080
+#define ARM_CPU_PART_CORTEX_A9 0xC090
+#define ARM_CPU_PART_CORTEX_A5 0xC050
+#define ARM_CPU_PART_CORTEX_A15 0xC0F0
+#define ARM_CPU_PART_CORTEX_A7 0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
+#define ARM_CPU_XSCALE_ARCH_V1 0x2000
+#define ARM_CPU_XSCALE_ARCH_V2 0x4000
+#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
@@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+ return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+ return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+ return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CPUID_CACHETYPE);
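(Typical use of the new MIDR helpers, e.g. for errata or PMU probing — a sketch, not taken from this series:)

	#include <linux/types.h>
	#include <asm/cputype.h>

	static inline bool cpu_is_cortex_a15(void)
	{
		return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
		       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
	}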
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index f2e5cad3f306..2381199acb7d 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -2,6 +2,7 @@
#define __ASMARM_CTI_H
#include <asm/io.h>
+#include <asm/hardware/coresight.h>
/* The registers' definition is from section 3.2 of
* Embedded Cross Trigger Revision: r0p0
@@ -35,11 +36,6 @@
#define LOCKACCESS 0xFB0
#define LOCKSTATUS 0xFB4
-/* write this value to LOCKACCESS will unlock the module, and
- * other value will lock the module
- */
-#define LOCKCODE 0xC5ACCE55
-
/**
* struct cti - cross trigger interface struct
* @base: mapped virtual address for the cti base
@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
*/
static inline void cti_unlock(struct cti *cti)
{
- __raw_writel(LOCKCODE, cti->base + LOCKACCESS);
+ __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
}
/**
@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
*/
static inline void cti_lock(struct cti *cti)
{
- __raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
+ __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
}
#endif
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5a..0cf7a6b842ff 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -36,7 +36,7 @@
/* CoreSight Component Registers */
#define CSCR_CLASS 0xff4
-#define UNLOCK_MAGIC 0xc5acce55
+#define CS_LAR_KEY 0xc5acce55
/* ETM control register, "ETM Architecture", 3.3.1 */
#define ETMR_CTRL 0
@@ -147,11 +147,11 @@
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
- do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+ do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etb_unlock(t) \
- do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+ do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#endif /* __ASM_HARDWARE_CORESIGHT_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 01169dd723f1..eef55ea9ef00 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_HDBGEN (1 << 14)
#define ARM_DSCR_MDBGEN (1 << 15)
+/* OSLSR os lock model bits */
+#define ARM_OSLSR_OSLM0 (1 << 0)
+
/* opcode2 numbers for the co-processor instructions. */
#define ARM_OP2_BVR 4
#define ARM_OP2_BCR 5
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863edb517d..1a66f907e5cc 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -8,6 +8,7 @@
#define __idmap __section(.idmap.text) noinline notrace
extern pgd_t *idmap_pgd;
+extern pgd_t *hyp_pgd;
void setup_mm_for_reboot(void);
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..7c3d813e15df
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ARM_H__
+#define __ARM_KVM_ARM_H__
+
+#include <linux/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE (1 << 27)
+#define HCR_TVM (1 << 26)
+#define HCR_TTLB (1 << 25)
+#define HCR_TPU (1 << 24)
+#define HCR_TPC (1 << 23)
+#define HCR_TSW (1 << 22)
+#define HCR_TAC (1 << 21)
+#define HCR_TIDCP (1 << 20)
+#define HCR_TSC (1 << 19)
+#define HCR_TID3 (1 << 18)
+#define HCR_TID2 (1 << 17)
+#define HCR_TID1 (1 << 16)
+#define HCR_TID0 (1 << 15)
+#define HCR_TWE (1 << 14)
+#define HCR_TWI (1 << 13)
+#define HCR_DC (1 << 12)
+#define HCR_BSU (3 << 10)
+#define HCR_BSU_IS (1 << 10)
+#define HCR_FB (1 << 9)
+#define HCR_VA (1 << 8)
+#define HCR_VI (1 << 7)
+#define HCR_VF (1 << 6)
+#define HCR_AMO (1 << 5)
+#define HCR_IMO (1 << 4)
+#define HCR_FMO (1 << 3)
+#define HCR_PTW (1 << 2)
+#define HCR_SWIO (1 << 1)
+#define HCR_VM 1
+
+/*
+ * The bits we set in HCR:
+ * TAC: Trap ACTLR
+ * TSC: Trap SMC
+ * TSW: Trap cache operations by set/way
+ * TWI: Trap WFI
+ * TIDCP: Trap L2CTLR/L2ECTLR
+ * BSU_IS: Upgrade barriers to the inner shareable domain
+ * FB:		Force broadcast of all maintenance operations
+ * AMO: Override CPSR.A and enable signaling with VA
+ * IMO: Override CPSR.I and enable signaling with VI
+ * FMO: Override CPSR.F and enable signaling with VF
+ * SWIO: Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+ HCR_SWIO | HCR_TIDCP)
+#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+
+/* System Control Register (SCTLR) bits */
+#define SCTLR_TE (1 << 30)
+#define SCTLR_EE (1 << 25)
+#define SCTLR_V (1 << 13)
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE (1 << 30)
+#define HSCTLR_EE (1 << 25)
+#define HSCTLR_FI (1 << 21)
+#define HSCTLR_WXN (1 << 19)
+#define HSCTLR_I (1 << 12)
+#define HSCTLR_C (1 << 2)
+#define HSCTLR_A (1 << 1)
+#define HSCTLR_M 1
+#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
+ HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1 (3 << 28)
+#define TTBCR_ORGN1 (3 << 26)
+#define TTBCR_IRGN1 (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ (3 << 16)
+#define TTBCR_SH0 (3 << 12)
+#define TTBCR_ORGN0 (3 << 10)
+#define TTBCR_IRGN0 (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ 3
+#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+/* Hyp System Trap Register */
+#define HSTR_T(x) (1 << x)
+#define HSTR_TTEE (1 << 16)
+#define HSTR_TJDBX (1 << 17)
+
+/* Hyp Coprocessor Trap Register */
+#define HCPTR_TCP(x) (1 << x)
+#define HCPTR_TCP_MASK (0x3fff)
+#define HCPTR_TASE (1 << 15)
+#define HCPTR_TTA (1 << 20)
+#define HCPTR_TCPAC (1 << 31)
+
+/* Hyp Debug Configuration Register bits */
+#define HDCR_TDRA (1 << 11)
+#define HDCR_TDOSA (1 << 10)
+#define HDCR_TDA (1 << 9)
+#define HDCR_TDE (1 << 8)
+#define HDCR_HPME (1 << 7)
+#define HDCR_TPM (1 << 6)
+#define HDCR_TPMCR (1 << 5)
+#define HDCR_HPMN_MASK (0x1F)
+
+/*
+ * The architecture supports a 40-bit IPA as input to the second stage of
+ * translation, so PTRS_PER_S2_PGD becomes 1024: each first-level entry
+ * covers 1GB of address space.
+ */
+#define KVM_PHYS_SHIFT (40)
+#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
+#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
+#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define S2_PGD_SIZE (1 << S2_PGD_ORDER)
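A quick sanity check on these constants (worked numbers, assuming 8-byte LPAE descriptors and 4K pages; not part of the patch):

    /* PTRS_PER_S2_PGD = 1ULL << (40 - 30)    = 1024 first-level entries */
    /* table size      = 1024 * sizeof(pgd_t) = 1024 * 8 = 8192 bytes    */
    /* S2_PGD_ORDER    = get_order(8192)      = 1, i.e. two 4K pages     */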
+
+/* Virtualization Translation Control Register (VTCR) bits */
+#define VTCR_SH0 (3 << 12)
+#define VTCR_ORGN0 (3 << 10)
+#define VTCR_IRGN0 (3 << 8)
+#define VTCR_SL0 (3 << 6)
+#define VTCR_S (1 << 4)
+#define VTCR_T0SZ (0xf)
+#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
+ VTCR_S | VTCR_T0SZ)
+#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
+#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
+#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
+#define KVM_VTCR_SL0 VTCR_SL_L1
+/* stage-2 input address range defined as 2^(32-T0SZ) */
+#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
+#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
+#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
+
+/* Virtualization Translation Table Base Register (VTTBR) bits */
+#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
+#define VTTBR_X (14 - KVM_T0SZ)
+#else
+#define VTTBR_X (5 - KVM_T0SZ)
+#endif
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT (48LLU)
+#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
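The alignment the VTTBR base address must honour follows from the definitions above; working the numbers through (illustrative only):

    /* KVM_T0SZ = 32 - KVM_PHYS_SHIFT = 32 - 40 = -8               */
    /* stage-2 walks start at level 1, so VTTBR_X = 5 - (-8) = 13  */
    /* hence the stage-2 pgd base written to VTTBR is 2^13 = 8KB   */
    /* aligned, matching the order-1 (8KB) pgd allocation above.   */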
+
+/* Hyp Syndrome Register (HSR) bits */
+#define HSR_EC_SHIFT (26)
+#define HSR_EC (0x3fU << HSR_EC_SHIFT)
+#define HSR_IL (1U << 25)
+#define HSR_ISS (HSR_IL - 1)
+#define HSR_ISV_SHIFT (24)
+#define HSR_ISV (1U << HSR_ISV_SHIFT)
+#define HSR_SRT_SHIFT (16)
+#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
+#define HSR_FSC (0x3f)
+#define HSR_FSC_TYPE (0x3c)
+#define HSR_SSE (1 << 21)
+#define HSR_WNR (1 << 6)
+#define HSR_CV_SHIFT (24)
+#define HSR_CV (1U << HSR_CV_SHIFT)
+#define HSR_COND_SHIFT (20)
+#define HSR_COND (0xfU << HSR_COND_SHIFT)
+
+#define FSC_FAULT (0x04)
+#define FSC_PERM (0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK (~0xf)
+
+#define HSR_EC_UNKNOWN (0x00)
+#define HSR_EC_WFI (0x01)
+#define HSR_EC_CP15_32 (0x03)
+#define HSR_EC_CP15_64 (0x04)
+#define HSR_EC_CP14_MR (0x05)
+#define HSR_EC_CP14_LS (0x06)
+#define HSR_EC_CP_0_13 (0x07)
+#define HSR_EC_CP10_ID (0x08)
+#define HSR_EC_JAZELLE (0x09)
+#define HSR_EC_BXJ (0x0A)
+#define HSR_EC_CP14_64 (0x0C)
+#define HSR_EC_SVC_HYP (0x11)
+#define HSR_EC_HVC (0x12)
+#define HSR_EC_SMC (0x13)
+#define HSR_EC_IABT (0x20)
+#define HSR_EC_IABT_HYP (0x21)
+#define HSR_EC_DABT (0x24)
+#define HSR_EC_DABT_HYP (0x25)
+
+#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
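An exit handler splits the HSR with these masks before dispatching on the exception class. A minimal sketch, with a hypothetical helper name:

    static unsigned int hsr_exception_class(u32 hsr)
    {
    	return (hsr & HSR_EC) >> HSR_EC_SHIFT; /* e.g. HSR_EC_DABT (0x24) */
    }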
+
+#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..5e06e8177784
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+/* 0 is reserved as an invalid value. */
+#define c0_MPIDR 1 /* MultiProcessor ID Register */
+#define c0_CSSELR 2 /* Cache Size Selection Register */
+#define c1_SCTLR 3 /* System Control Register */
+#define c1_ACTLR 4 /* Auxiliary Control Register */
+#define c1_CPACR 5 /* Coprocessor Access Control */
+#define c2_TTBR0 6 /* Translation Table Base Register 0 */
+#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
+#define c2_TTBR1 8 /* Translation Table Base Register 1 */
+#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
+#define c2_TTBCR 10 /* Translation Table Base Control R. */
+#define c3_DACR 11 /* Domain Access Control Register */
+#define c5_DFSR 12 /* Data Fault Status Register */
+#define c5_IFSR 13 /* Instruction Fault Status Register */
+#define c5_ADFSR 14 /* Auxiliary Data Fault Status R */
+#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
+#define c6_DFAR 16 /* Data Fault Address Register */
+#define c6_IFAR 17 /* Instruction Fault Address Register */
+#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */
+#define c10_PRRR 19 /* Primary Region Remap Register */
+#define c10_NMRR 20 /* Normal Memory Remap Register */
+#define c12_VBAR 21 /* Vector Base Address Register */
+#define c13_CID 22 /* Context ID Register */
+#define c13_TID_URW 23 /* Thread ID, User R/W */
+#define c13_TID_URO 24 /* Thread ID, User R/O */
+#define c13_TID_PRIV 25 /* Thread ID, Privileged */
+#define NR_CP15_REGS 26 /* Number of regs (incl. invalid) */
+
+#define ARM_EXCEPTION_RESET 0
+#define ARM_EXCEPTION_UNDEFINED 1
+#define ARM_EXCEPTION_SOFTWARE 2
+#define ARM_EXCEPTION_PREF_ABORT 3
+#define ARM_EXCEPTION_DATA_ABORT 4
+#define ARM_EXCEPTION_IRQ 5
+#define ARM_EXCEPTION_FIQ 6
+#define ARM_EXCEPTION_HVC 7
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_exit[];
+extern char __kvm_hyp_exit_end[];
+
+extern char __kvm_hyp_vector[];
+
+extern char __kvm_hyp_code_start[];
+extern char __kvm_hyp_code_end[];
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..4917c2f7e459
--- /dev/null
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_COPROC_H__
+#define __ARM_KVM_COPROC_H__
+#include <linux/kvm_host.h>
+
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_coproc_target_table {
+ unsigned target;
+ const struct coproc_reg *table;
+ size_t num;
+};
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+void kvm_coproc_table_init(void);
+
+struct kvm_one_reg;
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..fd611996bfb5
--- /dev/null
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_EMULATE_H__
+#define __ARM_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+
+u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
+static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+{
+ return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+}
+
+static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+{
+ return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+}
+
+static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
+}
+
+static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+	return cpsr_mode > USR_MODE;
+}
+
+static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
+{
+ return reg == 15;
+}
+
+#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
new file mode 100644
index 000000000000..98b4d1a72923
--- /dev/null
+++ b/arch/arm/include/asm/kvm_host.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_HOST_H__
+#define __ARM_KVM_HOST_H__
+
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/fpstate.h>
+
+#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#define KVM_MEMORY_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HAVE_ONE_REG
+
+#define KVM_VCPU_MAX_FEATURES 1
+
+/* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+
+struct kvm_vcpu;
+u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+int kvm_target_cpu(void);
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_arch {
+ /* VTTBR value associated with below pgd and vmid */
+ u64 vttbr;
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+
+ /* The VMID generation used for the virt. memory system */
+ u64 vmid_gen;
+ u32 vmid;
+
+ /* Stage-2 page table */
+ pgd_t *pgd;
+};
+
+#define KVM_NR_MEM_OBJS 40
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ void *objects[KVM_NR_MEM_OBJS];
+};
+
+struct kvm_vcpu_arch {
+ struct kvm_regs regs;
+
+ int target; /* Processor target */
+ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
+
+ /* System control coprocessor (cp15) */
+ u32 cp15[NR_CP15_REGS];
+
+ /* The CPU type we expose to the VM */
+ u32 midr;
+
+ /* Exception Information */
+ u32 hsr; /* Hyp Syndrome Register */
+ u32 hxfar; /* Hyp Data/Inst Fault Address Register */
+ u32 hpfar; /* Hyp IPA Fault Address Register */
+
+ /* Floating point registers (VFP and Advanced SIMD/NEON) */
+ struct vfp_hard_struct vfp_guest;
+ struct vfp_hard_struct *vfp_host;
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+ /* dcache set/way operation pending */
+ int last_pcpu;
+ cpumask_t require_dcache_flush;
+
+ /* Don't run the guest on this vcpu */
+ bool pause;
+
+ /* IO related fields */
+ struct kvm_decode mmio_decode;
+
+ /* Interrupt related fields */
+ u32 irq_lines; /* IRQ and FIQ levels */
+
+ /* Hyp exception information */
+ u32 hyp_pc; /* PC when exception was taken from Hyp mode */
+
+ /* Cache some mmu pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* Detect first run of a vcpu */
+ bool has_run_once;
+};
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 halt_wakeup;
+};
+
+struct kvm_vcpu_init;
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init);
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+struct kvm_one_reg;
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+struct kvm;
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+/* We do not have shadow page tables, hence the empty hooks */
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+
+static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..adcc0d7d3175
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMIO_H__
+#define __ARM_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+struct kvm_decode {
+ unsigned long rt;
+ bool sign_extend;
+};
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+ phys_addr_t phys_addr;
+ u8 data[8];
+ u32 len;
+ bool is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ run->mmio.phys_addr = mmio->phys_addr;
+ run->mmio.len = mmio->len;
+ run->mmio.is_write = mmio->is_write;
+ memcpy(run->mmio.data, mmio->data, mmio->len);
+ run->exit_reason = KVM_EXIT_MMIO;
+}
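A hypothetical caller (names and access width assumed for illustration) fills a kvm_exit_mmio from the faulting access and hands it over before exiting to userspace:

    /* Hedged usage sketch, not part of the patch. */
    static int forward_mmio_read(struct kvm_run *run, phys_addr_t fault_ipa)
    {
    	struct kvm_exit_mmio mmio = {
    		.phys_addr = fault_ipa,
    		.len       = 4,     /* assumed 32-bit access */
    		.is_write  = false,
    	};
    	kvm_prepare_mmio(run, &mmio); /* sets run->exit_reason = KVM_EXIT_MMIO */
    	return 0;                     /* let userspace emulate the access */
    }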
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ phys_addr_t fault_ipa);
+
+#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..421a20b34874
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMU_H__
+#define __ARM_KVM_MMU_H__
+
+int create_hyp_mappings(void *from, void *to);
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+void free_hyp_pmds(void);
+
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+ phys_addr_t pa, unsigned long size);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+
+phys_addr_t kvm_mmu_get_httbr(void);
+int kvm_mmu_init(void);
+void kvm_clear_hyp_idmap(void);
+
+static inline bool kvm_is_write_fault(unsigned long hsr)
+{
+ unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
+ if (hsr_ec == HSR_EC_IABT)
+ return false;
+ else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
+ return false;
+ else
+ return true;
+}
+
+#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
new file mode 100644
index 000000000000..9a83d98bf170
--- /dev/null
+++ b/arch/arm/include/asm/kvm_psci.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_PSCI_H__
+#define __ARM_KVM_PSCI_H__
+
+bool kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb57f2c..5cf2e979b4be 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
#endif
struct pci_ops *ops;
int nr_controllers;
+ void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
void (*preinit)(void);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index b11105c8599a..57870ab313c5 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -36,23 +36,23 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
* The maximum size of a 26-bit user space task.
*/
-#define TASK_SIZE_26 UL(0x04000000)
+#define TASK_SIZE_26 (UL(1) << 26)
/*
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
#ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
#endif
#if TASK_SIZE > MODULES_VADDR
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644
index 000000000000..bc3a9174417c
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_OPCODES_SEC_H
+#define __ASM_ARM_OPCODES_SEC_H
+
+#include <asm/opcodes.h>
+
+#define __SMC(imm4) __inst_arm_thumb32( \
+ 0xE1600070 | (((imm4) & 0xF) << 0), \
+ 0xF7F08000 | (((imm4) & 0xF) << 16) \
+)
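For reference, the expansion this macro produces (derived from the definition above, not additional patch content):

    /* __SMC(0) == 0xE1600070 (ARM: SMC #0) or 0xF7F08000 (Thumb-2); */
    /* a nonzero imm4 lands in bits [3:0] of the ARM encoding and in */
    /* bits [19:16] of the Thumb-2 encoding.                         */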
+
+#endif /* __ASM_ARM_OPCODES_SEC_H */
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 74e211a6fb24..e796c598513b 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -10,6 +10,7 @@
#define __ASM_ARM_OPCODES_H
#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a..12f71a190422 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
static inline void outer_flush_all(void) { }
static inline void outer_inv_all(void) { }
static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
#endif
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index d7952824c5c4..18f5cef82ad5 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -32,6 +32,9 @@
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
#define PMD_BIT4 (_AT(pmdval_t, 0))
#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT (61)
+#define PMD_APTABLE (_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT)
+#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59)
/*
* - section
@@ -41,9 +44,11 @@
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
+#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a3f37929940a..6ef8afd1b64c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -104,11 +104,29 @@
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
+/*
+ * 2nd stage PTE definitions for LPAE.
+ */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
+
+/*
+ * Hyp-mode PL2 PTE definitions for LPAE.
+ */
+#define L_PTE_HYP L_PTE_USER
+
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & 2))
#define pud_present(pud) (pud_val(pud))
+#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
#define pud_clear(pudp) \
do { \
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9c82f988c0e3..f30ac3b55ba9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
+extern pgprot_t pgprot_hyp_device;
+extern pgprot_t pgprot_s2;
+extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
@@ -82,6 +85,10 @@ extern pgprot_t pgprot_kernel;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
+#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
+#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
new file mode 100644
index 000000000000..ce0dbe7c1625
--- /dev/null
+++ b/arch/arm/include/asm/psci.h
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_PSCI_H
+#define __ASM_ARM_PSCI_H
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
+#endif /* __ASM_ARM_PSCI_H */
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a69..6220e9fdf4c7 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- unsigned long tmp;
- u32 slock;
-
smp_mb();
-
- __asm__ __volatile__(
-" mov %1, #1\n"
-"1: ldrex %0, [%2]\n"
-" uadd16 %0, %0, %1\n"
-" strex %1, %0, [%2]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock)
- : "cc");
-
+ lock->tickets.owner++;
dsb_sev();
}
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 86164df86cb4..50af92bac737 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -24,9 +24,9 @@
/*
* Flag indicating that the kernel was not entered in the same mode on every
* CPU. The zImage loader stashes this value in an SPSR, so we need an
- * architecturally defined flag bit here (the N flag, as it happens)
+ * architecturally defined flag bit here.
*/
-#define BOOT_CPU_MODE_MISMATCH (1<<31)
+#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..3303ff5adbf3
--- /dev/null
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+
+#define KVM_REG_SIZE(id) \
+ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
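The size field lives in the top bits of the 64-bit register id (the shared KVM encoding, defined outside this file); assuming the generic KVM_REG_SIZE_* values, the macro works out as:

    /* KVM_REG_SIZE_U32: size field == 2 -> 1U << 2 == 4 bytes */
    /* KVM_REG_SIZE_U64: size field == 3 -> 1U << 3 == 8 bytes */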
+/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
+#define KVM_ARM_SVC_sp svc_regs[0]
+#define KVM_ARM_SVC_lr svc_regs[1]
+#define KVM_ARM_SVC_spsr svc_regs[2]
+#define KVM_ARM_ABT_sp abt_regs[0]
+#define KVM_ARM_ABT_lr abt_regs[1]
+#define KVM_ARM_ABT_spsr abt_regs[2]
+#define KVM_ARM_UND_sp und_regs[0]
+#define KVM_ARM_UND_lr und_regs[1]
+#define KVM_ARM_UND_spsr und_regs[2]
+#define KVM_ARM_IRQ_sp irq_regs[0]
+#define KVM_ARM_IRQ_lr irq_regs[1]
+#define KVM_ARM_IRQ_spsr irq_regs[2]
+
+/* Valid only for fiq_regs in struct kvm_regs */
+#define KVM_ARM_FIQ_r8 fiq_regs[0]
+#define KVM_ARM_FIQ_r9 fiq_regs[1]
+#define KVM_ARM_FIQ_r10 fiq_regs[2]
+#define KVM_ARM_FIQ_fp fiq_regs[3]
+#define KVM_ARM_FIQ_ip fiq_regs[4]
+#define KVM_ARM_FIQ_sp fiq_regs[5]
+#define KVM_ARM_FIQ_lr fiq_regs[6]
+#define KVM_ARM_FIQ_spsr fiq_regs[7]
+
+struct kvm_regs {
+ struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
+ __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
+ __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
+ __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */
+ __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
+ __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
+};
+
+/* Supported Processor Types */
+#define KVM_ARM_TARGET_CORTEX_A15 0
+#define KVM_ARM_NUM_TARGETS 1
+
+#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
+
+struct kvm_vcpu_init {
+ __u32 target;
+ __u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT 16
+#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
+#define KVM_REG_ARM_32_OPC2_SHIFT 0
+#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
+#define KVM_REG_ARM_OPC1_SHIFT 3
+#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
+#define KVM_REG_ARM_CRM_SHIFT 7
+#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
+#define KVM_REG_ARM_32_CRN_SHIFT 11
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
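As a worked example (assuming the usual ARM pt_regs layout, where usr_regs leads struct kvm_regs and the pc is its sixteenth word):

    /* KVM_REG_ARM_CORE_REG(usr_regs.ARM_pc) == 60 / 4 == 15 */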
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
+
+/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
+#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
+#define KVM_REG_ARM_VFP_BASE_REG 0x0
+#define KVM_REG_ARM_VFP_FPSID 0x1000
+#define KVM_REG_ARM_VFP_FPSCR 0x1001
+#define KVM_REG_ARM_VFP_MVFR1 0x1006
+#define KVM_REG_ARM_VFP_MVFR0 0x1007
+#define KVM_REG_ARM_VFP_FPEXC 0x1008
+#define KVM_REG_ARM_VFP_FPINST 0x1009
+#define KVM_REG_ARM_VFP_FPINST2 0x100A
+
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT 24
+#define KVM_ARM_IRQ_TYPE_MASK 0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT 16
+#define KVM_ARM_IRQ_VCPU_MASK 0xff
+#define KVM_ARM_IRQ_NUM_SHIFT 0
+#define KVM_ARM_IRQ_NUM_MASK 0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU 0
+#define KVM_ARM_IRQ_TYPE_SPI 1
+#define KVM_ARM_IRQ_TYPE_PPI 2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ 0
+#define KVM_ARM_IRQ_CPU_FIQ 1
+
+/* Highest supported SPI, from VGIC_NR_IRQS */
+#define KVM_ARM_IRQ_GIC_MAX 127
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE 0x95c1ba5e
+#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS 0
+#define KVM_PSCI_RET_NI ((unsigned long)-1)
+#define KVM_PSCI_RET_INVAL ((unsigned long)-2)
+#define KVM_PSCI_RET_DENIED ((unsigned long)-3)
+
+#endif /* __ARM_KVM_H__ */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5bbec7b8183e..5f3338eacad2 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
+obj-$(CONFIG_ARM_PSCI) += psci.o
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c985b481192c..c8b3272dfed1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#ifdef CONFIG_KVM_ARM_HOST
+#include <linux/kvm_host.h>
+#endif
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
@@ -146,5 +149,27 @@ int main(void)
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+#ifdef CONFIG_KVM_ARM_HOST
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
+ DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15));
+ DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest));
+ DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host));
+ DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
+ DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
+ DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));
+ DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs));
+ DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs));
+ DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs));
+ DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
+ DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
+ DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
+ DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
+ DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar));
+ DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar));
+ DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc));
+ DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+#endif
return 0;
}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 379cf3292390..a1f73b502ef0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
{
int ret;
struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
return 0;
}
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
{
struct pci_sys_data *sys = NULL;
int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
sys->map_irq = hw->map_irq;
INIT_LIST_HEAD(&sys->resources);
+ if (hw->private_data)
+ sys->private_data = hw->private_data[nr];
+
ret = hw->setup(nr, sys);
if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
}
}
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
{
struct pci_sys_data *sys;
LIST_HEAD(head);
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5ff2e77782b1..5eae53e7a2e1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
+#include <linux/cpu_pm.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@@ -35,6 +36,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>
+#include <asm/hardware/coresight.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -49,6 +51,9 @@ static int core_num_wrps;
/* Debug architecture version. */
static u8 debug_arch;
+/* Does debug architecture support OS Save and Restore? */
+static bool has_ossr;
+
/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;
@@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = {
.fn = debug_reg_trap,
};
+/* Does this core support OS Save and Restore? */
+static bool core_has_os_save_restore(void)
+{
+ u32 oslsr;
+
+ switch (get_debug_arch()) {
+ case ARM_DEBUG_ARCH_V7_1:
+ return true;
+ case ARM_DEBUG_ARCH_V7_ECP14:
+ ARM_DBG_READ(c1, c1, 4, oslsr);
+ if (oslsr & ARM_OSLSR_OSLM0)
+ return true;
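+		/* fall through */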
+ default:
+ return false;
+ }
+}
+
static void reset_ctrl_regs(void *unused)
{
int i, raw_num_brps, err = 0, cpu = smp_processor_id();
@@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused)
if ((val & 0x1) == 0)
err = -EPERM;
- /*
- * Check whether we implement OS save and restore.
- */
- ARM_DBG_READ(c1, c1, 4, val);
- if ((val & 0x9) == 0)
+ if (!has_ossr)
goto clear_vcr;
break;
case ARM_DEBUG_ARCH_V7_1:
@@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused)
/*
* Unconditionally clear the OS lock by writing a value
- * other than 0xC5ACCE55 to the access register.
+ * other than CS_LAR_KEY to the access register.
*/
- ARM_DBG_WRITE(c1, c0, 4, 0);
+ ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
isb();
/*
@@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
.notifier_call = dbg_reset_notify,
};
+#ifdef CONFIG_CPU_PM
+static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
+ void *v)
+{
+ if (action == CPU_PM_EXIT)
+ reset_ctrl_regs(NULL);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+ .notifier_call = dbg_cpu_pm_notify,
+};
+
+static void __init pm_init(void)
+{
+ cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+}
+#else
+static inline void pm_init(void)
+{
+}
+#endif
+
static int __init arch_hw_breakpoint_init(void)
{
debug_arch = get_debug_arch();
@@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
+ has_ossr = core_has_os_save_restore();
+
/* Determine how many BRPs/WRPs are available. */
core_num_brps = get_num_brps();
core_num_wrps = get_num_wrps();
@@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void)
hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
TRAP_HWBKPT, "breakpoint debug exception");
- /* Register hotplug notifier. */
+ /* Register hotplug and PM notifiers. */
register_cpu_notifier(&dbg_reset_nb);
+ pm_init();
return 0;
}
arch_initcall(arch_hw_breakpoint_init);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f9e8657dd241..31e0eb353cd8 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -149,12 +149,6 @@ again:
static void
armpmu_read(struct perf_event *event)
{
- struct hw_perf_event *hwc = &event->hw;
-
- /* Don't read disabled counters! */
- if (hwc->idx < 0)
- return;
-
armpmu_event_update(event);
}
@@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- WARN_ON(idx < 0);
-
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
@@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- int mapping, err;
+ int mapping;
mapping = armpmu->map_event(event);
@@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event)
local64_set(&hwc->period_left, hwc->sample_period);
}
- err = 0;
if (event->group_leader != event) {
- err = validate_group(event);
- if (err)
+		if (validate_group(event) != 0)
return -EINVAL;
}
- return err;
+ return 0;
}
static int armpmu_event_init(struct perf_event *event)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 5f6620684e25..1f2740e3dbc0 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->free_irq = cpu_pmu_free_irq;
/* Ensure the PMU has sane values out of reset. */
- if (cpu_pmu && cpu_pmu->reset)
+ if (cpu_pmu->reset)
on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}
@@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
static int probe_current_pmu(struct arm_pmu *pmu)
{
int cpu = get_cpu();
- unsigned long cpuid = read_cpuid_id();
- unsigned long implementor = (cpuid & 0xFF000000) >> 24;
- unsigned long part_number = (cpuid & 0xFFF0);
+ unsigned long implementor = read_cpuid_implementor();
+ unsigned long part_number = read_cpuid_part_number();
int ret = -ENODEV;
pr_info("probing PMU on CPU %d\n", cpu);
/* ARM Ltd CPUs. */
- if (0x41 == implementor) {
+ if (implementor == ARM_CPU_IMP_ARM) {
switch (part_number) {
- case 0xB360: /* ARM1136 */
- case 0xB560: /* ARM1156 */
- case 0xB760: /* ARM1176 */
+ case ARM_CPU_PART_ARM1136:
+ case ARM_CPU_PART_ARM1156:
+ case ARM_CPU_PART_ARM1176:
ret = armv6pmu_init(pmu);
break;
- case 0xB020: /* ARM11mpcore */
+ case ARM_CPU_PART_ARM11MPCORE:
ret = armv6mpcore_pmu_init(pmu);
break;
- case 0xC080: /* Cortex-A8 */
+ case ARM_CPU_PART_CORTEX_A8:
ret = armv7_a8_pmu_init(pmu);
break;
- case 0xC090: /* Cortex-A9 */
+ case ARM_CPU_PART_CORTEX_A9:
ret = armv7_a9_pmu_init(pmu);
break;
- case 0xC050: /* Cortex-A5 */
+ case ARM_CPU_PART_CORTEX_A5:
ret = armv7_a5_pmu_init(pmu);
break;
- case 0xC0F0: /* Cortex-A15 */
+ case ARM_CPU_PART_CORTEX_A15:
ret = armv7_a15_pmu_init(pmu);
break;
- case 0xC070: /* Cortex-A7 */
+ case ARM_CPU_PART_CORTEX_A7:
ret = armv7_a7_pmu_init(pmu);
break;
}
/* Intel CPUs [xscale]. */
- } else if (0x69 == implementor) {
- part_number = (cpuid >> 13) & 0x7;
- switch (part_number) {
- case 1:
+ } else if (implementor == ARM_CPU_IMP_INTEL) {
+ switch (xscale_cpu_arch_version()) {
+ case ARM_CPU_XSCALE_ARCH_V1:
ret = xscale1pmu_init(pmu);
break;
- case 2:
+ case ARM_CPU_XSCALE_ARCH_V2:
ret = xscale2pmu_init(pmu);
break;
}
@@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
}
if (ret) {
- pr_info("failed to register PMU devices!");
- kfree(pmu);
- return ret;
+ pr_info("failed to probe PMU!");
+ goto out_free;
}
cpu_pmu = pmu;
cpu_pmu->plat_device = pdev;
cpu_pmu_init(cpu_pmu);
- armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+ ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
- return 0;
+ if (!ret)
+ return 0;
+
+out_free:
+ pr_info("failed to register PMU devices!");
+ kfree(pmu);
+ return ret;
}
static struct platform_driver cpu_pmu_driver = {
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 041d0526a288..03664b0e8fa4 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4fbc757d9cff..8c79a9e70b83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
/*
* The prefetch counters don't differentiate between the I
@@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 2b0fe30ec12e..63990c42fac9 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20aa..047d3e40e470 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -172,14 +172,9 @@ static void default_idle(void)
local_irq_enable();
}
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL(pm_idle);
-
/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -210,10 +205,10 @@ void cpu_idle(void)
} else if (!need_resched()) {
stop_critical_timings();
if (cpuidle_idle_call())
- pm_idle();
+ default_idle();
start_critical_timings();
/*
- * pm_idle functions must always
+ * default_idle functions must always
* return with IRQs enabled.
*/
WARN_ON(irqs_disabled());
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
new file mode 100644
index 000000000000..36531643cc2c
--- /dev/null
+++ b/arch/arm/kernel/psci.c
@@ -0,0 +1,211 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <asm/compiler.h>
+#include <asm/errno.h>
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+#include <asm/psci.h>
+
+struct psci_operations psci_ops;
+
+static int (*invoke_psci_fn)(u32, u32, u32, u32);
+
+enum psci_function {
+ PSCI_FN_CPU_SUSPEND,
+ PSCI_FN_CPU_ON,
+ PSCI_FN_CPU_OFF,
+ PSCI_FN_MIGRATE,
+ PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+#define PSCI_RET_SUCCESS 0
+#define PSCI_RET_EOPNOTSUPP -1
+#define PSCI_RET_EINVAL -2
+#define PSCI_RET_EPERM -3
+
+static int psci_to_linux_errno(int errno)
+{
+ switch (errno) {
+ case PSCI_RET_SUCCESS:
+ return 0;
+ case PSCI_RET_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case PSCI_RET_EINVAL:
+ return -EINVAL;
+ case PSCI_RET_EPERM:
+ return -EPERM;
+ };
+
+ return -EINVAL;
+}
+
+#define PSCI_POWER_STATE_ID_MASK 0xffff
+#define PSCI_POWER_STATE_ID_SHIFT 0
+#define PSCI_POWER_STATE_TYPE_MASK 0x1
+#define PSCI_POWER_STATE_TYPE_SHIFT 16
+#define PSCI_POWER_STATE_AFFL_MASK 0x3
+#define PSCI_POWER_STATE_AFFL_SHIFT 24
+
+static u32 psci_power_state_pack(struct psci_power_state state)
+{
+ return ((state.id & PSCI_POWER_STATE_ID_MASK)
+ << PSCI_POWER_STATE_ID_SHIFT) |
+ ((state.type & PSCI_POWER_STATE_TYPE_MASK)
+ << PSCI_POWER_STATE_TYPE_SHIFT) |
+ ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
+ << PSCI_POWER_STATE_AFFL_SHIFT);
+}
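As a worked example (the field values here are hypothetical, not taken from the patch), an id of 1, type 1 and affinity level 2 pack as 0x0001 | 0x00010000 | 0x02000000:

    /* Illustrative only */
    struct psci_power_state state = {
        .id             = 1,    /* bits [15:0]  -> 0x00000001 */
        .type           = 1,    /* bit  16      -> 0x00010000 */
        .affinity_level = 2,    /* bits [25:24] -> 0x02000000 */
    };
    u32 power_state = psci_power_state_pack(state);  /* 0x02010001 */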
+
+/*
+ * The following two functions are invoked via the invoke_psci_fn pointer
+ * and will not be inlined, allowing us to piggyback on the AAPCS.
+ */
+static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
+ u32 arg2)
+{
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ __asmeq("%3", "r3")
+ __HVC(0)
+ : "+r" (function_id)
+ : "r" (arg0), "r" (arg1), "r" (arg2));
+
+ return function_id;
+}
+
+static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
+ u32 arg2)
+{
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ __asmeq("%3", "r3")
+ __SMC(0)
+ : "+r" (function_id)
+ : "r" (arg0), "r" (arg1), "r" (arg2));
+
+ return function_id;
+}
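Because both helpers are noinline and take four 32-bit arguments, the AAPCS pins down exactly the register layout the PSCI firmware interface expects: function_id arrives in r0, arg0-arg2 in r1-r3, and the firmware's return code comes back in r0 (and is returned as function_id). A sketch of the mapping for a CPU_ON invocation:

    /* Per the AAPCS: r0 = fn, r1 = cpuid, r2 = entry_point, r3 = 0;
     * the PSCI return code comes back in r0. */
    err = invoke_psci_fn(fn, cpuid, entry_point, 0);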
+
+static int psci_cpu_suspend(struct psci_power_state state,
+ unsigned long entry_point)
+{
+ int err;
+ u32 fn, power_state;
+
+ fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+ power_state = psci_power_state_pack(state);
+ err = invoke_psci_fn(fn, power_state, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(struct psci_power_state state)
+{
+ int err;
+ u32 fn, power_state;
+
+ fn = psci_function_id[PSCI_FN_CPU_OFF];
+ power_state = psci_power_state_pack(state);
+ err = invoke_psci_fn(fn, power_state, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_ON];
+ err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_MIGRATE];
+ err = invoke_psci_fn(fn, cpuid, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", },
+ {},
+};
+
+static int __init psci_init(void)
+{
+ struct device_node *np;
+ const char *method;
+ u32 id;
+
+ np = of_find_matching_node(NULL, psci_of_match);
+ if (!np)
+ return 0;
+
+ pr_info("probing function IDs from device-tree\n");
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warning("missing \"method\" property\n");
+ goto out_put_node;
+ }
+
+ if (!strcmp("hvc", method)) {
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ } else if (!strcmp("smc", method)) {
+ invoke_psci_fn = __invoke_psci_fn_smc;
+ } else {
+ pr_warning("invalid \"method\" property: %s\n", method);
+ goto out_put_node;
+ }
+
+ if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+ }
+
+ if (!of_property_read_u32(np, "cpu_off", &id)) {
+ psci_function_id[PSCI_FN_CPU_OFF] = id;
+ psci_ops.cpu_off = psci_cpu_off;
+ }
+
+ if (!of_property_read_u32(np, "cpu_on", &id)) {
+ psci_function_id[PSCI_FN_CPU_ON] = id;
+ psci_ops.cpu_on = psci_cpu_on;
+ }
+
+ if (!of_property_read_u32(np, "migrate", &id)) {
+ psci_function_id[PSCI_FN_MIGRATE] = id;
+ psci_ops.migrate = psci_migrate;
+ }
+
+out_put_node:
+ of_node_put(np);
+ return 0;
+}
+early_initcall(psci_init);
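Platform SMP code can then bring up secondary CPUs through the populated ops table. A minimal sketch, assuming hypothetical names my_boot_secondary and secondary_startup (neither is part of this patch):

    #include <asm/psci.h>
    #include <asm/smp_plat.h>

    static int my_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
        if (psci_ops.cpu_on)
            return psci_ops.cpu_on(cpu_logical_map(cpu),
                                   virt_to_phys(secondary_startup));
        return -ENODEV;
    }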
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index fc6692e2b603..bd6f56b9ec21 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
* detectable in cyc_to_fixed_sched_clock().
*/
raw_local_irq_save(flags);
- cd.epoch_cyc = cyc;
+ cd.epoch_cyc_copy = cyc;
smp_wmb();
cd.epoch_ns = ns;
smp_wmb();
- cd.epoch_cyc_copy = cyc;
+ cd.epoch_cyc = cyc;
raw_local_irq_restore(flags);
}
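The swapped order turns the triple into a tiny seqlock: the writer now updates the copy first and the primary last, so a reader that samples epoch_cyc, then epoch_ns, then re-checks against epoch_cyc_copy can never pair a new epoch_ns with a stale epoch_cyc. Roughly what the reader side in this file does:

    u32 epoch_cyc;
    u64 epoch_ns;

    do {
        epoch_cyc = cd.epoch_cyc;
        smp_rmb();
        epoch_ns = cd.epoch_ns;
        smp_rmb();
    } while (epoch_cyc != cd.epoch_cyc_copy);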
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..365c8d92e2eb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
smp_ops.smp_init_cpus();
}
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
- if (smp_ops.smp_prepare_cpus)
- smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
- if (smp_ops.smp_secondary_init)
- smp_ops.smp_secondary_init(cpu);
-}
-
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
return 1;
}
-static void platform_cpu_die(unsigned int cpu)
-{
- if (smp_ops.cpu_die)
- smp_ops.cpu_die(cpu);
-}
-
static int platform_cpu_disable(unsigned int cpu)
{
if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
* actual CPU shutdown procedure is at least platform (if not
* CPU) specific.
*/
- platform_cpu_die(cpu);
+ if (smp_ops.cpu_die)
+ smp_ops.cpu_die(cpu);
/*
* Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* Give the platform a chance to do its own initialisation.
*/
- platform_secondary_init(cpu);
+ if (smp_ops.smp_secondary_init)
+ smp_ops.smp_secondary_init(cpu);
notify_cpu_starting(cpu);
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time. A platform should
- * re-initialize the map in platform_smp_prepare_cpus() if
- * present != possible (e.g. physical hotplug).
+ * re-initialize the map in the platform's smp_prepare_cpus()
+ * if present != possible (e.g. physical hotplug).
*/
init_cpu_present(cpu_possible_mask);
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* Initialise the SCU if there are more than one CPU
* and let them know where to start.
*/
- platform_smp_prepare_cpus(max_cpus);
+ if (smp_ops.smp_prepare_cpus)
+ smp_ops.smp_prepare_cpus(max_cpus);
}
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 49f335d301ba..ae0c7bb39ae8 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,7 +31,6 @@ static void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
-static bool common_setup_called;
static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu **twd_evt;
@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct clk *twd_get_clock(void)
+static void twd_get_clock(struct device_node *np)
{
- struct clk *clk;
int err;
- clk = clk_get_sys("smp_twd", NULL);
- if (IS_ERR(clk)) {
- pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
- return clk;
+ if (np)
+ twd_clk = of_clk_get(np, 0);
+ else
+ twd_clk = clk_get_sys("smp_twd", NULL);
+
+ if (IS_ERR(twd_clk)) {
+ pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
+ return;
}
- err = clk_prepare_enable(clk);
+ err = clk_prepare_enable(twd_clk);
if (err) {
pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
- clk_put(clk);
- return ERR_PTR(err);
+ clk_put(twd_clk);
+ return;
}
- return clk;
+ twd_timer_rate = clk_get_rate(twd_clk);
}
/*
@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
}
per_cpu(percpu_setup_called, cpu) = true;
- /*
- * This stuff only need to be done once for the entire TWD cluster
- * during the runtime of the system.
- */
- if (!common_setup_called) {
- twd_clk = twd_get_clock();
-
- /*
- * We use IS_ERR_OR_NULL() here, because if the clock stubs
- * are active we will get a valid clk reference which is
- * however NULL and will return the rate 0. In that case we
- * need to calibrate the rate instead.
- */
- if (!IS_ERR_OR_NULL(twd_clk))
- twd_timer_rate = clk_get_rate(twd_clk);
- else
- twd_calibrate_rate();
-
- common_setup_called = true;
- }
+ twd_calibrate_rate();
/*
* The following is done once per CPU the first time .setup() is
@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
.stop = twd_timer_stop,
};
-static int __init twd_local_timer_common_register(void)
+static int __init twd_local_timer_common_register(struct device_node *np)
{
int err;
@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)
if (err)
goto out_irq;
+ twd_get_clock(np);
+
return 0;
out_irq:
@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
if (!twd_base)
return -ENOMEM;
- return twd_local_timer_common_register();
+ return twd_local_timer_common_register(NULL);
}
#ifdef CONFIG_OF
@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)
goto out;
}
- err = twd_local_timer_common_register();
+ err = twd_local_timer_common_register(np);
out:
WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 11c1785bf63e..b571484e9f03 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -19,7 +19,11 @@
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
- VMLINUX_SYMBOL(__idmap_text_end) = .;
+ VMLINUX_SYMBOL(__idmap_text_end) = .; \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
+ *(.hyp.idmap.text) \
+ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
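The new start/end symbols let C code locate and size the HYP init text, for example when creating its identity mapping. A minimal sketch using only the symbols introduced above:

    extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

    static unsigned long hyp_idmap_size(void)
    {
        /* Bytes of code placed in the .hyp.idmap.text section */
        return __hyp_idmap_text_end - __hyp_idmap_text_start;
    }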
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
new file mode 100644
index 000000000000..05227cb57a7b
--- /dev/null
+++ b/arch/arm/kvm/Kconfig
@@ -0,0 +1,56 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ bool "Kernel-based Virtual Machine (KVM) support"
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ select KVM_MMIO
+ select KVM_ARM_HOST
+ depends on ARM_VIRT_EXT && ARM_LPAE
+ ---help---
+ Support hosting virtualized guest machines. You will also
+ need to select one or more of the processor modules below.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
+
+config KVM_ARM_HOST
+ bool "KVM host support for ARM cpus."
+ depends on KVM
+ depends on MMU
+ select MMU_NOTIFIER
+ ---help---
+ Provides host support for ARM processors.
+
+config KVM_ARM_MAX_VCPUS
+ int "Number maximum supported virtual CPUs per VM"
+ depends on KVM_ARM_HOST
+ default 4
+ help
+ Static number of max supported virtual CPUs per VM.
+
+ If you choose a high number, the vcpu structures will be quite
+ large, so only choose a reasonable number that you expect to
+ actually use.
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
new file mode 100644
index 000000000000..ea27987bd07f
--- /dev/null
+++ b/arch/arm/kvm/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+plus_virt := $(call as-instr,.arch_extension virt,+virt)
+ifeq ($(plus_virt),+virt)
+ plus_virt_def := -DREQUIRES_VIRT=1
+endif
+
+ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
+CFLAGS_arm.o := -I. $(plus_virt_def)
+CFLAGS_mmu.o := -I.
+
+AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
+AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
+
+kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+obj-y += kvm-arm.o init.o interrupts.o
+obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += coproc.o coproc_a15.o mmio.o psci.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
new file mode 100644
index 000000000000..2d30e3afdaf9
--- /dev/null
+++ b/arch/arm/kvm/arm.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/kvm.h>
+#include <trace/events/kvm.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#include <asm/unified.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/mman.h>
+#include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/virt.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_psci.h>
+#include <asm/opcodes.h>
+
+#ifdef REQUIRES_VIRT
+__asm__(".arch_extension virt");
+#endif
+
+static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static unsigned long hyp_default_vectors;
+
+/* The VMID used in the VTTBR */
+static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+static u8 kvm_next_vmid;
+static DEFINE_SPINLOCK(kvm_vmid_lock);
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ *(int *)rtn = 0;
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+/**
+ * kvm_arch_init_vm - initializes a VM data structure
+ * @kvm: pointer to the KVM struct
+ */
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+ int ret = 0;
+
+ if (type)
+ return -EINVAL;
+
+ ret = kvm_alloc_stage2_pgd(kvm);
+ if (ret)
+ goto out_fail_alloc;
+
+ ret = create_hyp_mappings(kvm, kvm + 1);
+ if (ret)
+ goto out_free_stage2_pgd;
+
+ /* Mark the initial VMID generation invalid */
+ kvm->arch.vmid_gen = 0;
+
+ return ret;
+out_free_stage2_pgd:
+ kvm_free_stage2_pgd(kvm);
+out_fail_alloc:
+ return ret;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ return 0;
+}
+
+/**
+ * kvm_arch_destroy_vm - destroy the VM data structure
+ * @kvm: pointer to the KVM struct
+ */
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ int i;
+
+ kvm_free_stage2_pgd(kvm);
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (kvm->vcpus[i]) {
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
+ kvm->vcpus[i] = NULL;
+ }
+ }
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ int r;
+ switch (ext) {
+ case KVM_CAP_USER_MEMORY:
+ case KVM_CAP_SYNC_MMU:
+ case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+ case KVM_CAP_ONE_REG:
+ case KVM_CAP_ARM_PSCI:
+ r = 1;
+ break;
+ case KVM_CAP_COALESCED_MMIO:
+ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+ break;
+ case KVM_CAP_NR_VCPUS:
+ r = num_online_cpus();
+ break;
+ case KVM_CAP_MAX_VCPUS:
+ r = KVM_MAX_VCPUS;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ return r;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
+{
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ int err;
+ struct kvm_vcpu *vcpu;
+
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ err = create_hyp_mappings(vcpu, vcpu + 1);
+ if (err)
+ goto vcpu_uninit;
+
+ return vcpu;
+vcpu_uninit:
+ kvm_vcpu_uninit(vcpu);
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+ return ERR_PTR(err);
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_mmu_free_memory_caches(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int __attribute_const__ kvm_target_cpu(void)
+{
+ unsigned long implementor = read_cpuid_implementor();
+ unsigned long part_number = read_cpuid_part_number();
+
+ if (implementor != ARM_CPU_IMP_ARM)
+ return -EINVAL;
+
+ switch (part_number) {
+ case ARM_CPU_PART_CORTEX_A15:
+ return KVM_ARM_TARGET_CORTEX_A15;
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ /* Force users to call KVM_ARM_VCPU_INIT */
+ vcpu->arch.target = -1;
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ vcpu->cpu = cpu;
+ vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
+
+ /*
+ * Check whether this vcpu requires the cache to be flushed on
+ * this physical CPU. This is a consequence of doing dcache
+ * operations by set/way on this vcpu. We do it here to be in
+ * a non-preemptible section.
+ */
+ if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
+ flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
+
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+/**
+ * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
+ * @v: The VCPU pointer
+ *
+ * If the guest CPU is not waiting for interrupts or an interrupt line is
+ * asserted, the CPU is by definition runnable.
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+ return !!v->arch.irq_lines;
+}
+
+/* Just ensure a guest exit from a particular CPU */
+static void exit_vm_noop(void *info)
+{
+}
+
+void force_vm_exit(const cpumask_t *mask)
+{
+ smp_call_function_many(mask, exit_vm_noop, NULL, true);
+}
+
+/**
+ * need_new_vmid_gen - check that the VMID is still valid
+ * @kvm: The VM whose VMID to check
+ *
+ * Returns true if there is a new generation of VMIDs being used
+ *
+ * The hardware supports only 256 values with the value zero reserved for the
+ * host, so we check if an assigned value belongs to a previous generation,
+ * which requires us to assign a new value. If we're the first to use a
+ * VMID for the new generation, we must flush necessary caches and TLBs on all
+ * CPUs.
+ */
+static bool need_new_vmid_gen(struct kvm *kvm)
+{
+ return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+}
+
+/**
+ * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
+ * @kvm: The guest that we are about to run
+ *
+ * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
+ * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
+ * caches and TLBs.
+ */
+static void update_vttbr(struct kvm *kvm)
+{
+ phys_addr_t pgd_phys;
+ u64 vmid;
+
+ if (!need_new_vmid_gen(kvm))
+ return;
+
+ spin_lock(&kvm_vmid_lock);
+
+ /*
+ * We need to re-check the vmid_gen here to ensure that if another vcpu
+ * already allocated a valid vmid for this vm, then this vcpu should
+ * use the same vmid.
+ */
+ if (!need_new_vmid_gen(kvm)) {
+ spin_unlock(&kvm_vmid_lock);
+ return;
+ }
+
+ /* First user of a new VMID generation? */
+ if (unlikely(kvm_next_vmid == 0)) {
+ atomic64_inc(&kvm_vmid_gen);
+ kvm_next_vmid = 1;
+
+ /*
+ * On SMP we know no other CPUs can use this CPU's or each
+ * other's VMID after force_vm_exit returns since the
+ * kvm_vmid_lock blocks them from reentry to the guest.
+ */
+ force_vm_exit(cpu_all_mask);
+ /*
+ * Now broadcast TLB + ICACHE invalidation over the inner
+ * shareable domain to make sure all data structures are
+ * clean.
+ */
+ kvm_call_hyp(__kvm_flush_vm_context);
+ }
+
+ kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
+ kvm->arch.vmid = kvm_next_vmid;
+ kvm_next_vmid++;
+
+ /* update vttbr to be used with the new vmid */
+ pgd_phys = virt_to_phys(kvm->arch.pgd);
+ vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+ kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
+ kvm->arch.vttbr |= vmid;
+
+ spin_unlock(&kvm_vmid_lock);
+}
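Concretely, the resulting VTTBR carries the stage-2 pgd base in its low bits and the VMID in its own field. With illustrative numbers (pgd at 0x80000000, VMID 5, and the 8-bit VMID field at bit 48, which is where VTTBR_VMID_SHIFT points on LPAE):

    u64 vttbr = 0x80000000ULL | (5ULL << 48);
    /* vttbr == 0x0005000080000000 */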
+
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* SVC called from Hyp mode should never get here */
+ kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+ BUG();
+ return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+ vcpu->arch.hsr & HSR_HVC_IMM_MASK);
+
+ if (kvm_psci_call(vcpu))
+ return 1;
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ if (kvm_psci_call(vcpu))
+ return 1;
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* The hypervisor should never cause aborts */
+ kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
+ vcpu->arch.hxfar, vcpu->arch.hsr);
+ return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* This is either an error in the world-switch code or an external abort */
+ kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
+ vcpu->arch.hxfar, vcpu->arch.hsr);
+ return -EFAULT;
+}
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+static exit_handle_fn arm_exit_handlers[] = {
+ [HSR_EC_WFI] = kvm_handle_wfi,
+ [HSR_EC_CP15_32] = kvm_handle_cp15_32,
+ [HSR_EC_CP15_64] = kvm_handle_cp15_64,
+ [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
+ [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
+ [HSR_EC_CP14_64] = kvm_handle_cp14_access,
+ [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
+ [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
+ [HSR_EC_SVC_HYP] = handle_svc_hyp,
+ [HSR_EC_HVC] = handle_hvc,
+ [HSR_EC_SMC] = handle_smc,
+ [HSR_EC_IABT] = kvm_handle_guest_abort,
+ [HSR_EC_IABT_HYP] = handle_pabt_hyp,
+ [HSR_EC_DABT] = kvm_handle_guest_abort,
+ [HSR_EC_DABT_HYP] = handle_dabt_hyp,
+};
+
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed. So let's re-implement the hardware, in
+ * software!
+ */
+static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr, cond, insn;
+
+ /*
+ * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+ * catch undefined instructions, and then we won't get past
+ * the arm_exit_handlers test anyway.
+ */
+ BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
+
+ /* Top two bits non-zero? Unconditional. */
+ if (vcpu->arch.hsr >> 30)
+ return true;
+
+ cpsr = *vcpu_cpsr(vcpu);
+
+ /* Is condition field valid? */
+ if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
+ cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
+ else {
+ /* This can happen in Thumb mode: examine IT state. */
+ unsigned long it;
+
+ it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+ /* it == 0 => unconditional. */
+ if (it == 0)
+ return true;
+
+ /* The cond for this insn works out as the top 4 bits. */
+ cond = (it >> 4);
+ }
+
+ /* Shift makes it look like an ARM-mode instruction */
+ insn = cond << 28;
+ return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
+}
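For example, suppose a Thumb-mode guest traps inside an IT block whose current condition is EQ (cond == 0) while CPSR.Z is clear. The reconstructed opcode has 0b0000 in bits [31:28], arm_check_condition() reports ARM_OPCODE_CONDTEST_FAIL, and handle_exit() skips the instruction instead of emulating it:

    cond = 0x0;          /* EQ: passes only when CPSR.Z is set */
    insn = cond << 28;   /* 0x00000000, an ARM-encoded stand-in */
    /* arm_check_condition(insn, cpsr) fails when Z == 0 */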
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to QEMU.
+ */
+static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index)
+{
+ unsigned long hsr_ec;
+
+ switch (exception_index) {
+ case ARM_EXCEPTION_IRQ:
+ return 1;
+ case ARM_EXCEPTION_UNDEFINED:
+ kvm_err("Undefined exception in Hyp mode at: %#08x\n",
+ vcpu->arch.hyp_pc);
+ BUG();
+ panic("KVM: Hypervisor undefined exception!\n");
+ case ARM_EXCEPTION_DATA_ABORT:
+ case ARM_EXCEPTION_PREF_ABORT:
+ case ARM_EXCEPTION_HVC:
+ hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+
+ if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
+ || !arm_exit_handlers[hsr_ec]) {
+ kvm_err("Unkown exception class: %#08lx, "
+ "hsr: %#08x\n", hsr_ec,
+ (unsigned int)vcpu->arch.hsr);
+ BUG();
+ }
+
+ /*
+ * See ARM ARM B1.14.1: "Hyp traps on instructions
+ * that fail their condition code check"
+ */
+ if (!kvm_condition_valid(vcpu)) {
+ bool is_wide = vcpu->arch.hsr & HSR_IL;
+ kvm_skip_instr(vcpu, is_wide);
+ return 1;
+ }
+
+ return arm_exit_handlers[hsr_ec](vcpu, run);
+ default:
+ kvm_pr_unimpl("Unsupported exception type: %d",
+ exception_index);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return 0;
+ }
+}
+
+static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+{
+ if (likely(vcpu->arch.has_run_once))
+ return 0;
+
+ vcpu->arch.has_run_once = true;
+
+ /*
+ * Handle the "start in power-off" case by calling into the
+ * PSCI code.
+ */
+ if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
+ *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
+ kvm_psci_call(vcpu);
+ }
+
+ return 0;
+}
+
+static void vcpu_pause(struct kvm_vcpu *vcpu)
+{
+ wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+ wait_event_interruptible(*wq, !vcpu->arch.pause);
+}
+
+/**
+ * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run structure pointer used for userspace state exchange
+ *
+ * This function is called through the KVM_RUN ioctl from user space. It
+ * will execute VM code in a loop until the time slice for the process is used
+ * or some emulation is needed from user space in which case the function will
+ * return with return value 0 and with the kvm_run structure filled in with the
+ * required data for the requested emulation.
+ */
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ int ret;
+ sigset_t sigsaved;
+
+ /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
+ if (unlikely(vcpu->arch.target < 0))
+ return -ENOEXEC;
+
+ ret = kvm_vcpu_first_run_init(vcpu);
+ if (ret)
+ return ret;
+
+ if (run->exit_reason == KVM_EXIT_MMIO) {
+ ret = kvm_handle_mmio_return(vcpu, vcpu->run);
+ if (ret)
+ return ret;
+ }
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ ret = 1;
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ while (ret > 0) {
+ /*
+ * Check conditions before entering the guest
+ */
+ cond_resched();
+
+ update_vttbr(vcpu->kvm);
+
+ if (vcpu->arch.pause)
+ vcpu_pause(vcpu);
+
+ local_irq_disable();
+
+ /*
+ * Re-check atomic conditions
+ */
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ run->exit_reason = KVM_EXIT_INTR;
+ }
+
+ if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
+ local_irq_enable();
+ continue;
+ }
+
+ /**************************************************************
+ * Enter the guest
+ */
+ trace_kvm_entry(*vcpu_pc(vcpu));
+ kvm_guest_enter();
+ vcpu->mode = IN_GUEST_MODE;
+
+ ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
+
+ vcpu->mode = OUTSIDE_GUEST_MODE;
+ vcpu->arch.last_pcpu = smp_processor_id();
+ kvm_guest_exit();
+ trace_kvm_exit(*vcpu_pc(vcpu));
+ /*
+ * We may have taken a host interrupt in HYP mode (ie
+ * while executing the guest). This interrupt is still
+ * pending, as we haven't serviced it yet!
+ *
+ * We're now back in SVC mode, with interrupts
+ * disabled. Enabling the interrupts now will have
+ * the effect of taking the interrupt again, in SVC
+ * mode this time.
+ */
+ local_irq_enable();
+
+ /*
+ * Back from guest
+ *************************************************************/
+
+ ret = handle_exit(vcpu, run, ret);
+ }
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ return ret;
+}
+
+static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
+{
+ int bit_index;
+ bool set;
+ unsigned long *ptr;
+
+ if (number == KVM_ARM_IRQ_CPU_IRQ)
+ bit_index = __ffs(HCR_VI);
+ else /* KVM_ARM_IRQ_CPU_FIQ */
+ bit_index = __ffs(HCR_VF);
+
+ ptr = (unsigned long *)&vcpu->arch.irq_lines;
+ if (level)
+ set = test_and_set_bit(bit_index, ptr);
+ else
+ set = test_and_clear_bit(bit_index, ptr);
+
+ /*
+ * If we didn't change anything, no need to wake up or kick other CPUs
+ */
+ if (set == level)
+ return 0;
+
+ /*
+ * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
+ * trigger a world-switch round on the running physical CPU to set the
+ * virtual IRQ/FIQ fields in the HCR appropriately.
+ */
+ kvm_vcpu_kick(vcpu);
+
+ return 0;
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
+{
+ u32 irq = irq_level->irq;
+ unsigned int irq_type, vcpu_idx, irq_num;
+ int nrcpus = atomic_read(&kvm->online_vcpus);
+ struct kvm_vcpu *vcpu = NULL;
+ bool level = irq_level->level;
+
+ irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
+ vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+ irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
+
+ trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+
+ if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
+ return -EINVAL;
+
+ if (vcpu_idx >= nrcpus)
+ return -EINVAL;
+
+ vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ if (!vcpu)
+ return -EINVAL;
+
+ if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
+ return -EINVAL;
+
+ return vcpu_interrupt_line(vcpu, irq_num, level);
+}
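Userspace composes the irq field of struct kvm_irq_level from the same three bitfields this function decodes. A sketch (vm_fd and vcpu_idx are assumed to exist in the caller):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    struct kvm_irq_level irq_level = {
        .irq   = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
                 (vcpu_idx << KVM_ARM_IRQ_VCPU_SHIFT) |
                 (KVM_ARM_IRQ_CPU_IRQ << KVM_ARM_IRQ_NUM_SHIFT),
        .level = 1,    /* assert the virtual IRQ line */
    };
    ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);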
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+
+ switch (ioctl) {
+ case KVM_ARM_VCPU_INIT: {
+ struct kvm_vcpu_init init;
+
+ if (copy_from_user(&init, argp, sizeof(init)))
+ return -EFAULT;
+
+ return kvm_vcpu_set_target(vcpu, &init);
+
+ }
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+ struct kvm_one_reg reg;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ return -EFAULT;
+ if (ioctl == KVM_SET_ONE_REG)
+ return kvm_arm_set_reg(vcpu, &reg);
+ else
+ return kvm_arm_get_reg(vcpu, &reg);
+ }
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ struct kvm_reg_list reg_list;
+ unsigned n;
+
+ if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+ return -EFAULT;
+ n = reg_list.n;
+ reg_list.n = kvm_arm_num_regs(vcpu);
+ if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+ return -EFAULT;
+ if (n < reg_list.n)
+ return -E2BIG;
+ return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return -EINVAL;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+static void cpu_init_hyp_mode(void *vector)
+{
+ unsigned long long pgd_ptr;
+ unsigned long pgd_low, pgd_high;
+ unsigned long hyp_stack_ptr;
+ unsigned long stack_page;
+ unsigned long vector_ptr;
+
+ /* Switch from the HYP stub to our own HYP init vector */
+ __hyp_set_vectors((unsigned long)vector);
+
+ pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
+ pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+ pgd_high = (pgd_ptr >> 32ULL);
+ stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+ hyp_stack_ptr = stack_page + PAGE_SIZE;
+ vector_ptr = (unsigned long)__kvm_hyp_vector;
+
+ /*
+ * Call initialization code, and switch to the full blown
+ * HYP code. The init code doesn't need to preserve these registers as
+ * r1-r3 and r12 are already callee save according to the AAPCS.
+ * Note that we slightly misuse the prototype by casting the pgd_low to
+ * a void *.
+ */
+ kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
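For reference, the AAPCS places the four scalar arguments of that kvm_call_hyp() in r0-r3, so the HYP init code sees (an illustrative view, not a new interface):

    /* r0 = pgd_low   (low 32 bits of the HYP translation table base)
     * r1 = pgd_high  (high 32 bits, forming the 64-bit LPAE HTTBR)
     * r2 = hyp_stack_ptr
     * r3 = vector_ptr
     */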
+
+/**
+ * Inits Hyp-mode on all online CPUs
+ */
+static int init_hyp_mode(void)
+{
+ phys_addr_t init_phys_addr;
+ int cpu;
+ int err = 0;
+
+ /*
+ * Allocate Hyp PGD and setup Hyp identity mapping
+ */
+ err = kvm_mmu_init();
+ if (err)
+ goto out_err;
+
+ /*
+ * It is probably enough to obtain the default on one
+ * CPU. It's unlikely to be different on the others.
+ */
+ hyp_default_vectors = __hyp_get_vectors();
+
+ /*
+ * Allocate stack pages for Hypervisor-mode
+ */
+ for_each_possible_cpu(cpu) {
+ unsigned long stack_page;
+
+ stack_page = __get_free_page(GFP_KERNEL);
+ if (!stack_page) {
+ err = -ENOMEM;
+ goto out_free_stack_pages;
+ }
+
+ per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+ }
+
+ /*
+ * Execute the init code on each CPU.
+ *
+ * Note: The stack is not mapped yet, so don't do anything else than
+ * initializing the hypervisor mode on each CPU using a local stack
+ * space for temporary storage.
+ */
+ init_phys_addr = virt_to_phys(__kvm_hyp_init);
+ for_each_online_cpu(cpu) {
+ smp_call_function_single(cpu, cpu_init_hyp_mode,
+ (void *)(long)init_phys_addr, 1);
+ }
+
+ /*
+ * Unmap the identity mapping
+ */
+ kvm_clear_hyp_idmap();
+
+ /*
+ * Map the Hyp-code called directly from the host
+ */
+ err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+ if (err) {
+ kvm_err("Cannot map world-switch code\n");
+ goto out_free_mappings;
+ }
+
+ /*
+ * Map the Hyp stack pages
+ */
+ for_each_possible_cpu(cpu) {
+ char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+ err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
+
+ if (err) {
+ kvm_err("Cannot map hyp stack\n");
+ goto out_free_mappings;
+ }
+ }
+
+ /*
+ * Map the host VFP structures
+ */
+ kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+ if (!kvm_host_vfp_state) {
+ err = -ENOMEM;
+ kvm_err("Cannot allocate host VFP state\n");
+ goto out_free_mappings;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct vfp_hard_struct *vfp;
+
+ vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
+ err = create_hyp_mappings(vfp, vfp + 1);
+
+ if (err) {
+ kvm_err("Cannot map host VFP state: %d\n", err);
+ goto out_free_vfp;
+ }
+ }
+
+ kvm_info("Hyp mode initialized successfully\n");
+ return 0;
+out_free_vfp:
+ free_percpu(kvm_host_vfp_state);
+out_free_mappings:
+ free_hyp_pmds();
+out_free_stack_pages:
+ for_each_possible_cpu(cpu)
+ free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+out_err:
+ kvm_err("error initializing Hyp mode: %d\n", err);
+ return err;
+}
+
+/**
+ * Initialize Hyp-mode and memory mappings on all CPUs.
+ */
+int kvm_arch_init(void *opaque)
+{
+ int err;
+
+ if (!is_hyp_mode_available()) {
+ kvm_err("HYP mode not available\n");
+ return -ENODEV;
+ }
+
+ if (kvm_target_cpu() < 0) {
+ kvm_err("Target CPU not supported!\n");
+ return -ENODEV;
+ }
+
+ err = init_hyp_mode();
+ if (err)
+ goto out_err;
+
+ kvm_coproc_table_init();
+ return 0;
+out_err:
+ return err;
+}
+
+/* NOP: Compiling as a module not supported */
+void kvm_arch_exit(void)
+{
+}
+
+static int arm_init(void)
+{
+ int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+ return rc;
+}
+
+module_init(arm_init);
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
new file mode 100644
index 000000000000..d782638c7ec0
--- /dev/null
+++ b/arch/arm/kvm/coproc.c
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.com.au>
+ * Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <trace/events/kvm.h>
+#include <asm/vfp.h>
+#include "../vfp/vfpinstr.h"
+
+#include "trace.h"
+#include "coproc.h"
+
+
+/******************************************************************************
+ * Co-processor emulation
+ *****************************************************************************/
+
+/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
+static u32 cache_levels;
+
+/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
+#define CSSELR_MAX 12
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /*
+ * We can get here, if the host has been built without VFPv3 support,
+ * but the guest attempted a floating point operation.
+ */
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+/* See note at ARM ARM B1.14.4 */
+static bool access_dcsw(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ u32 val;
+ int cpu;
+
+ cpu = get_cpu();
+
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
+ cpumask_setall(&vcpu->arch.require_dcache_flush);
+ cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
+
+ /* If we were already preempted, take the long way around */
+ if (cpu != vcpu->arch.last_pcpu) {
+ flush_cache_all();
+ goto done;
+ }
+
+ val = *vcpu_reg(vcpu, p->Rt1);
+
+ switch (p->CRm) {
+ case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
+ case 14: /* DCCISW */
+ asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
+ break;
+
+ case 10: /* DCCSW */
+ asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
+ break;
+ }
+
+done:
+ put_cpu();
+
+ return true;
+}
+
+/*
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters. Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ */
+static bool pm_fake(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+ else
+ return read_zero(vcpu, p);
+}
+
+#define access_pmcr pm_fake
+#define access_pmcntenset pm_fake
+#define access_pmcntenclr pm_fake
+#define access_pmovsr pm_fake
+#define access_pmselr pm_fake
+#define access_pmceid0 pm_fake
+#define access_pmceid1 pm_fake
+#define access_pmccntr pm_fake
+#define access_pmxevtyper pm_fake
+#define access_pmxevcntr pm_fake
+#define access_pmuserenr pm_fake
+#define access_pmintenset pm_fake
+#define access_pmintenclr pm_fake
+
+/* Architected CP15 registers.
+ * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
+ */
+static const struct coproc_reg cp15_regs[] = {
+ /* CSSELR: swapped by interrupt.S. */
+ { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
+ NULL, reset_unknown, c0_CSSELR },
+
+ /* TTBR0/TTBR1: swapped by interrupt.S. */
+ { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+ { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+
+ /* TTBCR: swapped by interrupt.S. */
+ { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
+ NULL, reset_val, c2_TTBCR, 0x00000000 },
+
+ /* DACR: swapped by interrupt.S. */
+ { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
+ NULL, reset_unknown, c3_DACR },
+
+ /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
+ { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
+ NULL, reset_unknown, c5_DFSR },
+ { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
+ NULL, reset_unknown, c5_IFSR },
+ { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
+ NULL, reset_unknown, c5_ADFSR },
+ { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
+ NULL, reset_unknown, c5_AIFSR },
+
+ /* DFAR/IFAR: swapped by interrupt.S. */
+ { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
+ NULL, reset_unknown, c6_DFAR },
+ { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
+ NULL, reset_unknown, c6_IFAR },
+ /*
+ * DC{C,I,CI}SW operations:
+ */
+ { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
+ { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
+ { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
+ /*
+ * Dummy performance monitor implementation.
+ */
+ { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
+ { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
+ { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
+ { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
+ { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
+ { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
+ { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
+ { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
+ { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
+ { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
+ { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
+ { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
+ { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
+
+ /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
+ { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
+ NULL, reset_unknown, c10_PRRR},
+ { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
+ NULL, reset_unknown, c10_NMRR},
+
+ /* VBAR: swapped by interrupt.S. */
+ { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
+ NULL, reset_val, c12_VBAR, 0x00000000 },
+
+ /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
+ { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
+ NULL, reset_val, c13_CID, 0x00000000 },
+ { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
+ NULL, reset_unknown, c13_TID_URW },
+ { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
+ NULL, reset_unknown, c13_TID_URO },
+ { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
+ NULL, reset_unknown, c13_TID_PRIV },
+};
+
+/* Target specific emulation tables */
+static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
+
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
+{
+ target_tables[table->target] = table;
+}
+
+/* Get specific register table for this target. */
+static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
+{
+ struct kvm_coproc_target_table *table;
+
+ table = target_tables[target];
+ *num = table->num;
+ return table->table;
+}
+
+static const struct coproc_reg *find_reg(const struct coproc_params *params,
+ const struct coproc_reg table[],
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ const struct coproc_reg *r = &table[i];
+
+ if (params->is_64bit != r->is_64)
+ continue;
+ if (params->CRn != r->CRn)
+ continue;
+ if (params->CRm != r->CRm)
+ continue;
+ if (params->Op1 != r->Op1)
+ continue;
+ if (params->Op2 != r->Op2)
+ continue;
+
+ return r;
+ }
+ return NULL;
+}
+
+static int emulate_cp15(struct kvm_vcpu *vcpu,
+ const struct coproc_params *params)
+{
+ size_t num;
+ const struct coproc_reg *table, *r;
+
+ trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
+ params->CRm, params->Op2, params->is_write);
+
+ table = get_target_table(vcpu->arch.target, &num);
+
+ /* Search target-specific then generic table. */
+ r = find_reg(params, table, num);
+ if (!r)
+ r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+ if (likely(r)) {
+ /* If we don't have an accessor, we should never get here! */
+ BUG_ON(!r->access);
+
+ if (likely(r->access(vcpu, params, r))) {
+ /* Skip instruction, since it was emulated */
+ kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+ return 1;
+ }
+ /* If access function fails, it should complain. */
+ } else {
+ kvm_err("Unsupported guest CP15 access at: %08x\n",
+ *vcpu_pc(vcpu));
+ print_cp_instr(params);
+ }
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ struct coproc_params params;
+
+ params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+ params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+ params.is_write = ((vcpu->arch.hsr & 1) == 0);
+ params.is_64bit = true;
+
+ params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+ params.Op2 = 0;
+ params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+ params.CRn = 0;
+
+ return emulate_cp15(vcpu, &params);
+}
+
+static void reset_coproc_regs(struct kvm_vcpu *vcpu,
+ const struct coproc_reg *table, size_t num)
+{
+ unsigned long i;
+
+ for (i = 0; i < num; i++)
+ if (table[i].reset)
+ table[i].reset(vcpu, &table[i]);
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ struct coproc_params params;
+
+ params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+ params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+ params.is_write = ((vcpu->arch.hsr & 1) == 0);
+ params.is_64bit = false;
+
+ params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
+ params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
+ params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+ params.Rt2 = 0;
+
+ return emulate_cp15(vcpu, &params);
+}
+
+/******************************************************************************
+ * Userspace API
+ *****************************************************************************/
+
+static bool index_to_params(u64 id, struct coproc_params *params)
+{
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ /* Any unused index bits means it's not valid. */
+ if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+ | KVM_REG_ARM_COPROC_MASK
+ | KVM_REG_ARM_32_CRN_MASK
+ | KVM_REG_ARM_CRM_MASK
+ | KVM_REG_ARM_OPC1_MASK
+ | KVM_REG_ARM_32_OPC2_MASK))
+ return false;
+
+ params->is_64bit = false;
+ params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
+ >> KVM_REG_ARM_32_CRN_SHIFT);
+ params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+ >> KVM_REG_ARM_CRM_SHIFT);
+ params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
+ >> KVM_REG_ARM_OPC1_SHIFT);
+ params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
+ >> KVM_REG_ARM_32_OPC2_SHIFT);
+ return true;
+ case KVM_REG_SIZE_U64:
+ /* Any unused index bits means it's not valid. */
+ if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+ | KVM_REG_ARM_COPROC_MASK
+ | KVM_REG_ARM_CRM_MASK
+ | KVM_REG_ARM_OPC1_MASK))
+ return false;
+ params->is_64bit = true;
+ params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+ >> KVM_REG_ARM_CRM_SHIFT);
+ params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
+ >> KVM_REG_ARM_OPC1_SHIFT);
+ params->Op2 = 0;
+ params->CRn = 0;
+ return true;
+ default:
+ return false;
+ }
+}
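Going the other way, userspace composes a 32-bit cp15 register index from the same uapi masks. A sketch, where crn/crm/op1/op2 are the caller's encoding values:

    u64 id = KVM_REG_ARM | KVM_REG_SIZE_U32
           | (15 << KVM_REG_ARM_COPROC_SHIFT)    /* cp15 */
           | (crn << KVM_REG_ARM_32_CRN_SHIFT)
           | (crm << KVM_REG_ARM_CRM_SHIFT)
           | (op1 << KVM_REG_ARM_OPC1_SHIFT)
           | (op2 << KVM_REG_ARM_32_OPC2_SHIFT);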
+
+/* Decode an index value, and find the cp15 coproc_reg entry. */
+static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
+ u64 id)
+{
+ size_t num;
+ const struct coproc_reg *table, *r;
+ struct coproc_params params;
+
+ /* We only do cp15 for now. */
+ if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
+ return NULL;
+
+ if (!index_to_params(id, &params))
+ return NULL;
+
+ table = get_target_table(vcpu->arch.target, &num);
+ r = find_reg(&params, table, num);
+ if (!r)
+ r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+ /* Not saved in the cp15 array? */
+ if (r && !r->reg)
+ r = NULL;
+
+ return r;
+}
+
+/*
+ * These are the invariant cp15 registers: we let the guest see the host
+ * versions of these, so they're part of the guest state.
+ *
+ * A future CPU may provide a mechanism to present different values to
+ * the guest, or a future kvm may trap them.
+ */
+/* Unfortunately, there's no register-argument for mrc, so generate. */
+#define FUNCTION_FOR32(crn, crm, op1, op2, name) \
+ static void get_##name(struct kvm_vcpu *v, \
+ const struct coproc_reg *r) \
+ { \
+ u32 val; \
+ \
+ asm volatile("mrc p15, " __stringify(op1) \
+ ", %0, c" __stringify(crn) \
+ ", c" __stringify(crm) \
+ ", " __stringify(op2) "\n" : "=r" (val)); \
+ ((struct coproc_reg *)r)->val = val; \
+ }
+
+FUNCTION_FOR32(0, 0, 0, 0, MIDR)
+FUNCTION_FOR32(0, 0, 0, 1, CTR)
+FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
+FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
+FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
+FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
+FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
+FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
+FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
+FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
+FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
+FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
+FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
+FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
+FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
+FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
+FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
+FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
+FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
+FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
+FUNCTION_FOR32(0, 0, 1, 7, AIDR)
+
+/* ->val is filled in by kvm_invariant_coproc_table_init() */
+static struct coproc_reg invariant_cp15[] = {
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
+
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
+ { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },
+
+ { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
+ { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
+ { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
+ { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
+ { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
+ { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
+
+ { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+ { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+};
+
+static int reg_from_user(void *val, const void __user *uaddr, u64 id)
+{
+ /* This Just Works because we are little endian. */
+ if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int reg_to_user(void __user *uaddr, const void *val, u64 id)
+{
+ /* This Just Works because we are little endian. */
+ if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_invariant_cp15(u64 id, void __user *uaddr)
+{
+ struct coproc_params params;
+ const struct coproc_reg *r;
+
+ if (!index_to_params(id, &params))
+ return -ENOENT;
+
+ r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
+ if (!r)
+ return -ENOENT;
+
+ return reg_to_user(uaddr, &r->val, id);
+}
+
+static int set_invariant_cp15(u64 id, void __user *uaddr)
+{
+ struct coproc_params params;
+ const struct coproc_reg *r;
+ int err;
+ u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+
+ if (!index_to_params(id, &params))
+ return -ENOENT;
+ r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
+ if (!r)
+ return -ENOENT;
+
+ err = reg_from_user(&val, uaddr, id);
+ if (err)
+ return err;
+
+ /* This is what we mean by invariant: you can't change it. */
+ if (r->val != val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool is_valid_cache(u32 val)
+{
+ u32 level, ctype;
+
+ if (val >= CSSELR_MAX)
+ return false;
+
+ /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
+ level = (val >> 1);
+ ctype = (cache_levels >> (level * 3)) & 7;
+
+ switch (ctype) {
+ case 0: /* No cache */
+ return false;
+ case 1: /* Instruction cache only */
+ return (val & 1);
+ case 2: /* Data cache only */
+ case 4: /* Unified cache */
+ return !(val & 1);
+ case 3: /* Separate instruction and data caches */
+ return true;
+ default: /* Reserved: we can't know instruction or data. */
+ return false;
+ }
+}
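To make the encoding concrete: CSSELR value 0 selects the level 1 data/unified cache and value 1 the level 1 instruction cache, so a CPU whose CLIDR reports ctype 3 (separate I and D) at level 1 answers true for both (illustrative):

    /* csselr = (level << 1) | InD; level 0 means L1 */
    is_valid_cache(0);    /* L1 data side        */
    is_valid_cache(1);    /* L1 instruction side */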
+
+/* Which cache CCSIDR represents depends on CSSELR value. */
+static u32 get_ccsidr(u32 csselr)
+{
+ u32 ccsidr;
+
+ /* Make sure no one else changes CSSELR during this! */
+ local_irq_disable();
+ /* Put value into CSSELR */
+ asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
+ isb();
+ /* Read result out of CCSIDR */
+ asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
+ local_irq_enable();
+
+ return ccsidr;
+}
+
+static int demux_c15_get(u64 id, void __user *uaddr)
+{
+ u32 val;
+ u32 __user *uval = uaddr;
+
+ /* Fail if we have unknown bits set. */
+ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+ | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+ return -ENOENT;
+
+ switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+ case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+ if (KVM_REG_SIZE(id) != 4)
+ return -ENOENT;
+ val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+ >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+ if (!is_valid_cache(val))
+ return -ENOENT;
+
+ return put_user(get_ccsidr(val), uval);
+ default:
+ return -ENOENT;
+ }
+}
+
+static int demux_c15_set(u64 id, void __user *uaddr)
+{
+ u32 val, newval;
+ u32 __user *uval = uaddr;
+
+ /* Fail if we have unknown bits set. */
+ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+ | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+ return -ENOENT;
+
+ switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+ case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+ if (KVM_REG_SIZE(id) != 4)
+ return -ENOENT;
+ val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+ >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+ if (!is_valid_cache(val))
+ return -ENOENT;
+
+ if (get_user(newval, uval))
+ return -EFAULT;
+
+ /* This is also invariant: you can't change it. */
+ if (newval != get_ccsidr(val))
+ return -EINVAL;
+ return 0;
+ default:
+ return -ENOENT;
+ }
+}
+
+#ifdef CONFIG_VFPv3
+static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
+ KVM_REG_ARM_VFP_FPSCR,
+ KVM_REG_ARM_VFP_FPINST,
+ KVM_REG_ARM_VFP_FPINST2,
+ KVM_REG_ARM_VFP_MVFR0,
+ KVM_REG_ARM_VFP_MVFR1,
+ KVM_REG_ARM_VFP_FPSID };
+
+static unsigned int num_fp_regs(void)
+{
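+ /*
+ * MVFR0's A_SIMD field reads 2 when 32 double-precision registers
+ * are implemented (D32), and 1 for the 16-register variant.
+ */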
+ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
+ return 32;
+ else
+ return 16;
+}
+
+static unsigned int num_vfp_regs(void)
+{
+ /* Normal FP regs + control regs. */
+ return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
+}
+
+static int copy_vfp_regids(u64 __user *uindices)
+{
+ unsigned int i;
+ const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
+ const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
+
+ for (i = 0; i < num_fp_regs(); i++) {
+ if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
+ uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
+ if (put_user(u32reg | vfp_sysregs[i], uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ return num_vfp_regs();
+}
+
+static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
+{
+ u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
+ u32 val;
+
+ /* Fail if we have unknown bits set. */
+ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+ | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+ return -ENOENT;
+
+ if (vfpid < num_fp_regs()) {
+ if (KVM_REG_SIZE(id) != 8)
+ return -ENOENT;
+ return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
+ id);
+ }
+
+ /* FP control registers are all 32 bit. */
+ if (KVM_REG_SIZE(id) != 4)
+ return -ENOENT;
+
+ switch (vfpid) {
+ case KVM_REG_ARM_VFP_FPEXC:
+ return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
+ case KVM_REG_ARM_VFP_FPSCR:
+ return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
+ case KVM_REG_ARM_VFP_FPINST:
+ return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
+ case KVM_REG_ARM_VFP_FPINST2:
+ return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
+ case KVM_REG_ARM_VFP_MVFR0:
+ val = fmrx(MVFR0);
+ return reg_to_user(uaddr, &val, id);
+ case KVM_REG_ARM_VFP_MVFR1:
+ val = fmrx(MVFR1);
+ return reg_to_user(uaddr, &val, id);
+ case KVM_REG_ARM_VFP_FPSID:
+ val = fmrx(FPSID);
+ return reg_to_user(uaddr, &val, id);
+ default:
+ return -ENOENT;
+ }
+}
+
+static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
+{
+ u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
+ u32 val;
+
+ /* Fail if we have unknown bits set. */
+ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+ | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+ return -ENOENT;
+
+ if (vfpid < num_fp_regs()) {
+ if (KVM_REG_SIZE(id) != 8)
+ return -ENOENT;
+ return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
+ uaddr, id);
+ }
+
+ /* FP control registers are all 32 bit. */
+ if (KVM_REG_SIZE(id) != 4)
+ return -ENOENT;
+
+ switch (vfpid) {
+ case KVM_REG_ARM_VFP_FPEXC:
+ return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
+ case KVM_REG_ARM_VFP_FPSCR:
+ return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
+ case KVM_REG_ARM_VFP_FPINST:
+ return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
+ case KVM_REG_ARM_VFP_FPINST2:
+ return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
+ /* These are invariant. */
+ case KVM_REG_ARM_VFP_MVFR0:
+ if (reg_from_user(&val, uaddr, id))
+ return -EFAULT;
+ if (val != fmrx(MVFR0))
+ return -EINVAL;
+ return 0;
+ case KVM_REG_ARM_VFP_MVFR1:
+ if (reg_from_user(&val, uaddr, id))
+ return -EFAULT;
+ if (val != fmrx(MVFR1))
+ return -EINVAL;
+ return 0;
+ case KVM_REG_ARM_VFP_FPSID:
+ if (reg_from_user(&val, uaddr, id))
+ return -EFAULT;
+ if (val != fmrx(FPSID))
+ return -EINVAL;
+ return 0;
+ default:
+ return -ENOENT;
+ }
+}
+#else /* !CONFIG_VFPv3 */
+static unsigned int num_vfp_regs(void)
+{
+ return 0;
+}
+
+static int copy_vfp_regids(u64 __user *uindices)
+{
+ return 0;
+}
+
+static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
+{
+ return -ENOENT;
+}
+
+static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
+{
+ return -ENOENT;
+}
+#endif /* !CONFIG_VFPv3 */
+
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ const struct coproc_reg *r;
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+ return demux_c15_get(reg->id, uaddr);
+
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
+ return vfp_get_reg(vcpu, reg->id, uaddr);
+
+ r = index_to_coproc_reg(vcpu, reg->id);
+ if (!r)
+ return get_invariant_cp15(reg->id, uaddr);
+
+ /* Note: copies two regs if size is 64 bit. */
+ return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+}
+
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ const struct coproc_reg *r;
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+ return demux_c15_set(reg->id, uaddr);
+
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
+ return vfp_set_reg(vcpu, reg->id, uaddr);
+
+ r = index_to_coproc_reg(vcpu, reg->id);
+ if (!r)
+ return set_invariant_cp15(reg->id, uaddr);
+
+ /* Note: copies two regs if size is 64 bit. */
+ return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+}
+
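+/*
+ * Illustrative usage (not part of this patch): userspace enumerates
+ * register indices with KVM_GET_REG_LIST and then accesses each one
+ * through the generic ONE_REG calls, roughly:
+ *
+ *	__u64 value = 0;
+ *	struct kvm_one_reg reg = {
+ *		.id   = index,		(an index from KVM_GET_REG_LIST)
+ *		.addr = (__u64)(unsigned long)&value,
+ *	};
+ *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
+ */
+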
+static unsigned int num_demux_regs(void)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < CSSELR_MAX; i++)
+ if (is_valid_cache(i))
+ count++;
+
+ return count;
+}
+
+static int write_demux_regids(u64 __user *uindices)
+{
+ u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+ unsigned int i;
+
+ val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
+ for (i = 0; i < CSSELR_MAX; i++) {
+ if (!is_valid_cache(i))
+ continue;
+ if (put_user(val | i, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ return 0;
+}
+
+static u64 cp15_to_index(const struct coproc_reg *reg)
+{
+ u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
+ if (reg->is_64) {
+ val |= KVM_REG_SIZE_U64;
+ val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
+ val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+ } else {
+ val |= KVM_REG_SIZE_U32;
+ val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
+ val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
+ val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+ val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
+ }
+ return val;
+}
+
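+/*
+ * Write the register's index to user space, or just report success
+ * when *uind is NULL (walk_cp15() below uses that to count registers).
+ */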
+static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
+{
+ if (!*uind)
+ return true;
+
+ if (put_user(cp15_to_index(reg), *uind))
+ return false;
+
+ (*uind)++;
+ return true;
+}
+
+/* Assumed ordered tables, see kvm_coproc_table_init. */
+static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
+{
+ const struct coproc_reg *i1, *i2, *end1, *end2;
+ unsigned int total = 0;
+ size_t num;
+
+ /* We check for duplicates here, to allow arch-specific overrides. */
+ i1 = get_target_table(vcpu->arch.target, &num);
+ end1 = i1 + num;
+ i2 = cp15_regs;
+ end2 = cp15_regs + ARRAY_SIZE(cp15_regs);
+
+ BUG_ON(i1 == end1 || i2 == end2);
+
+ /* Walk carefully, as both tables may refer to the same register. */
+ while (i1 || i2) {
+ int cmp = cmp_reg(i1, i2);
+ /* target-specific overrides generic entry. */
+ if (cmp <= 0) {
+ /* Ignore registers we trap but don't save. */
+ if (i1->reg) {
+ if (!copy_reg_to_user(i1, &uind))
+ return -EFAULT;
+ total++;
+ }
+ } else {
+ /* Ignore registers we trap but don't save. */
+ if (i2->reg) {
+ if (!copy_reg_to_user(i2, &uind))
+ return -EFAULT;
+ total++;
+ }
+ }
+
+ if (cmp <= 0 && ++i1 == end1)
+ i1 = NULL;
+ if (cmp >= 0 && ++i2 == end2)
+ i2 = NULL;
+ }
+ return total;
+}
+
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
+{
+ return ARRAY_SIZE(invariant_cp15)
+ + num_demux_regs()
+ + num_vfp_regs()
+ + walk_cp15(vcpu, (u64 __user *)NULL);
+}
+
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ unsigned int i;
+ int err;
+
+ /* Then give them all the invariant registers' indices. */
+ for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
+ if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ err = walk_cp15(vcpu, uindices);
+ if (err < 0)
+ return err;
+ uindices += err;
+
+ err = copy_vfp_regids(uindices);
+ if (err < 0)
+ return err;
+ uindices += err;
+
+ return write_demux_regids(uindices);
+}
+
+void kvm_coproc_table_init(void)
+{
+ unsigned int i;
+
+ /* Make sure tables are unique and in order. */
+ for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
+ BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+
+ /* We abuse the reset function to overwrite the table itself. */
+ for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
+ invariant_cp15[i].reset(NULL, &invariant_cp15[i]);
+
+ /*
+ * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
+ *
+ * If software reads the Cache Type fields from Ctype1
+ * upwards, once it has seen a value of 0b000, no caches
+ * exist at further-out levels of the hierarchy. So, for
+ * example, if Ctype3 is the first Cache Type field with a
+ * value of 0b000, the values of Ctype4 to Ctype7 must be
+ * ignored.
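+ *
+ * For example (illustrative), CLIDR = 0x0A200023 encodes Ctype1 = 3,
+ * Ctype2 = 4 and Ctype3 = 0, so the loop below stops at i = 2 and
+ * masks cache_levels down to 0x23.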
+ */
+ asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
+ for (i = 0; i < 7; i++)
+ if (((cache_levels >> (i*3)) & 7) == 0)
+ break;
+ /* Clear all higher bits. */
+ cache_levels &= (1 << (i*3))-1;
+}
+
+/**
+ * kvm_reset_coprocs - sets cp15 registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
+{
+ size_t num;
+ const struct coproc_reg *table;
+
+ /* Catch someone adding a register without putting in reset entry. */
+ memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
+
+ /* Generic chip reset first (so target could override). */
+ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+ table = get_target_table(vcpu->arch.target, &num);
+ reset_coproc_regs(vcpu, table, num);
+
+ for (num = 1; num < NR_CP15_REGS; num++)
+ if (vcpu->arch.cp15[num] == 0x42424242)
+ panic("Didn't reset vcpu->arch.cp15[%zi]", num);
+}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
new file mode 100644
index 000000000000..992adfafa2ff
--- /dev/null
+++ b/arch/arm/kvm/coproc.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_COPROC_LOCAL_H__
+#define __ARM_KVM_COPROC_LOCAL_H__
+
+struct coproc_params {
+ unsigned long CRn;
+ unsigned long CRm;
+ unsigned long Op1;
+ unsigned long Op2;
+ unsigned long Rt1;
+ unsigned long Rt2;
+ bool is_64bit;
+ bool is_write;
+};
+
+struct coproc_reg {
+ /* MRC/MCR/MRRC/MCRR instruction which accesses it. */
+ unsigned long CRn;
+ unsigned long CRm;
+ unsigned long Op1;
+ unsigned long Op2;
+
+ bool is_64;
+
+ /* Trapped access from guest, if non-NULL. */
+ bool (*access)(struct kvm_vcpu *,
+ const struct coproc_params *,
+ const struct coproc_reg *);
+
+ /* Initialization for vcpu. */
+ void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
+
+ /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
+ unsigned long reg;
+
+ /* Value (usually reset value) */
+ u64 val;
+};
+
+static inline void print_cp_instr(const struct coproc_params *p)
+{
+ /* Look, we even formatted it for you to paste into the table! */
+ if (p->is_64bit) {
+ kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
+ p->CRm, p->Op1, p->is_write ? "write" : "read");
+ } else {
+ kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
+ " func_%s },\n",
+ p->CRn, p->CRm, p->Op1, p->Op2,
+ p->is_write ? "write" : "read");
+ }
+}
+
+static inline bool ignore_write(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p)
+{
+ return true;
+}
+
+static inline bool read_zero(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p)
+{
+ *vcpu_reg(vcpu, p->Rt1) = 0;
+ return true;
+}
+
+static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
+ const struct coproc_params *params)
+{
+ kvm_debug("CP15 write to read-only register at: %08x\n",
+ *vcpu_pc(vcpu));
+ print_cp_instr(params);
+ return false;
+}
+
+static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
+ const struct coproc_params *params)
+{
+ kvm_debug("CP15 read to write-only register at: %08x\n",
+ *vcpu_pc(vcpu));
+ print_cp_instr(params);
+ return false;
+}
+
+/* Reset functions */
+static inline void reset_unknown(struct kvm_vcpu *vcpu,
+ const struct coproc_reg *r)
+{
+ BUG_ON(!r->reg);
+ BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
+ vcpu->arch.cp15[r->reg] = 0xdecafbad;
+}
+
+static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ BUG_ON(!r->reg);
+ BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
+ vcpu->arch.cp15[r->reg] = r->val;
+}
+
+static inline void reset_unknown64(struct kvm_vcpu *vcpu,
+ const struct coproc_reg *r)
+{
+ BUG_ON(!r->reg);
+ BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
+
+ vcpu->arch.cp15[r->reg] = 0xdecafbad;
+ vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
+}
+
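+/*
+ * Order registers by CRn, CRm, Op1, Op2; a NULL entry sorts last so the
+ * merge in walk_cp15() can walk two sorted tables in lock-step.
+ */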
+static inline int cmp_reg(const struct coproc_reg *i1,
+ const struct coproc_reg *i2)
+{
+ BUG_ON(i1 == i2);
+ if (!i1)
+ return 1;
+ else if (!i2)
+ return -1;
+ if (i1->CRn != i2->CRn)
+ return i1->CRn - i2->CRn;
+ if (i1->CRm != i2->CRm)
+ return i1->CRm - i2->CRm;
+ if (i1->Op1 != i2->Op1)
+ return i1->Op1 - i2->Op1;
+ return i1->Op2 - i2->Op2;
+}
+
+
+#define CRn(_x) .CRn = _x
+#define CRm(_x) .CRm = _x
+#define Op1(_x) .Op1 = _x
+#define Op2(_x) .Op2 = _x
+#define is64 .is_64 = true
+#define is32 .is_64 = false
+
+#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
new file mode 100644
index 000000000000..685063a6d0cf
--- /dev/null
+++ b/arch/arm/kvm/coproc_a15.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ * Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/kvm_host.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <linux/init.h>
+
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ /*
+ * Compute guest MPIDR:
+ * (Even if we present only one VCPU to the guest on an SMP
+ * host we don't set the U bit in the MPIDR, or vice versa, as
+ * revealing the underlying hardware properties is likely to
+ * be the best choice).
+ */
+ vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
+ | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
+}
+
+#include "coproc.h"
+
+/* A15 TRM 4.3.28: RO WI */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+ return true;
+}
+
+/* A15 TRM 4.3.60: R/O. */
+static bool access_cbar(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p);
+ return read_zero(vcpu, p);
+}
+
+/* A15 TRM 4.3.48: R/O WI. */
+static bool access_l2ctlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+ return true;
+}
+
+static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ u32 l2ctlr, ncores;
+
+ asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
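+ /* Bits [25:24] of L2CTLR report the number of CPUs minus one;
+ * advertise the guest's vcpu count there rather than the host's. */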
+ l2ctlr &= ~(3 << 24);
+ ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+ l2ctlr |= (ncores & 3) << 24;
+
+ vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ u32 actlr;
+
+ /* ACTLR contains SMP bit: make sure you create all cpus first! */
+ asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+ /* Make the SMP bit consistent with the guest configuration */
+ if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
+ actlr |= 1U << 6;
+ else
+ actlr &= ~(1U << 6);
+
+ vcpu->arch.cp15[c1_ACTLR] = actlr;
+}
+
+/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
+static bool access_l2ectlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = 0;
+ return true;
+}
+
+/*
+ * A15-specific CP15 registers.
+ * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
+ */
+static const struct coproc_reg a15_regs[] = {
+ /* MPIDR: we use VMPIDR for guest access. */
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
+ NULL, reset_mpidr, c0_MPIDR },
+
+ /* SCTLR: swapped by interrupt.S. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+ NULL, reset_val, c1_SCTLR, 0x00C50078 },
+ /* ACTLR: trapped by HCR.TAC bit. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+ access_actlr, reset_actlr, c1_ACTLR },
+ /* CPACR: swapped by interrupt.S. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+ NULL, reset_val, c1_CPACR, 0x00000000 },
+
+ /*
+ * L2CTLR access (guest wants to know #CPUs).
+ */
+ { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
+ access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
+ { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
+
+ /* The Configuration Base Address Register. */
+ { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
+};
+
+static struct kvm_coproc_target_table a15_target_table = {
+ .target = KVM_ARM_TARGET_CORTEX_A15,
+ .table = a15_regs,
+ .num = ARRAY_SIZE(a15_regs),
+};
+
+static int __init coproc_a15_init(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
+ BUG_ON(cmp_reg(&a15_regs[i-1],
+ &a15_regs[i]) >= 0);
+
+ kvm_register_target_coproc_table(&a15_target_table);
+ return 0;
+}
+late_initcall(coproc_a15_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
new file mode 100644
index 000000000000..d61450ac6665
--- /dev/null
+++ b/arch/arm/kvm/emulate.c
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+#define VCPU_NR_MODES 6
+#define VCPU_REG_OFFSET_USR 0
+#define VCPU_REG_OFFSET_FIQ 1
+#define VCPU_REG_OFFSET_IRQ 2
+#define VCPU_REG_OFFSET_SVC 3
+#define VCPU_REG_OFFSET_ABT 4
+#define VCPU_REG_OFFSET_UND 5
+#define REG_OFFSET(_reg) \
+ (offsetof(struct kvm_regs, _reg) / sizeof(u32))
+
+#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
+
+static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
+ /* USR/SYS Registers */
+ [VCPU_REG_OFFSET_USR] = {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
+ },
+
+ /* FIQ Registers */
+ [VCPU_REG_OFFSET_FIQ] = {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7),
+ REG_OFFSET(fiq_regs[0]), /* r8 */
+ REG_OFFSET(fiq_regs[1]), /* r9 */
+ REG_OFFSET(fiq_regs[2]), /* r10 */
+ REG_OFFSET(fiq_regs[3]), /* r11 */
+ REG_OFFSET(fiq_regs[4]), /* r12 */
+ REG_OFFSET(fiq_regs[5]), /* r13 */
+ REG_OFFSET(fiq_regs[6]), /* r14 */
+ },
+
+ /* IRQ Registers */
+ [VCPU_REG_OFFSET_IRQ] = {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(irq_regs[0]), /* r13 */
+ REG_OFFSET(irq_regs[1]), /* r14 */
+ },
+
+ /* SVC Registers */
+ [VCPU_REG_OFFSET_SVC] = {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(svc_regs[0]), /* r13 */
+ REG_OFFSET(svc_regs[1]), /* r14 */
+ },
+
+ /* ABT Registers */
+ [VCPU_REG_OFFSET_ABT] = {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(abt_regs[0]), /* r13 */
+ REG_OFFSET(abt_regs[1]), /* r14 */
+ },
+
+ /* UND Registers */
+ [VCPU_REG_OFFSET_UND] = {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(und_regs[0]), /* r13 */
+ REG_OFFSET(und_regs[1]), /* r14 */
+ },
+};
+
+/*
+ * Return a pointer to the register number valid in the current mode of
+ * the virtual CPU.
+ */
+u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+{
+ u32 *reg_array = (u32 *)&vcpu->arch.regs;
+ u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+
+ switch (mode) {
+ case USR_MODE ... SVC_MODE:
+ mode &= ~MODE32_BIT; /* 0 ... 3 */
+ break;
+
+ case ABT_MODE:
+ mode = VCPU_REG_OFFSET_ABT;
+ break;
+
+ case UND_MODE:
+ mode = VCPU_REG_OFFSET_UND;
+ break;
+
+ case SYSTEM_MODE:
+ mode = VCPU_REG_OFFSET_USR;
+ break;
+
+ default:
+ BUG();
+ }
+
+ return reg_array + vcpu_reg_offsets[mode][reg_num];
+}
+
+/*
+ * Return the SPSR for the current mode of the virtual CPU.
+ */
+u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+{
+ u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+ switch (mode) {
+ case SVC_MODE:
+ return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
+ case ABT_MODE:
+ return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
+ case UND_MODE:
+ return &vcpu->arch.regs.KVM_ARM_UND_spsr;
+ case IRQ_MODE:
+ return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
+ case FIQ_MODE:
+ return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu: the vcpu pointer
+ * @run: the kvm_run structure pointer
+ *
+ * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+ * halt execution of world-switches and schedule other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ trace_kvm_wfi(*vcpu_pc(vcpu));
+ kvm_vcpu_block(vcpu);
+ return 1;
+}
+
+/**
+ * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu: The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
+ */
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+ unsigned long itbits, cond;
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ bool is_arm = !(cpsr & PSR_T_BIT);
+
+ BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
+
+ if (!(cpsr & PSR_IT_MASK))
+ return;
+
+ cond = (cpsr & 0xe000) >> 13;
+ itbits = (cpsr & 0x1c00) >> (10 - 2);
+ itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+ /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
+ if ((itbits & 0x7) == 0)
+ itbits = cond = 0;
+ else
+ itbits = (itbits << 1) & 0x1f;
+
+ cpsr &= ~PSR_IT_MASK;
+ cpsr |= cond << 13;
+ cpsr |= (itbits & 0x1c) << (10 - 2);
+ cpsr |= (itbits & 0x3) << 25;
+ *vcpu_cpsr(vcpu) = cpsr;
+}
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ bool is_thumb;
+
+ is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
+ if (is_thumb && !is_wide_instr)
+ *vcpu_pc(vcpu) += 2;
+ else
+ *vcpu_pc(vcpu) += 4;
+ kvm_adjust_itstate(vcpu);
+}
+
+
+/******************************************************************************
+ * Inject exceptions into the guest
+ */
+
+static u32 exc_vector_base(struct kvm_vcpu *vcpu)
+{
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+ u32 vbar = vcpu->arch.cp15[c12_VBAR];
+
+ if (sctlr & SCTLR_V)
+ return 0xffff0000;
+ else /* always have security extensions */
+ return vbar;
+}
+
+/**
+ * kvm_inject_undefined - inject an undefined exception into the guest
+ * @vcpu: The VCPU to receive the undefined exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ *
+ * Modelled after TakeUndefInstrException() pseudocode.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+ u32 new_lr_value;
+ u32 new_spsr_value;
+ u32 cpsr = *vcpu_cpsr(vcpu);
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+ bool is_thumb = (cpsr & PSR_T_BIT);
+ u32 vect_offset = 4;
+ u32 return_offset = (is_thumb) ? 2 : 4;
+
+ new_spsr_value = cpsr;
+ new_lr_value = *vcpu_pc(vcpu) - return_offset;
+
+ *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
+ *vcpu_cpsr(vcpu) |= PSR_I_BIT;
+ *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+ if (sctlr & SCTLR_TE)
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+ if (sctlr & SCTLR_EE)
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+ /* Note: These now point to UND banked copies */
+ *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_reg(vcpu, 14) = new_lr_value;
+
+ /* Branch to exception vector */
+ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException
+ * pseudocode.
+ */
+static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+{
+ u32 new_lr_value;
+ u32 new_spsr_value;
+ u32 cpsr = *vcpu_cpsr(vcpu);
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+ bool is_thumb = (cpsr & PSR_T_BIT);
+ u32 vect_offset;
+ u32 return_offset = (is_thumb) ? 4 : 0;
+ bool is_lpae;
+
+ new_spsr_value = cpsr;
+ new_lr_value = *vcpu_pc(vcpu) + return_offset;
+
+ *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
+ *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
+ *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+ if (sctlr & SCTLR_TE)
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+ if (sctlr & SCTLR_EE)
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+ /* Note: These now point to ABT banked copies */
+ *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_reg(vcpu, 14) = new_lr_value;
+
+ if (is_pabt)
+ vect_offset = 12;
+ else
+ vect_offset = 16;
+
+ /* Branch to exception vector */
+ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+
+ if (is_pabt) {
+ /* Set IFAR and IFSR */
+ vcpu->arch.cp15[c6_IFAR] = addr;
+ is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+ /* Always give debug fault for now - should give guest a clue */
+ if (is_lpae)
+ vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+ else
+ vcpu->arch.cp15[c5_IFSR] = 2;
+ } else { /* !iabt */
+ /* Set DFAR and DFSR */
+ vcpu->arch.cp15[c6_DFAR] = addr;
+ is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+ /* Always give debug fault for now - should give guest a clue */
+ if (is_lpae)
+ vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+ else
+ vcpu->arch.cp15[c5_DFSR] = 2;
+ }
+
+}
+
+/**
+ * kvm_inject_dabt - inject a data abort into the guest
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ inject_abt(vcpu, false, addr);
+}
+
+/**
+ * kvm_inject_pabt - inject a prefetch abort into the guest
+ * @vcpu: The VCPU to receive the prefetch abort
+ * @addr: The address to report in the IFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ inject_abt(vcpu, true, addr);
+}
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
new file mode 100644
index 000000000000..2339d9609d36
--- /dev/null
+++ b/arch/arm/kvm/guest.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+
+#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
+#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { NULL }
+};
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static u64 core_reg_offset_from_id(u64 id)
+{
+ return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
+}
+
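+/*
+ * Example (illustrative): the id KVM_REG_ARM | KVM_REG_SIZE_U32 |
+ * KVM_REG_ARM_CORE | n selects the n'th 32-bit word of struct kvm_regs.
+ */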
+static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ u32 __user *uaddr = (u32 __user *)(long)reg->addr;
+ struct kvm_regs *regs = &vcpu->arch.regs;
+ u64 off;
+
+ if (KVM_REG_SIZE(reg->id) != 4)
+ return -ENOENT;
+
+ /* Our ID is an index into the kvm_regs struct. */
+ off = core_reg_offset_from_id(reg->id);
+ if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
+ return -ENOENT;
+
+ return put_user(((u32 *)regs)[off], uaddr);
+}
+
+static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ u32 __user *uaddr = (u32 __user *)(long)reg->addr;
+ struct kvm_regs *regs = &vcpu->arch.regs;
+ u64 off, val;
+
+ if (KVM_REG_SIZE(reg->id) != 4)
+ return -ENOENT;
+
+ /* Our ID is an index into the kvm_regs struct. */
+ off = core_reg_offset_from_id(reg->id);
+ if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
+ return -ENOENT;
+
+ if (get_user(val, uaddr) != 0)
+ return -EFAULT;
+
+ if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
+ unsigned long mode = val & MODE_MASK;
+ switch (mode) {
+ case USR_MODE:
+ case FIQ_MODE:
+ case IRQ_MODE:
+ case SVC_MODE:
+ case ABT_MODE:
+ case UND_MODE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ((u32 *)regs)[off] = val;
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ return -EINVAL;
+}
+
+static unsigned long num_core_regs(void)
+{
+ return sizeof(struct kvm_regs) / sizeof(u32);
+}
+
+/**
+ * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
+{
+ return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
+}
+
+/**
+ * kvm_arm_copy_reg_indices - get indices of all registers.
+ *
+ * We do core registers right here, then we append coproc regs.
+ */
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ unsigned int i;
+ const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
+
+ for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
+ if (put_user(core_reg | i, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ return kvm_arm_copy_coproc_indices(vcpu, uindices);
+}
+
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
+ return -EINVAL;
+
+ /* Register group 16 means we want a core register. */
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ return get_core_reg(vcpu, reg);
+
+ return kvm_arm_coproc_get_reg(vcpu, reg);
+}
+
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
+ return -EINVAL;
+
+ /* Register group 16 means we set a core register. */
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ return set_core_reg(vcpu, reg);
+
+ return kvm_arm_coproc_set_reg(vcpu, reg);
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned int i;
+
+ /* We can only do a Cortex-A15 for now. */
+ if (init->target != kvm_target_cpu())
+ return -EINVAL;
+
+ vcpu->arch.target = init->target;
+ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+
+ /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+ for (i = 0; i < sizeof(init->features) * 8; i++) {
+ if (test_bit(i, (void *)init->features)) {
+ if (i >= KVM_VCPU_MAX_FEATURES)
+ return -ENOENT;
+ set_bit(i, vcpu->arch.features);
+ }
+ }
+
+ /* Now we know what it is, we can reset it. */
+ return kvm_reset_vcpu(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ return -EINVAL;
+}
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
new file mode 100644
index 000000000000..9f37a79b880b
--- /dev/null
+++ b/arch/arm/kvm/init.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unified.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+/********************************************************************
+ * Hypervisor initialization
+ * - should be called with:
+ * r0,r1 = Hypervisor pgd pointer
+ * r2 = top of Hyp stack (kernel VA)
+ * r3 = pointer to hyp vectors
+ */
+
+ .text
+ .pushsection .hyp.idmap.text,"ax"
+ .align 5
+__kvm_hyp_init:
+ .globl __kvm_hyp_init
+
+ @ Hyp-mode exception vector
+ W(b) .
+ W(b) .
+ W(b) .
+ W(b) .
+ W(b) .
+ W(b) __do_hyp_init
+ W(b) .
+ W(b) .
+
+__do_hyp_init:
+ @ Set the HTTBR to point to the hypervisor PGD pointer passed
+ mcrr p15, 4, r0, r1, c2
+
+ @ Set the HTCR and VTCR to the same shareability and cacheability
+ @ settings as the non-secure TTBCR and with T0SZ == 0.
+ mrc p15, 4, r0, c2, c0, 2 @ HTCR
+ ldr r12, =HTCR_MASK
+ bic r0, r0, r12
+ mrc p15, 0, r1, c2, c0, 2 @ TTBCR
+ and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
+ orr r0, r0, r1
+ mcr p15, 4, r0, c2, c0, 2 @ HTCR
+
+ mrc p15, 4, r1, c2, c1, 2 @ VTCR
+ ldr r12, =VTCR_MASK
+ bic r1, r1, r12
+ bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits
+ orr r1, r0, r1
+ orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
+ mcr p15, 4, r1, c2, c1, 2 @ VTCR
+
+ @ Use the same memory attributes for hyp. accesses as the kernel
+ @ (copy MAIRx to HMAIRx).
+ mrc p15, 0, r0, c10, c2, 0
+ mcr p15, 4, r0, c10, c2, 0
+ mrc p15, 0, r0, c10, c2, 1
+ mcr p15, 4, r0, c10, c2, 1
+
+ @ Set the HSCTLR to:
+ @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
+ @ - Endianness: Kernel config
+ @ - Fast Interrupt Features: Kernel config
+ @ - Write permission implies XN: disabled
+ @ - Instruction cache: enabled
+ @ - Data/Unified cache: enabled
+ @ - Memory alignment checks: enabled
+ @ - MMU: enabled (this code must be run from an identity mapping)
+ mrc p15, 4, r0, c1, c0, 0 @ HSCTLR
+ ldr r12, =HSCTLR_MASK
+ bic r0, r0, r12
+ mrc p15, 0, r1, c1, c0, 0 @ SCTLR
+ ldr r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
+ and r1, r1, r12
+ ARM( ldr r12, =(HSCTLR_M | HSCTLR_A) )
+ THUMB( ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
+ orr r1, r1, r12
+ orr r0, r0, r1
+ isb
+ mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
+ isb
+
+ @ Set stack pointer and return to the kernel
+ mov sp, r2
+
+ @ Set HVBAR to point to the HYP vectors
+ mcr p15, 4, r3, c12, c0, 0 @ HVBAR
+
+ eret
+
+ .ltorg
+
+ .globl __kvm_hyp_init_end
+__kvm_hyp_init_end:
+
+ .popsection
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
new file mode 100644
index 000000000000..c5400d2e97ca
--- /dev/null
+++ b/arch/arm/kvm/interrupts.S
@@ -0,0 +1,478 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/unified.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/vfpmacros.h>
+#include "interrupts_head.S"
+
+ .text
+
+__kvm_hyp_code_start:
+ .globl __kvm_hyp_code_start
+
+/********************************************************************
+ * Flush per-VMID TLBs
+ *
+ * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ *
+ * We rely on the hardware to broadcast the TLB invalidation to all CPUs
+ * inside the inner-shareable domain (which is the case for all v7
+ * implementations). If we come across a non-IS SMP implementation, we'll
+ * have to use an IPI based mechanism. Until then, we stick to the simple
+ * hardware assisted version.
+ */
+ENTRY(__kvm_tlb_flush_vmid)
+ push {r2, r3}
+
+ add r0, r0, #KVM_VTTBR
+ ldrd r2, r3, [r0]
+ mcrr p15, 6, r2, r3, c2 @ Write VTTBR
+ isb
+ mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
+ dsb
+ isb
+ mov r2, #0
+ mov r3, #0
+ mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
+ isb @ Not necessary if followed by eret
+
+ pop {r2, r3}
+ bx lr
+ENDPROC(__kvm_tlb_flush_vmid)
+
+/********************************************************************
+ * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
+ * domain, for all VMIDs
+ *
+ * void __kvm_flush_vm_context(void);
+ */
+ENTRY(__kvm_flush_vm_context)
+ mov r0, #0 @ rn parameter for CP15 flushes is SBZ
+
+ /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
+ mcr p15, 4, r0, c8, c3, 4
+ /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
+ mcr p15, 0, r0, c7, c1, 0
+ dsb
+ isb @ Not necessary if followed by eret
+
+ bx lr
+ENDPROC(__kvm_flush_vm_context)
+
+
+/********************************************************************
+ * Hypervisor world-switch code
+ *
+ *
+ * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ */
+ENTRY(__kvm_vcpu_run)
+ @ Save the vcpu pointer
+ mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
+
+ save_host_regs
+
+ @ Store hardware CP15 state and load guest state
+ read_cp15_state store_to_vcpu = 0
+ write_cp15_state read_from_vcpu = 1
+
+ @ If the host kernel has not been configured with VFPv3 support,
+ @ then it is safer if we deny guests the use of it as well.
+#ifdef CONFIG_VFPv3
+ @ Set FPEXC_EN so the guest doesn't trap floating point instructions
+ VFPFMRX r2, FPEXC @ VMRS
+ push {r2}
+ orr r2, r2, #FPEXC_EN
+ VFPFMXR FPEXC, r2 @ VMSR
+#endif
+
+ @ Configure Hyp-role
+ configure_hyp_role vmentry
+
+ @ Trap coprocessor CRx accesses
+ set_hstr vmentry
+ set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+ set_hdcr vmentry
+
+ @ Write configured ID register into MIDR alias
+ ldr r1, [vcpu, #VCPU_MIDR]
+ mcr p15, 4, r1, c0, c0, 0
+
+ @ Write guest view of MPIDR into VMPIDR
+ ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
+ mcr p15, 4, r1, c0, c0, 5
+
+ @ Set up guest memory translation
+ ldr r1, [vcpu, #VCPU_KVM]
+ add r1, r1, #KVM_VTTBR
+ ldrd r2, r3, [r1]
+ mcrr p15, 6, r2, r3, c2 @ Write VTTBR
+
+ @ We're all done, just restore the GPRs and go to the guest
+ restore_guest_regs
+ clrex @ Clear exclusive monitor
+ eret
+
+__kvm_vcpu_return:
+ /*
+ * return convention:
+ * guest r0, r1, r2 saved on the stack
+ * r0: vcpu pointer
+ * r1: exception code
+ */
+ save_guest_regs
+
+ @ Set VMID == 0
+ mov r2, #0
+ mov r3, #0
+ mcrr p15, 6, r2, r3, c2 @ Write VTTBR
+
+ @ Don't trap coprocessor accesses for host kernel
+ set_hstr vmexit
+ set_hdcr vmexit
+ set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+
+#ifdef CONFIG_VFPv3
+ @ Save floating point registers if we let the guest use them.
+ tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+ bne after_vfp_restore
+
+ @ Switch VFP/NEON hardware state to the host's
+ add r7, vcpu, #VCPU_VFP_GUEST
+ store_vfp_state r7
+ add r7, vcpu, #VCPU_VFP_HOST
+ ldr r7, [r7]
+ restore_vfp_state r7
+
+after_vfp_restore:
+ @ Restore FPEXC_EN which we clobbered on entry
+ pop {r2}
+ VFPFMXR FPEXC, r2
+#endif
+
+ @ Reset Hyp-role
+ configure_hyp_role vmexit
+
+ @ Let host read hardware MIDR
+ mrc p15, 0, r2, c0, c0, 0
+ mcr p15, 4, r2, c0, c0, 0
+
+ @ Back to hardware MPIDR
+ mrc p15, 0, r2, c0, c0, 5
+ mcr p15, 4, r2, c0, c0, 5
+
+ @ Store guest CP15 state and restore host state
+ read_cp15_state store_to_vcpu = 1
+ write_cp15_state read_from_vcpu = 0
+
+ restore_host_regs
+ clrex @ Clear exclusive monitor
+ mov r0, r1 @ Return the return code
+ mov r1, #0 @ Clear upper bits in return value
+ bx lr @ return to IOCTL
+
+/********************************************************************
+ * Call function in Hyp mode
+ *
+ *
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed). The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
+ * passed in r0 and r1.
+ *
+ * The calling convention follows the standard AAPCS:
+ * r0 - r3: caller save
+ * r12: caller save
+ * rest: callee save
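+ *
+ * Example (illustrative): kvm_call_hyp(__kvm_vcpu_run, vcpu) enters the
+ * world-switch code above with the vcpu pointer arriving in r0.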
+ */
+ENTRY(kvm_call_hyp)
+ hvc #0
+ bx lr
+
+/********************************************************************
+ * Hypervisor exception vector and handlers
+ *
+ *
+ * The KVM/ARM Hypervisor ABI is defined as follows:
+ *
+ * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
+ * instruction is issued since all traps are disabled when running the host
+ * kernel as per the Hyp-mode initialization at boot time.
+ *
+ * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
+ * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * instructions are called from within Hyp-mode.
+ *
+ * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
+ * Switching to Hyp mode is done through a simple HVC #0 instruction. The
+ * exception vector code will check that the HVC comes from VMID==0 and if
+ * so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
+ * - r0 contains a pointer to a HYP function
+ * - r1, r2, and r3 contain arguments to the above function.
+ * - The HYP function will be called with its arguments in r0, r1 and r2.
+ * On HYP function return, we return directly to SVC.
+ *
+ * Note that the above is used to execute code in Hyp-mode from a host-kernel
+ * point of view, and is a different concept from performing a world-switch and
+ * executing guest code SVC mode (with a VMID != 0).
+ */
+
+/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
+.macro bad_exception exception_code, panic_str
+ push {r0-r2}
+ mrrc p15, 6, r0, r1, c2 @ Read VTTBR
+ lsr r1, r1, #16
+ ands r1, r1, #0xff
+ beq 99f
+
+ load_vcpu @ Load VCPU pointer
+ .if \exception_code == ARM_EXCEPTION_DATA_ABORT
+ mrc p15, 4, r2, c5, c2, 0 @ HSR
+ mrc p15, 4, r1, c6, c0, 0 @ HDFAR
+ str r2, [vcpu, #VCPU_HSR]
+ str r1, [vcpu, #VCPU_HxFAR]
+ .endif
+ .if \exception_code == ARM_EXCEPTION_PREF_ABORT
+ mrc p15, 4, r2, c5, c2, 0 @ HSR
+ mrc p15, 4, r1, c6, c0, 2 @ HIFAR
+ str r2, [vcpu, #VCPU_HSR]
+ str r1, [vcpu, #VCPU_HxFAR]
+ .endif
+ mov r1, #\exception_code
+ b __kvm_vcpu_return
+
+ @ We were in the host already. Let's craft a panicking return to SVC.
+99: mrs r2, cpsr
+ bic r2, r2, #MODE_MASK
+ orr r2, r2, #SVC_MODE
+THUMB( orr r2, r2, #PSR_T_BIT )
+ msr spsr_cxsf, r2
+ mrs r1, ELR_hyp
+ ldr r2, =BSYM(panic)
+ msr ELR_hyp, r2
+ ldr r0, =\panic_str
+ eret
+.endm
+
+ .text
+
+ .align 5
+__kvm_hyp_vector:
+ .globl __kvm_hyp_vector
+
+ @ Hyp-mode exception vector
+ W(b) hyp_reset
+ W(b) hyp_undef
+ W(b) hyp_svc
+ W(b) hyp_pabt
+ W(b) hyp_dabt
+ W(b) hyp_hvc
+ W(b) hyp_irq
+ W(b) hyp_fiq
+
+ .align
+hyp_reset:
+ b hyp_reset
+
+ .align
+hyp_undef:
+ bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
+
+ .align
+hyp_svc:
+ bad_exception ARM_EXCEPTION_HVC, svc_die_str
+
+ .align
+hyp_pabt:
+ bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
+
+ .align
+hyp_dabt:
+ bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
+
+ .align
+hyp_hvc:
+ /*
+ * Getting here is either because of a trap from a guest or from calling
+ * HVC from the host kernel, which means "switch to Hyp mode".
+ */
+ push {r0, r1, r2}
+
+ @ Check syndrome register
+ mrc p15, 4, r1, c5, c2, 0 @ HSR
+ lsr r0, r1, #HSR_EC_SHIFT
+#ifdef CONFIG_VFPv3
+ cmp r0, #HSR_EC_CP_0_13
+ beq switch_to_guest_vfp
+#endif
+ cmp r0, #HSR_EC_HVC
+ bne guest_trap @ Not HVC instr.
+
+ /*
+ * Let's check if the HVC came from VMID 0 and allow simple
+ * switch to Hyp mode
+ */
+ mrrc p15, 6, r0, r2, c2
+ lsr r2, r2, #16
+ and r2, r2, #0xff
+ cmp r2, #0
+ bne guest_trap @ Guest called HVC
+
+host_switch_to_hyp:
+ pop {r0, r1, r2}
+
+ push {lr}
+ mrs lr, SPSR
+ push {lr}
+
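+ @ The caller put the HYP function pointer in r0 and its arguments
+ @ in r1-r3 (see the Hyp-ABI comment above); shuffle them into lr
+ @ and r0-r2 for the call below.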
+ mov lr, r0
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+
+THUMB( orr lr, #1)
+ blx lr @ Call the HYP function
+
+ pop {lr}
+ msr SPSR_csxf, lr
+ pop {lr}
+ eret
+
+guest_trap:
+ load_vcpu @ Load VCPU pointer to r0
+ str r1, [vcpu, #VCPU_HSR]
+
+ @ Check if we need the fault information
+ lsr r1, r1, #HSR_EC_SHIFT
+ cmp r1, #HSR_EC_IABT
+ mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
+ beq 2f
+ cmp r1, #HSR_EC_DABT
+ bne 1f
+ mrc p15, 4, r2, c6, c0, 0 @ HDFAR
+
+2: str r2, [vcpu, #VCPU_HxFAR]
+
+ /*
+ * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
+ *
+ * Abort on the stage 2 translation for a memory access from a
+ * Non-secure PL1 or PL0 mode:
+ *
+ * For any Access flag fault or Translation fault, and also for any
+ * Permission fault on the stage 2 translation of a memory access
+ * made as part of a translation table walk for a stage 1 translation,
+ * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
+ * is UNKNOWN.
+ */
+
+ /* Check for permission fault, and S1PTW */
+ mrc p15, 4, r1, c5, c2, 0 @ HSR
+ and r0, r1, #HSR_FSC_TYPE
+ cmp r0, #FSC_PERM
+ tsteq r1, #(1 << 7) @ S1PTW
+ mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
+ bne 3f
+
+ /* Resolve IPA using the xFAR */
+ mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
+ isb
+ mrrc p15, 0, r0, r1, c7 @ PAR
+ tst r0, #1
+ bne 4f @ Failed translation
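+ @ {r1, r0} now holds the 64-bit PAR; repack PA[39:12] (the IPA)
+ @ into HPFAR format, which keeps IPA[39:12] in bits [31:4].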
+ ubfx r2, r0, #12, #20
+ lsl r2, r2, #4
+ orr r2, r2, r1, lsl #24
+
+3: load_vcpu @ Load VCPU pointer to r0
+ str r2, [r0, #VCPU_HPFAR]
+
+1: mov r1, #ARM_EXCEPTION_HVC
+ b __kvm_vcpu_return
+
+4: pop {r0, r1, r2} @ Failed translation, return to guest
+ eret
+
+/*
+ * If VFPv3 support is not available, then we will not switch the VFP
+ * registers; however cp10 and cp11 accesses will still trap and fallback
+ * to the regular coprocessor emulation code, which currently will
+ * inject an undefined exception to the guest.
+ */
+#ifdef CONFIG_VFPv3
+switch_to_guest_vfp:
+ load_vcpu @ Load VCPU pointer to r0
+ push {r3-r7}
+
+ @ NEON/VFP used. Turn on VFP access.
+ set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
+
+ @ Switch VFP/NEON hardware state to the guest's
+ add r7, r0, #VCPU_VFP_HOST
+ ldr r7, [r7]
+ store_vfp_state r7
+ add r7, r0, #VCPU_VFP_GUEST
+ restore_vfp_state r7
+
+ pop {r3-r7}
+ pop {r0-r2}
+ eret
+#endif
+
+ .align
+hyp_irq:
+ push {r0, r1, r2}
+ mov r1, #ARM_EXCEPTION_IRQ
+ load_vcpu @ Load VCPU pointer to r0
+ b __kvm_vcpu_return
+
+ .align
+hyp_fiq:
+ b hyp_fiq
+
+ .ltorg
+
+__kvm_hyp_code_end:
+ .globl __kvm_hyp_code_end
+
+ .section ".rodata"
+
+und_die_str:
+ .ascii "unexpected undefined exception in Hyp mode at: %#08x"
+pabt_die_str:
+ .ascii "unexpected prefetch abort in Hyp mode at: %#08x"
+dabt_die_str:
+ .ascii "unexpected data abort in Hyp mode at: %#08x"
+svc_die_str:
+ .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x"
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
new file mode 100644
index 000000000000..6a95d341e9c5
--- /dev/null
+++ b/arch/arm/kvm/interrupts_head.S
@@ -0,0 +1,441 @@
+#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
+#define VCPU_USR_SP (VCPU_USR_REG(13))
+#define VCPU_USR_LR (VCPU_USR_REG(14))
+#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
+
+/*
+ * Many of these macros need to access the VCPU structure, which is always
+ * held in r0. These macros should never clobber r1, as it is used to hold the
+ * exception code on the return path (except of course the macro that switches
+ * all the registers before the final jump to the VM).
+ */
+vcpu .req r0 @ vcpu pointer always in r0
+
+/* Clobbers {r2-r6} */
+.macro store_vfp_state vfp_base
+ @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
+ VFPFMRX r2, FPEXC
+ @ Make sure VFP is enabled so we can touch the registers.
+ orr r6, r2, #FPEXC_EN
+ VFPFMXR FPEXC, r6
+
+ VFPFMRX r3, FPSCR
+ tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
+ beq 1f
+ @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
+ @ we only need to save them if FPEXC_EX is set.
+ VFPFMRX r4, FPINST
+ tst r2, #FPEXC_FP2V
+ VFPFMRX r5, FPINST2, ne @ vmrsne
+ bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
+ VFPFMXR FPEXC, r6
+1:
+ VFPFSTMIA \vfp_base, r6 @ Save VFP registers
+ stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
+.endm
+
+/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
+.macro restore_vfp_state vfp_base
+ VFPFLDMIA \vfp_base, r6 @ Load VFP registers
+ ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
+
+ VFPFMXR FPSCR, r3
+ tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
+ beq 1f
+ VFPFMXR FPINST, r4
+ tst r2, #FPEXC_FP2V
+ VFPFMXR FPINST2, r5, ne
+1:
+ VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
+.endm
+
+/* These are simply for the macros to work - values don't have meaning */
+.equ usr, 0
+.equ svc, 1
+.equ abt, 2
+.equ und, 3
+.equ irq, 4
+.equ fiq, 5
+
+.macro push_host_regs_mode mode
+ mrs r2, SP_\mode
+ mrs r3, LR_\mode
+ mrs r4, SPSR_\mode
+ push {r2, r3, r4}
+.endm
+
+/*
+ * Store all host persistent registers on the stack.
+ * Clobbers all registers, in all modes, except r0 and r1.
+ */
+.macro save_host_regs
+ /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
+ mrs r2, ELR_hyp
+ push {r2}
+
+ /* usr regs */
+ push {r4-r12} @ r0-r3 are always clobbered
+ mrs r2, SP_usr
+ mov r3, lr
+ push {r2, r3}
+
+ push_host_regs_mode svc
+ push_host_regs_mode abt
+ push_host_regs_mode und
+ push_host_regs_mode irq
+
+ /* fiq regs */
+ mrs r2, r8_fiq
+ mrs r3, r9_fiq
+ mrs r4, r10_fiq
+ mrs r5, r11_fiq
+ mrs r6, r12_fiq
+ mrs r7, SP_fiq
+ mrs r8, LR_fiq
+ mrs r9, SPSR_fiq
+ push {r2-r9}
+.endm
+
+.macro pop_host_regs_mode mode
+ pop {r2, r3, r4}
+ msr SP_\mode, r2
+ msr LR_\mode, r3
+ msr SPSR_\mode, r4
+.endm
+
+/*
+ * Restore all host registers from the stack.
+ * Clobbers all registers, in all modes, except r0 and r1.
+ */
+.macro restore_host_regs
+ pop {r2-r9}
+ msr r8_fiq, r2
+ msr r9_fiq, r3
+ msr r10_fiq, r4
+ msr r11_fiq, r5
+ msr r12_fiq, r6
+ msr SP_fiq, r7
+ msr LR_fiq, r8
+ msr SPSR_fiq, r9
+
+ pop_host_regs_mode irq
+ pop_host_regs_mode und
+ pop_host_regs_mode abt
+ pop_host_regs_mode svc
+
+ pop {r2, r3}
+ msr SP_usr, r2
+ mov lr, r3
+ pop {r4-r12}
+
+ pop {r2}
+ msr ELR_hyp, r2
+.endm
+
+/*
+ * Restore SP, LR and SPSR for a given mode. offset is the offset of
+ * this mode's registers from the VCPU base.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r1, r2, r3, r4.
+ */
+.macro restore_guest_regs_mode mode, offset
+ add r1, vcpu, \offset
+ ldm r1, {r2, r3, r4}
+ msr SP_\mode, r2
+ msr LR_\mode, r3
+ msr SPSR_\mode, r4
+.endm
+
+/*
+ * Restore all guest registers from the vcpu struct.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers *all* registers.
+ */
+.macro restore_guest_regs
+ restore_guest_regs_mode svc, #VCPU_SVC_REGS
+ restore_guest_regs_mode abt, #VCPU_ABT_REGS
+ restore_guest_regs_mode und, #VCPU_UND_REGS
+ restore_guest_regs_mode irq, #VCPU_IRQ_REGS
+
+ add r1, vcpu, #VCPU_FIQ_REGS
+ ldm r1, {r2-r9}
+ msr r8_fiq, r2
+ msr r9_fiq, r3
+ msr r10_fiq, r4
+ msr r11_fiq, r5
+ msr r12_fiq, r6
+ msr SP_fiq, r7
+ msr LR_fiq, r8
+ msr SPSR_fiq, r9
+
+ @ Load return state
+ ldr r2, [vcpu, #VCPU_PC]
+ ldr r3, [vcpu, #VCPU_CPSR]
+ msr ELR_hyp, r2
+ msr SPSR_cxsf, r3
+
+ @ Load user registers
+ ldr r2, [vcpu, #VCPU_USR_SP]
+ ldr r3, [vcpu, #VCPU_USR_LR]
+ msr SP_usr, r2
+ mov lr, r3
+ add vcpu, vcpu, #(VCPU_USR_REGS)
+ ldm vcpu, {r0-r12}
+.endm
+
+/*
+ * Save SP, LR and SPSR for a given mode. offset is the offset of
+ * this mode's registers from the VCPU base.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r2, r3, r4, r5.
+ */
+.macro save_guest_regs_mode mode, offset
+ add r2, vcpu, \offset
+ mrs r3, SP_\mode
+ mrs r4, LR_\mode
+ mrs r5, SPSR_\mode
+ stm r2, {r3, r4, r5}
+.endm
+
+/*
+ * Save all guest registers to the vcpu struct
+ * Expects guest's r0, r1, r2 on the stack.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r2, r3, r4, r5.
+ */
+.macro save_guest_regs
+ @ Store usr registers
+ add r2, vcpu, #VCPU_USR_REG(3)
+ stm r2, {r3-r12}
+ add r2, vcpu, #VCPU_USR_REG(0)
+ pop {r3, r4, r5} @ r0, r1, r2
+ stm r2, {r3, r4, r5}
+ mrs r2, SP_usr
+ mov r3, lr
+ str r2, [vcpu, #VCPU_USR_SP]
+ str r3, [vcpu, #VCPU_USR_LR]
+
+ @ Store return state
+ mrs r2, ELR_hyp
+ mrs r3, spsr
+ str r2, [vcpu, #VCPU_PC]
+ str r3, [vcpu, #VCPU_CPSR]
+
+ @ Store other guest registers
+ save_guest_regs_mode svc, #VCPU_SVC_REGS
+ save_guest_regs_mode abt, #VCPU_ABT_REGS
+ save_guest_regs_mode und, #VCPU_UND_REGS
+ save_guest_regs_mode irq, #VCPU_IRQ_REGS
+.endm
+
+/* Reads cp15 registers from hardware and stores them in memory
+ * @store_to_vcpu: If 0, registers are written in-order to the stack,
+ * otherwise to the VCPU struct pointed to by the vcpu register
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r2 - r12
+ */
+.macro read_cp15_state store_to_vcpu
+ mrc p15, 0, r2, c1, c0, 0 @ SCTLR
+ mrc p15, 0, r3, c1, c0, 2 @ CPACR
+ mrc p15, 0, r4, c2, c0, 2 @ TTBCR
+ mrc p15, 0, r5, c3, c0, 0 @ DACR
+ mrrc p15, 0, r6, r7, c2 @ TTBR 0
+ mrrc p15, 1, r8, r9, c2 @ TTBR 1
+ mrc p15, 0, r10, c10, c2, 0 @ PRRR
+ mrc p15, 0, r11, c10, c2, 1 @ NMRR
+ mrc p15, 2, r12, c0, c0, 0 @ CSSELR
+
+ .if \store_to_vcpu == 0
+ push {r2-r12} @ Push CP15 registers
+ .else
+ str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
+ str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
+ str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
+ str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
+ add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
+ strd r6, r7, [r2]
+ add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
+ strd r8, r9, [r2]
+ str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
+ str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
+ str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
+ .endif
+
+ mrc p15, 0, r2, c13, c0, 1 @ CID
+ mrc p15, 0, r3, c13, c0, 2 @ TID_URW
+ mrc p15, 0, r4, c13, c0, 3 @ TID_URO
+ mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
+ mrc p15, 0, r6, c5, c0, 0 @ DFSR
+ mrc p15, 0, r7, c5, c0, 1 @ IFSR
+ mrc p15, 0, r8, c5, c1, 0 @ ADFSR
+ mrc p15, 0, r9, c5, c1, 1 @ AIFSR
+ mrc p15, 0, r10, c6, c0, 0 @ DFAR
+ mrc p15, 0, r11, c6, c0, 2 @ IFAR
+ mrc p15, 0, r12, c12, c0, 0 @ VBAR
+
+ .if \store_to_vcpu == 0
+ push {r2-r12} @ Push CP15 registers
+ .else
+ str r2, [vcpu, #CP15_OFFSET(c13_CID)]
+ str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
+ str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
+ str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
+ str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
+ str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
+ str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
+ str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
+ str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
+ str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
+ str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
+ .endif
+.endm
+
+/*
+ * Reads cp15 registers from memory and writes them to hardware
+ * @read_from_vcpu: If 0, registers are read in-order from the stack,
+ * otherwise from the VCPU struct pointed to by vcpup
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro write_cp15_state read_from_vcpu
+ .if \read_from_vcpu == 0
+ pop {r2-r12}
+ .else
+ ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
+ ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
+ ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
+ ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
+ ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
+ ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
+ ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
+ ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
+ ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
+ ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
+ ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
+ .endif
+
+ mcr p15, 0, r2, c13, c0, 1 @ CID
+ mcr p15, 0, r3, c13, c0, 2 @ TID_URW
+ mcr p15, 0, r4, c13, c0, 3 @ TID_URO
+ mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
+ mcr p15, 0, r6, c5, c0, 0 @ DFSR
+ mcr p15, 0, r7, c5, c0, 1 @ IFSR
+ mcr p15, 0, r8, c5, c1, 0 @ ADFSR
+ mcr p15, 0, r9, c5, c1, 1 @ AIFSR
+ mcr p15, 0, r10, c6, c0, 0 @ DFAR
+ mcr p15, 0, r11, c6, c0, 2 @ IFAR
+ mcr p15, 0, r12, c12, c0, 0 @ VBAR
+
+ .if \read_from_vcpu == 0
+ pop {r2-r12}
+ .else
+ ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
+ ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
+ ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
+ ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
+ add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
+ ldrd r6, r7, [r12]
+ add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
+ ldrd r8, r9, [r12]
+ ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
+ ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
+ ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
+ .endif
+
+ mcr p15, 0, r2, c1, c0, 0 @ SCTLR
+ mcr p15, 0, r3, c1, c0, 2 @ CPACR
+ mcr p15, 0, r4, c2, c0, 2 @ TTBCR
+ mcr p15, 0, r5, c3, c0, 0 @ DACR
+ mcrr p15, 0, r6, r7, c2 @ TTBR 0
+ mcrr p15, 1, r8, r9, c2 @ TTBR 1
+ mcr p15, 0, r10, c10, c2, 0 @ PRRR
+ mcr p15, 0, r11, c10, c2, 1 @ NMRR
+ mcr p15, 2, r12, c0, c0, 0 @ CSSELR
+.endm
+
+/*
+ * Save the VGIC CPU state into memory
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro save_vgic_state
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro restore_vgic_state
+.endm
+
+.equ vmentry, 0
+.equ vmexit, 1
+
+/* Configures the HSTR (Hyp System Trap Register) on entry/return
+ * (hardware reset value is 0) */
+.macro set_hstr operation
+ mrc p15, 4, r2, c1, c1, 3
+ ldr r3, =HSTR_T(15)
+ .if \operation == vmentry
+ orr r2, r2, r3 @ Trap CR{15}
+ .else
+ bic r2, r2, r3 @ Don't trap any CRx accesses
+ .endif
+ mcr p15, 4, r2, c1, c1, 3
+.endm
+
+/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
+ * (hardware reset value is 0). Keep previous value in r2. */
+.macro set_hcptr operation, mask
+ mrc p15, 4, r2, c1, c1, 2
+ ldr r3, =\mask
+ .if \operation == vmentry
+ orr r3, r2, r3 @ Trap coproc-accesses defined in mask
+ .else
+ bic r3, r2, r3 @ Don't trap defined coproc-accesses
+ .endif
+ mcr p15, 4, r3, c1, c1, 2
+.endm
+
+/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
+ * (hardware reset value is 0) */
+.macro set_hdcr operation
+ mrc p15, 4, r2, c1, c1, 1
+ ldr r3, =(HDCR_TPM|HDCR_TPMCR)
+ .if \operation == vmentry
+ orr r2, r2, r3 @ Trap some perfmon accesses
+ .else
+ bic r2, r2, r3 @ Don't trap any perfmon accesses
+ .endif
+ mcr p15, 4, r2, c1, c1, 1
+.endm
+
+/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
+.macro configure_hyp_role operation
+ mrc p15, 4, r2, c1, c1, 0 @ HCR
+ bic r2, r2, #HCR_VIRT_EXCP_MASK
+ ldr r3, =HCR_GUEST_MASK
+ .if \operation == vmentry
+ orr r2, r2, r3
+ ldr r3, [vcpu, #VCPU_IRQ_LINES]
+ orr r2, r2, r3
+ .else
+ bic r2, r2, r3
+ .endif
+ mcr p15, 4, r2, c1, c1, 0
+.endm
+
+.macro load_vcpu
+ mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
+.endm
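
The set_hstr, set_hcptr and set_hdcr macros above all share one read-modify-write
pattern: read the trap register, OR a mask in on VM entry, clear it again on VM
exit. A small C model of that pattern, purely illustrative (the real code does
the read and write-back with mrc/mcr on cp15):

    enum { vmentry = 0, vmexit = 1 };

    /*
     * Toggle a set of trap bits in a hypervisor configuration register:
     * 'reg' stands in for the value read via mrc, and the caller is
     * expected to write the result back via mcr.
     */
    static unsigned long set_traps(unsigned long reg, unsigned long mask,
                                   int operation)
    {
            if (operation == vmentry)
                    reg |= mask;    /* trap the accesses named by mask */
            else
                    reg &= ~mask;   /* stop trapping them on the way out */
            return reg;
    }
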
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
new file mode 100644
index 000000000000..0144baf82904
--- /dev/null
+++ b/arch/arm/kvm/mmio.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+/**
+ * kvm_handle_mmio_return - handle MMIO loads after user space emulation
+ * @vcpu: The VCPU pointer
+ * @run: The VCPU run struct containing the mmio data
+ *
+ * This should only be called after returning from userspace for MMIO load
+ * emulation.
+ */
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ __u32 *dest;
+ unsigned int len;
+ int mask;
+
+ if (!run->mmio.is_write) {
+ dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
+ memset(dest, 0, sizeof(int));
+
+ len = run->mmio.len;
+ if (len > 4)
+ return -EINVAL;
+
+ memcpy(dest, run->mmio.data, len);
+
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+ *((u64 *)run->mmio.data));
+
+ if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+ mask = 1U << ((len * 8) - 1);
+ *dest = (*dest ^ mask) - mask;
+ }
+ }
+
+ return 0;
+}
+
+static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ struct kvm_exit_mmio *mmio)
+{
+ unsigned long rt, len;
+ bool is_write, sign_extend;
+
+ if ((vcpu->arch.hsr >> 8) & 1) {
+ /* cache operation on I/O addr, tell guest unsupported */
+ kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+ return 1;
+ }
+
+ if ((vcpu->arch.hsr >> 7) & 1) {
+ /* page table accesses IO mem: tell guest to fix its TTBR */
+ kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+ return 1;
+ }
+
+ switch ((vcpu->arch.hsr >> 22) & 0x3) {
+ case 0:
+ len = 1;
+ break;
+ case 1:
+ len = 2;
+ break;
+ case 2:
+ len = 4;
+ break;
+ default:
+ kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+ return -EFAULT;
+ }
+
+ is_write = vcpu->arch.hsr & HSR_WNR;
+ sign_extend = vcpu->arch.hsr & HSR_SSE;
+ rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+
+ if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
+ /* IO memory trying to read/write pc */
+ kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+ return 1;
+ }
+
+ mmio->is_write = is_write;
+ mmio->phys_addr = fault_ipa;
+ mmio->len = len;
+ vcpu->arch.mmio_decode.sign_extend = sign_extend;
+ vcpu->arch.mmio_decode.rt = rt;
+
+ /*
+ * The MMIO instruction is emulated and should not be re-executed
+ * in the guest.
+ */
+ kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+ return 0;
+}
+
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ phys_addr_t fault_ipa)
+{
+ struct kvm_exit_mmio mmio;
+ unsigned long rt;
+ int ret;
+
+ /*
+ * Prepare MMIO operation. First stash it in a private
+ * structure that we can use for in-kernel emulation. If the
+ * kernel can't handle it, copy it into run->mmio and let user
+ * space do its magic.
+ */
+
+ if (vcpu->arch.hsr & HSR_ISV) {
+ ret = decode_hsr(vcpu, fault_ipa, &mmio);
+ if (ret)
+ return ret;
+ } else {
+ kvm_err("load/store instruction decoding not implemented\n");
+ return -ENOSYS;
+ }
+
+ rt = vcpu->arch.mmio_decode.rt;
+ trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
+ KVM_TRACE_MMIO_READ_UNSATISFIED,
+ mmio.len, fault_ipa,
+ (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
+
+ if (mmio.is_write)
+ memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
+
+ kvm_prepare_mmio(run, &mmio);
+ return 0;
+}
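
The sign extension in kvm_handle_mmio_return() uses the classic XOR trick: XOR
with the sign bit of the narrow access, then subtract it, and the sign
propagates through the upper bits. A standalone sketch with a hypothetical
1-byte read:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t dest = 0x000000f0;  /* byte 0xf0, zero-extended */
            unsigned int len = 1;        /* access width in bytes */
            uint32_t mask = 1U << ((len * 8) - 1);  /* sign bit of access */

            /* XOR flips the sign bit; the subtract propagates it upward */
            dest = (dest ^ mask) - mask;

            printf("%#010x\n", dest);    /* prints 0xfffffff0, i.e. -16 */
            return 0;
    }
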
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
new file mode 100644
index 000000000000..f30e13163a96
--- /dev/null
+++ b/arch/arm/kvm/mmu.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/mman.h>
+#include <linux/kvm_host.h>
+#include <linux/io.h>
+#include <trace/events/kvm.h>
+#include <asm/idmap.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/mach/map.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
+
+static void kvm_tlb_flush_vmid(struct kvm *kvm)
+{
+ kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+}
+
+static void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+ pte_val(*pte) = new_pte;
+ /*
+ * flush_pmd_entry just takes a void pointer and cleans the necessary
+ * cache entries, so we can reuse the function for ptes.
+ */
+ flush_pmd_entry(pte);
+}
+
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+ int min, int max)
+{
+ void *page;
+
+ BUG_ON(max > KVM_NR_MEM_OBJS);
+ if (cache->nobjs >= min)
+ return 0;
+ while (cache->nobjs < max) {
+ page = (void *)__get_free_page(PGALLOC_GFP);
+ if (!page)
+ return -ENOMEM;
+ cache->objects[cache->nobjs++] = page;
+ }
+ return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+ while (mc->nobjs)
+ free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+ void *p;
+
+ BUG_ON(!mc || !mc->nobjs);
+ p = mc->objects[--mc->nobjs];
+ return p;
+}
+
+static void free_ptes(pmd_t *pmd, unsigned long addr)
+{
+ pte_t *pte;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
+ if (!pmd_none(*pmd) && pmd_table(*pmd)) {
+ pte = pte_offset_kernel(pmd, addr);
+ pte_free_kernel(NULL, pte);
+ }
+ pmd++;
+ }
+}
+
+/**
+ * free_hyp_pmds - free Hyp-mode level-2 tables and child level-3 tables
+ *
+ * Assumes this is a page table used strictly in Hyp-mode and therefore contains
+ * only mappings in the kernel memory area, which is above PAGE_OFFSET.
+ */
+void free_hyp_pmds(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ unsigned long addr;
+
+ mutex_lock(&kvm_hyp_pgd_mutex);
+ for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
+ pgd = hyp_pgd + pgd_index(addr);
+ pud = pud_offset(pgd, addr);
+
+ if (pud_none(*pud))
+ continue;
+ BUG_ON(pud_bad(*pud));
+
+ pmd = pmd_offset(pud, addr);
+ free_ptes(pmd, addr);
+ pmd_free(NULL, pmd);
+ pud_clear(pud);
+ }
+ mutex_unlock(&kvm_hyp_pgd_mutex);
+}
+
+static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
+ unsigned long end)
+{
+ pte_t *pte;
+ unsigned long addr;
+ struct page *page;
+
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ pte = pte_offset_kernel(pmd, addr);
+ BUG_ON(!virt_addr_valid(addr));
+ page = virt_to_page(addr);
+ kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
+ }
+}
+
+static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
+ unsigned long end,
+ unsigned long *pfn_base)
+{
+ pte_t *pte;
+ unsigned long addr;
+
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ pte = pte_offset_kernel(pmd, addr);
+ BUG_ON(pfn_valid(*pfn_base));
+ kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
+ (*pfn_base)++;
+ }
+}
+
+static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
+ unsigned long end, unsigned long *pfn_base)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long addr, next;
+
+ for (addr = start; addr < end; addr = next) {
+ pmd = pmd_offset(pud, addr);
+
+ BUG_ON(pmd_sect(*pmd));
+
+ if (pmd_none(*pmd)) {
+ pte = pte_alloc_one_kernel(NULL, addr);
+ if (!pte) {
+ kvm_err("Cannot allocate Hyp pte\n");
+ return -ENOMEM;
+ }
+ pmd_populate_kernel(NULL, pmd, pte);
+ }
+
+ next = pmd_addr_end(addr, end);
+
+ /*
+ * If pfn_base is NULL, we map kernel pages into HYP with the
+ * virtual address. Otherwise, this is considered an I/O
+ * mapping and we map the physical region starting at
+ * *pfn_base to [start, end).
+ */
+ if (!pfn_base)
+ create_hyp_pte_mappings(pmd, addr, next);
+ else
+ create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
+ }
+
+ return 0;
+}
+
+static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
+{
+ unsigned long start = (unsigned long)from;
+ unsigned long end = (unsigned long)to;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ unsigned long addr, next;
+ int err = 0;
+
+ BUG_ON(start > end);
+ if (start < PAGE_OFFSET)
+ return -EINVAL;
+
+ mutex_lock(&kvm_hyp_pgd_mutex);
+ for (addr = start; addr < end; addr = next) {
+ pgd = hyp_pgd + pgd_index(addr);
+ pud = pud_offset(pgd, addr);
+
+ if (pud_none_or_clear_bad(pud)) {
+ pmd = pmd_alloc_one(NULL, addr);
+ if (!pmd) {
+ kvm_err("Cannot allocate Hyp pmd\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ pud_populate(NULL, pud, pmd);
+ }
+
+ next = pgd_addr_end(addr, end);
+ err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
+ if (err)
+ goto out;
+ }
+out:
+ mutex_unlock(&kvm_hyp_pgd_mutex);
+ return err;
+}
+
+/**
+ * create_hyp_mappings - map a kernel virtual address range in Hyp mode
+ * @from: The virtual kernel start address of the range
+ * @to: The virtual kernel end address of the range (exclusive)
+ *
+ * The same virtual address as the kernel virtual address is also used in
+ * Hyp-mode mapping to the same underlying physical pages.
+ *
+ * Note: Wrapping around zero in the "to" address is not supported.
+ */
+int create_hyp_mappings(void *from, void *to)
+{
+ return __create_hyp_mappings(from, to, NULL);
+}
+
+/**
+ * create_hyp_io_mappings - map a physical IO range in Hyp mode
+ * @from: The virtual HYP start address of the range
+ * @to: The virtual HYP end address of the range (exclusive)
+ * @addr: The physical start address which gets mapped
+ */
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
+{
+ unsigned long pfn = __phys_to_pfn(addr);
+ return __create_hyp_mappings(from, to, &pfn);
+}
+
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER
+ * (it can support either full 40-bit input addresses or be limited to 32-bit
+ * input addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+ pgd_t *pgd;
+
+ if (kvm->arch.pgd != NULL) {
+ kvm_err("kvm_arch already initialized?\n");
+ return -EINVAL;
+ }
+
+ pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
+ if (!pgd)
+ return -ENOMEM;
+
+ /* stage-2 pgd must be aligned to its size */
+ VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
+
+ memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
+ clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+ kvm->arch.pgd = pgd;
+
+ return 0;
+}
+
+static void clear_pud_entry(pud_t *pud)
+{
+ pmd_t *pmd_table = pmd_offset(pud, 0);
+ pud_clear(pud);
+ pmd_free(NULL, pmd_table);
+ put_page(virt_to_page(pud));
+}
+
+static void clear_pmd_entry(pmd_t *pmd)
+{
+ pte_t *pte_table = pte_offset_kernel(pmd, 0);
+ pmd_clear(pmd);
+ pte_free_kernel(NULL, pte_table);
+ put_page(virt_to_page(pmd));
+}
+
+static bool pmd_empty(pmd_t *pmd)
+{
+ struct page *pmd_page = virt_to_page(pmd);
+ return page_count(pmd_page) == 1;
+}
+
+static void clear_pte_entry(pte_t *pte)
+{
+ if (pte_present(*pte)) {
+ kvm_set_pte(pte, __pte(0));
+ put_page(virt_to_page(pte));
+ }
+}
+
+static bool pte_empty(pte_t *pte)
+{
+ struct page *pte_page = virt_to_page(pte);
+ return page_count(pte_page) == 1;
+}
+
+/**
+ * unmap_stage2_range - clear stage2 page table entries to unmap a range
+ * @kvm: The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size: The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
+ * be called while holding mmu_lock (unless for freeing the stage2 pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ phys_addr_t addr = start, end = start + size;
+ u64 range;
+
+ while (addr < end) {
+ pgd = kvm->arch.pgd + pgd_index(addr);
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ addr += PUD_SIZE;
+ continue;
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+ addr += PMD_SIZE;
+ continue;
+ }
+
+ pte = pte_offset_kernel(pmd, addr);
+ clear_pte_entry(pte);
+ range = PAGE_SIZE;
+
+ /* If we emptied the pte, walk back up the ladder */
+ if (pte_empty(pte)) {
+ clear_pmd_entry(pmd);
+ range = PMD_SIZE;
+ if (pmd_empty(pmd)) {
+ clear_pud_entry(pud);
+ range = PUD_SIZE;
+ }
+ }
+
+ addr += range;
+ }
+}
+
+/**
+ * kvm_free_stage2_pgd - free all stage-2 tables
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * destroyed, which can only be done once.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+ if (kvm->arch.pgd == NULL)
+ return;
+
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+ kvm->arch.pgd = NULL;
+}
+
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte, old_pte;
+
+ /* Create 2nd stage page table mapping - Level 1 */
+ pgd = kvm->arch.pgd + pgd_index(addr);
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ if (!cache)
+ return 0; /* ignore calls from kvm_set_spte_hva */
+ pmd = mmu_memory_cache_alloc(cache);
+ pud_populate(NULL, pud, pmd);
+ pmd += pmd_index(addr);
+ get_page(virt_to_page(pud));
+ } else
+ pmd = pmd_offset(pud, addr);
+
+ /* Create 2nd stage page table mapping - Level 2 */
+ if (pmd_none(*pmd)) {
+ if (!cache)
+ return 0; /* ignore calls from kvm_set_spte_hva */
+ pte = mmu_memory_cache_alloc(cache);
+ clean_pte_table(pte);
+ pmd_populate_kernel(NULL, pmd, pte);
+ pte += pte_index(addr);
+ get_page(virt_to_page(pmd));
+ } else
+ pte = pte_offset_kernel(pmd, addr);
+
+ if (iomap && pte_present(*pte))
+ return -EFAULT;
+
+ /* Create 2nd stage page table mapping - Level 3 */
+ old_pte = *pte;
+ kvm_set_pte(pte, *new_pte);
+ if (pte_present(old_pte))
+ kvm_tlb_flush_vmid(kvm);
+ else
+ get_page(virt_to_page(pte));
+
+ return 0;
+}
+
+/**
+ * kvm_phys_addr_ioremap - map a device range to guest IPA
+ *
+ * @kvm: The KVM pointer
+ * @guest_ipa: The IPA at which to insert the mapping
+ * @pa: The physical address of the device
+ * @size: The size of the mapping
+ */
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+ phys_addr_t pa, unsigned long size)
+{
+ phys_addr_t addr, end;
+ int ret = 0;
+ unsigned long pfn;
+ struct kvm_mmu_memory_cache cache = { 0, };
+
+ end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
+ pfn = __phys_to_pfn(pa);
+
+ for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+ pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+
+ ret = mmu_topup_memory_cache(&cache, 2, 2);
+ if (ret)
+ goto out;
+ spin_lock(&kvm->mmu_lock);
+ ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
+ spin_unlock(&kvm->mmu_lock);
+ if (ret)
+ goto out;
+
+ pfn++;
+ }
+
+out:
+ mmu_free_memory_cache(&cache);
+ return ret;
+}
+
+static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+ /*
+ * If we are going to insert an instruction page and the icache is
+ * either VIPT or PIPT, there is a potential problem where the host
+ * (or another VM) may have used the same page as this guest, and we
+ * read incorrect data from the icache. If we're using a PIPT cache,
+ * we can invalidate just that page, but if we are using a VIPT cache
+ * we need to invalidate the entire icache - damn shame - as written
+ * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+ *
+ * VIVT caches are tagged using both the ASID and the VMID and don't
+ * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ */
+ if (icache_is_pipt()) {
+ unsigned long hva = gfn_to_hva(kvm, gfn);
+ __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+ } else if (!icache_is_vivt_asid_tagged()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ }
+}
+
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ gfn_t gfn, struct kvm_memory_slot *memslot,
+ unsigned long fault_status)
+{
+ pte_t new_pte;
+ pfn_t pfn;
+ int ret;
+ bool write_fault, writable;
+ unsigned long mmu_seq;
+ struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+
+ write_fault = kvm_is_write_fault(vcpu->arch.hsr);
+ if (fault_status == FSC_PERM && !write_fault) {
+ kvm_err("Unexpected L2 read permission error\n");
+ return -EFAULT;
+ }
+
+ /* We need minimum second+third level pages */
+ ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
+ if (ret)
+ return ret;
+
+ mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /*
+ * Ensure the read of mmu_notifier_seq happens before we call
+ * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
+ * the page we just got a reference to getting unmapped before we have
+ * a chance to grab the mmu_lock, which ensures that if the page gets
+ * unmapped afterwards, the call to kvm_unmap_hva will take it away
+ * from us again properly. This smp_rmb() interacts with the smp_wmb()
+ * in kvm_mmu_notifier_invalidate_<page|range_end>.
+ */
+ smp_rmb();
+
+ pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
+ if (is_error_pfn(pfn))
+ return -EFAULT;
+
+ new_pte = pfn_pte(pfn, PAGE_S2);
+ coherent_icache_guest_page(vcpu->kvm, gfn);
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+ goto out_unlock;
+ if (writable) {
+ pte_val(new_pte) |= L_PTE_S2_RDWR;
+ kvm_set_pfn_dirty(pfn);
+ }
+ stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
+}
+
+/**
+ * kvm_handle_guest_abort - handles all 2nd stage aborts
+ * @vcpu: the VCPU pointer
+ * @run: the kvm_run structure
+ *
+ * Any abort that gets to the host is almost guaranteed to be caused by a
+ * missing second stage translation table entry. This can mean either that the
+ * guest simply needs more memory and we must allocate an appropriate page, or
+ * that the guest tried to access I/O memory, which is emulated by user space.
+ * The distinction is based on the IPA causing the fault and whether this
+ * memory region has been registered as standard RAM by user space.
+ */
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ unsigned long hsr_ec;
+ unsigned long fault_status;
+ phys_addr_t fault_ipa;
+ struct kvm_memory_slot *memslot;
+ bool is_iabt;
+ gfn_t gfn;
+ int ret, idx;
+
+ hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
+ is_iabt = (hsr_ec == HSR_EC_IABT);
+ fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
+
+ trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
+ vcpu->arch.hxfar, fault_ipa);
+
+ /* Check that the stage-2 fault is a translation or a permission fault */
+ fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
+ if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+ kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
+ hsr_ec, fault_status);
+ return -EFAULT;
+ }
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ gfn = fault_ipa >> PAGE_SHIFT;
+ if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ if (is_iabt) {
+ /* Prefetch Abort on I/O address */
+ kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+ ret = 1;
+ goto out_unlock;
+ }
+
+ if (fault_status != FSC_FAULT) {
+ kvm_err("Unsupported fault status on io memory: %#lx\n",
+ fault_status);
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ /* Adjust page offset */
+ fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
+ ret = io_mem_abort(vcpu, run, fault_ipa);
+ goto out_unlock;
+ }
+
+ memslot = gfn_to_memslot(vcpu->kvm, gfn);
+ if (!memslot->user_alloc) {
+ kvm_err("non user-alloc memslots not supported\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+ if (ret == 0)
+ ret = 1;
+out_unlock:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ return ret;
+}
+
+static void handle_hva_to_gpa(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ void (*handler)(struct kvm *kvm,
+ gpa_t gpa, void *data),
+ void *data)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+
+ slots = kvm_memslots(kvm);
+
+ /* we only care about the pages that the guest sees */
+ kvm_for_each_memslot(memslot, slots) {
+ unsigned long hva_start, hva_end;
+ gfn_t gfn, gfn_end;
+
+ hva_start = max(start, memslot->userspace_addr);
+ hva_end = min(end, memslot->userspace_addr +
+ (memslot->npages << PAGE_SHIFT));
+ if (hva_start >= hva_end)
+ continue;
+
+ /*
+ * {gfn(page) | page intersects with [hva_start, hva_end)} =
+ * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+ */
+ gfn = hva_to_gfn_memslot(hva_start, memslot);
+ gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+ for (; gfn < gfn_end; ++gfn) {
+ gpa_t gpa = gfn << PAGE_SHIFT;
+ handler(kvm, gpa, data);
+ }
+ }
+}
+
+static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+ kvm_tlb_flush_vmid(kvm);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ unsigned long end = hva + PAGE_SIZE;
+
+ if (!kvm->arch.pgd)
+ return 0;
+
+ trace_kvm_unmap_hva(hva);
+ handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+ return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
+{
+ if (!kvm->arch.pgd)
+ return 0;
+
+ trace_kvm_unmap_hva_range(start, end);
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+ return 0;
+}
+
+static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ pte_t *pte = (pte_t *)data;
+
+ stage2_set_pte(kvm, NULL, gpa, pte, false);
+}
+
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ unsigned long end = hva + PAGE_SIZE;
+ pte_t stage2_pte;
+
+ if (!kvm->arch.pgd)
+ return;
+
+ trace_kvm_set_spte_hva(hva);
+ stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+ handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+ mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
+phys_addr_t kvm_mmu_get_httbr(void)
+{
+ VM_BUG_ON(!virt_addr_valid(hyp_pgd));
+ return virt_to_phys(hyp_pgd);
+}
+
+int kvm_mmu_init(void)
+{
+ if (!hyp_pgd) {
+ kvm_err("Hyp mode PGD not allocated\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
+ *
+ * Free the underlying pmds for all pgds in range and clear the pgds (but
+ * don't free them) afterwards.
+ */
+void kvm_clear_hyp_idmap(void)
+{
+ unsigned long addr, end;
+ unsigned long next;
+ pgd_t *pgd = hyp_pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ addr = virt_to_phys(__hyp_idmap_text_start);
+ end = virt_to_phys(__hyp_idmap_text_end);
+
+ pgd += pgd_index(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+
+ pud_clear(pud);
+ clean_pmd_entry(pmd);
+ pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
+ } while (pgd++, addr = next, addr < end);
+}
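
The gfn range computed in handle_hva_to_gpa() is easy to check by hand:
hva_to_gfn_memslot() boils down to base_gfn + ((hva - userspace_addr) >>
PAGE_SHIFT), and rounding hva_end up by PAGE_SIZE - 1 pulls the last partially
covered page into the range. A sketch with made-up slot parameters:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long userspace_addr = 0x40000000; /* hypothetical slot hva */
            unsigned long base_gfn = 0x80000;          /* hypothetical slot gfn */
            unsigned long hva_start = 0x40001800;      /* starts mid-page */
            unsigned long hva_end = 0x40003000;        /* exclusive end */

            unsigned long gfn = base_gfn +
                    ((hva_start - userspace_addr) >> PAGE_SHIFT);
            unsigned long gfn_end = base_gfn +
                    ((hva_end + PAGE_SIZE - 1 - userspace_addr) >> PAGE_SHIFT);

            /* pages 1 and 2 of the slot intersect [hva_start, hva_end) */
            printf("gfn %#lx..%#lx\n", gfn, gfn_end - 1);
            return 0;
    }
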
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
new file mode 100644
index 000000000000..7ee5bb7a3667
--- /dev/null
+++ b/arch/arm/kvm/psci.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/wait.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_psci.h>
+
+/*
+ * This is an implementation of the Power State Coordination Interface
+ * as described in ARM document number ARM DEN 0022A.
+ */
+
+static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pause = true;
+}
+
+static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+{
+ struct kvm *kvm = source_vcpu->kvm;
+ struct kvm_vcpu *vcpu;
+ wait_queue_head_t *wq;
+ unsigned long cpu_id;
+ phys_addr_t target_pc;
+
+ cpu_id = *vcpu_reg(source_vcpu, 1);
+ if (vcpu_mode_is_32bit(source_vcpu))
+ cpu_id &= ~((u32) 0);
+
+ if (cpu_id >= atomic_read(&kvm->online_vcpus))
+ return KVM_PSCI_RET_INVAL;
+
+ target_pc = *vcpu_reg(source_vcpu, 2);
+
+ vcpu = kvm_get_vcpu(kvm, cpu_id);
+
+ wq = kvm_arch_vcpu_wq(vcpu);
+ if (!waitqueue_active(wq))
+ return KVM_PSCI_RET_INVAL;
+
+ kvm_reset_vcpu(vcpu);
+
+ /* Gracefully handle Thumb2 entry point */
+ if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+ target_pc &= ~((phys_addr_t) 1);
+ vcpu_set_thumb(vcpu);
+ }
+
+ *vcpu_pc(vcpu) = target_pc;
+ vcpu->arch.pause = false;
+ smp_mb(); /* Make sure the above is visible */
+
+ wake_up_interruptible(wq);
+
+ return KVM_PSCI_RET_SUCCESS;
+}
+
+/**
+ * kvm_psci_call - handle PSCI call if r0 value is in range
+ * @vcpu: Pointer to the VCPU struct
+ *
+ * Handle PSCI calls from guests through traps from HVC or SMC instructions.
+ * The calling convention is similar to SMC calls to the secure world, with
+ * the function number placed in r0. This function returns true if the
+ * function number specified in r0 is within the PSCI range, and false
+ * otherwise.
+ */
+bool kvm_psci_call(struct kvm_vcpu *vcpu)
+{
+ unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long val;
+
+ switch (psci_fn) {
+ case KVM_PSCI_FN_CPU_OFF:
+ kvm_psci_vcpu_off(vcpu);
+ val = KVM_PSCI_RET_SUCCESS;
+ break;
+ case KVM_PSCI_FN_CPU_ON:
+ val = kvm_psci_vcpu_on(vcpu);
+ break;
+ case KVM_PSCI_FN_CPU_SUSPEND:
+ case KVM_PSCI_FN_MIGRATE:
+ val = KVM_PSCI_RET_NI;
+ break;
+
+ default:
+ return false;
+ }
+
+ *vcpu_reg(vcpu, 0) = val;
+ return true;
+}
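
For reference, the guest-side view of the CPU_ON path handled above: function
number in r0, target CPU in r1, entry point in r2, result back in r0. A hedged
sketch of such a call (psci_cpu_on is a hypothetical guest helper, and the hvc
instruction needs a virtualization-aware assembler, e.g. .arch_extension virt):

    /* Mirrors the constants in arch/arm/include/uapi/asm/kvm.h */
    #define KVM_PSCI_FN_BASE        0x95c1ba5eUL
    #define KVM_PSCI_FN_CPU_ON      (KVM_PSCI_FN_BASE + 2)

    static long psci_cpu_on(unsigned long cpu_id, unsigned long entry)
    {
            register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_ON;
            register unsigned long r1 asm("r1") = cpu_id;
            register unsigned long r2 asm("r2") = entry;

            /* Traps to Hyp mode; kvm_psci_call() leaves the result in r0 */
            asm volatile("hvc #0"
                         : "+r" (r0)
                         : "r" (r1), "r" (r2)
                         : "memory");
            return r0;
    }
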
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
new file mode 100644
index 000000000000..b80256b554cd
--- /dev/null
+++ b/arch/arm/kvm/reset.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+
+#include <asm/unified.h>
+#include <asm/ptrace.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+
+/******************************************************************************
+ * Cortex-A15 Reset Values
+ */
+
+static const int a15_max_cpu_idx = 3;
+
+static struct kvm_regs a15_regs_reset = {
+ .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
+};
+
+
+/*******************************************************************************
+ * Exported reset function
+ */
+
+/**
+ * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_regs *cpu_reset;
+
+ switch (vcpu->arch.target) {
+ case KVM_ARM_TARGET_CORTEX_A15:
+ if (vcpu->vcpu_id > a15_max_cpu_idx)
+ return -EINVAL;
+ cpu_reset = &a15_regs_reset;
+ vcpu->arch.midr = read_cpuid_id();
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ /* Reset core registers */
+ memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
+
+ /* Reset CP15 registers */
+ kvm_reset_coprocs(vcpu);
+
+ return 0;
+}
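
The a15_regs_reset value above decodes to SVC mode with asynchronous aborts,
IRQs and FIQs all masked; with the encodings from asm/ptrace.h that comes out
as 0x1d3:

    #include <stdio.h>

    /* Encodings as in arch/arm/include/uapi/asm/ptrace.h */
    #define SVC_MODE        0x13
    #define PSR_F_BIT       0x40
    #define PSR_I_BIT       0x80
    #define PSR_A_BIT       0x100

    int main(void)
    {
            /* guest CPUs leave reset in SVC mode with A/I/F masked */
            printf("reset cpsr = %#x\n",
                   SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT);
            return 0;       /* prints reset cpsr = 0x1d3 */
    }
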
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
new file mode 100644
index 000000000000..a8e73ed5ad5b
--- /dev/null
+++ b/arch/arm/kvm/trace.h
@@ -0,0 +1,235 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+/*
+ * Tracepoints for entry/exit to guest
+ */
+TRACE_EVENT(kvm_entry,
+ TP_PROTO(unsigned long vcpu_pc),
+ TP_ARGS(vcpu_pc),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ ),
+
+ TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(unsigned long vcpu_pc),
+ TP_ARGS(vcpu_pc),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ ),
+
+ TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_guest_fault,
+ TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
+ unsigned long hxfar,
+ unsigned long long ipa),
+ TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( unsigned long, hsr )
+ __field( unsigned long, hxfar )
+ __field( unsigned long long, ipa )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->hsr = hsr;
+ __entry->hxfar = hxfar;
+ __entry->ipa = ipa;
+ ),
+
+ TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
+ "ipa %#16llx, hsr %#08lx",
+ __entry->vcpu_pc, __entry->hxfar,
+ __entry->ipa, __entry->hsr)
+);
+
+TRACE_EVENT(kvm_irq_line,
+ TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
+ TP_ARGS(type, vcpu_idx, irq_num, level),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, type )
+ __field( int, vcpu_idx )
+ __field( int, irq_num )
+ __field( int, level )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->vcpu_idx = vcpu_idx;
+ __entry->irq_num = irq_num;
+ __entry->level = level;
+ ),
+
+ TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
+ (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
+ (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
+ (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
+ __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
+);
+
+TRACE_EVENT(kvm_mmio_emulate,
+ TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
+ unsigned long cpsr),
+ TP_ARGS(vcpu_pc, instr, cpsr),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( unsigned long, instr )
+ __field( unsigned long, cpsr )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->instr = instr;
+ __entry->cpsr = cpsr;
+ ),
+
+ TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+ __entry->vcpu_pc, __entry->instr, __entry->cpsr)
+);
+
+/* Architecturally implementation defined CP15 register access */
+TRACE_EVENT(kvm_emulate_cp15_imp,
+ TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
+ unsigned long CRm, unsigned long Op2, bool is_write),
+ TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, Op1 )
+ __field( unsigned int, Rt1 )
+ __field( unsigned int, CRn )
+ __field( unsigned int, CRm )
+ __field( unsigned int, Op2 )
+ __field( bool, is_write )
+ ),
+
+ TP_fast_assign(
+ __entry->is_write = is_write;
+ __entry->Op1 = Op1;
+ __entry->Rt1 = Rt1;
+ __entry->CRn = CRn;
+ __entry->CRm = CRm;
+ __entry->Op2 = Op2;
+ ),
+
+ TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
+ (__entry->is_write) ? "mcr" : "mrc",
+ __entry->Op1, __entry->Rt1, __entry->CRn,
+ __entry->CRm, __entry->Op2)
+);
+
+TRACE_EVENT(kvm_wfi,
+ TP_PROTO(unsigned long vcpu_pc),
+ TP_ARGS(vcpu_pc),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ ),
+
+ TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_unmap_hva_range,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_set_spte_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_hvc,
+ TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
+ TP_ARGS(vcpu_pc, r0, imm),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( unsigned long, r0 )
+ __field( unsigned long, imm )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->r0 = r0;
+ __entry->imm = imm;
+ ),
+
+ TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx",
+ __entry->vcpu_pc, __entry->r0, __entry->imm)
+);
+
+#endif /* _TRACE_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH arch/arm/kvm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 9107691adbdb..5ac9e9384b15 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,35 +25,44 @@
#define DAVINCI_CPUIDLE_MAX_STATES 2
-struct davinci_ops {
- void (*enter) (u32 flags);
- void (*exit) (u32 flags);
- u32 flags;
-};
+static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
+static void __iomem *ddr2_reg_base;
+static bool ddr2_pdown;
+
+static void davinci_save_ddr_power(int enter, bool pdown)
+{
+ u32 val;
+
+ val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
+
+ if (enter) {
+ if (pdown)
+ val |= DDR2_SRPD_BIT;
+ else
+ val &= ~DDR2_SRPD_BIT;
+ val |= DDR2_LPMODEN_BIT;
+ } else {
+ val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
+ }
+
+ __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
+}
/* Actual code that puts the SoC in different idle states */
static int davinci_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
-
- if (ops && ops->enter)
- ops->enter(ops->flags);
+ davinci_save_ddr_power(1, ddr2_pdown);
index = cpuidle_wrap_enter(dev, drv, index,
arm_cpuidle_simple_enter);
- if (ops && ops->exit)
- ops->exit(ops->flags);
+ davinci_save_ddr_power(0, ddr2_pdown);
return index;
}
-/* fields in davinci_ops.flags */
-#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
-
static struct cpuidle_driver davinci_idle_driver = {
.name = "cpuidle-davinci",
.owner = THIS_MODULE,
@@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = {
.state_count = DAVINCI_CPUIDLE_MAX_STATES,
};
-static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
-static void __iomem *ddr2_reg_base;
-
-static void davinci_save_ddr_power(int enter, bool pdown)
-{
- u32 val;
-
- val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
-
- if (enter) {
- if (pdown)
- val |= DDR2_SRPD_BIT;
- else
- val &= ~DDR2_SRPD_BIT;
- val |= DDR2_LPMODEN_BIT;
- } else {
- val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
- }
-
- __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
-}
-
-static void davinci_c2state_enter(u32 flags)
-{
- davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static void davinci_c2state_exit(u32 flags)
-{
- davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
- [1] = {
- .enter = davinci_c2state_enter,
- .exit = davinci_c2state_exit,
- },
-};
-
static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{
int ret;
@@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
ddr2_reg_base = pdata->ddr2_ctlr_base;
- if (pdata->ddr2_pdown)
- davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
- cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
-
- device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+ ddr2_pdown = pdata->ddr2_pdown;
ret = cpuidle_register_driver(&davinci_idle_driver);
if (ret) {
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e103c290bc9e..85afb031b676 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
select CPU_EXYNOS4210
select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
select PINCTRL
- select PINCTRL_EXYNOS4
+ select PINCTRL_EXYNOS
select USE_OF
help
Machine support for Samsung Exynos4 machine with device tree enabled.
diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h
index 7517c3f417af..b5d39dd03b2a 100644
--- a/arch/arm/mach-exynos/include/mach/cpufreq.h
+++ b/arch/arm/mach-exynos/include/mach/cpufreq.h
@@ -18,12 +18,25 @@ enum cpufreq_level_index {
L20,
};
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+ { \
+ .freq = (f) * 1000, \
+ .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+ (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+ .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+ .mps = ((m) << 16 | (p) << 8 | (s)), \
+ }
+
+struct apll_freq {
+ unsigned int freq;
+ u32 clk_div_cpu0;
+ u32 clk_div_cpu1;
+ u32 mps;
+};
+
struct exynos_dvfs_info {
unsigned long mpll_freq_khz;
unsigned int pll_safe_idx;
- unsigned int pm_lock_idx;
- unsigned int max_support_idx;
- unsigned int min_support_idx;
struct clk *cpu_clk;
unsigned int *volt_table;
struct cpufreq_frequency_table *freq_table;
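
To see what APLL_FREQ() packs, here is one invocation expanded by hand, with
made-up divider values (the real tables live in the exynos cpufreq drivers):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* APLL_FREQ(1000, 0,3,7,3,4,1,1,0, 0,5,0, 250,6,1) expands to: */
            uint32_t clk_div_cpu0 = 0 | 3 << 4 | 7 << 8 | 3 << 12 |
                                    4 << 16 | 1 << 20 | 1 << 24 | 0 << 28;
            uint32_t clk_div_cpu1 = 0 << 0 | 5 << 4 | 0 << 8;
            uint32_t mps = 250 << 16 | 6 << 8 | 1;

            printf("cpu0=%#x cpu1=%#x mps=%#x\n",
                   clk_div_cpu0, clk_div_cpu1, mps);
            /* prints cpu0=0x1143730 cpu1=0x50 mps=0xfa0601 */
            return 0;
    }
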
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index e99d3d8f2bcf..ea9e3020972d 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -104,6 +104,12 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
OF_DEV_AUXDATA("samsung,mfc-v6", 0x11000000, "s5p-mfc-v6", NULL),
OF_DEV_AUXDATA("samsung,exynos5250-tmu", 0x10060000,
"exynos-tmu", NULL),
+ OF_DEV_AUXDATA("samsung,i2s-v5", 0x03830000,
+ "samsung-i2s.0", NULL),
+ OF_DEV_AUXDATA("samsung,i2s-v5", 0x12D60000,
+ "samsung-i2s.1", NULL),
+ OF_DEV_AUXDATA("samsung,i2s-v5", 0x12D70000,
+ "samsung-i2s.2", NULL),
{},
};
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 551c97e87a78..44b12f9c1584 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,5 +1,7 @@
config ARCH_HIGHBANK
bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
+ select ARCH_HAS_CPUFREQ
+ select ARCH_HAS_OPP
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_GIC
@@ -11,5 +13,7 @@ config ARCH_HIGHBANK
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
select HAVE_SMP
+ select MAILBOX
+ select PL320_MBOX
select SPARSE_IRQ
select USE_OF
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index 80235b46cb58..3f65206a9b92 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -2,7 +2,6 @@
#define __HIGHBANK_CORE_H
extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
-extern void highbank_clocks_init(void);
extern void highbank_restart(char, const char *);
extern void __iomem *scu_base_addr;
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index e6c061282939..65656ff0eb33 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -25,6 +25,7 @@
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/amba/bus.h>
+#include <linux/clk-provider.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
@@ -117,7 +118,7 @@ static void __init highbank_timer_init(void)
WARN_ON(!timer_base);
irq = irq_of_parse_and_map(np, 0);
- highbank_clocks_init();
+ of_clk_init(NULL);
lookup.clk = of_clk_get(np, 0);
clkdev_add(&lookup);
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 4815ea6f8f5d..1337f2c51f9e 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -27,6 +27,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/usb/phy.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -263,6 +264,7 @@ static void __init omap_2430sdp_init(void)
omap_hsmmc_init(mmc);
omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
board_smc91x_init();
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index bb73afc9ac17..8a2e242910eb 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
+#include <linux/usb/phy.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -579,6 +580,7 @@ static void __init omap_3430sdp_init(void)
omap_ads7846_init(1, gpio_pendown, 310, NULL);
omap_serial_init();
omap_sdrc_init(hyb18m512160af6_sdrc_params, NULL);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
board_smc91x_init();
board_flash_init(sdp_flash_partitions, chip_sel_3430, 0);
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 1cc6696594fd..8e8efccf762f 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -28,6 +28,7 @@
#include <linux/leds_pwm.h>
#include <linux/platform_data/omap4-keypad.h>
#include <linux/usb/musb.h>
+#include <linux/usb/phy.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
@@ -696,6 +697,7 @@ static void __init omap_4430sdp_init(void)
omap4_sdp4430_wifi_init();
omap4_twl6030_hsmmc_init(mmc);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "omap-usb2.1.auto");
usb_musb_init(&musb_board_data);
status = omap_ethernet_init();
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index b3102c2f4a3c..f1172f2f1a7e 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -30,6 +30,7 @@
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>
@@ -724,6 +725,7 @@ static void __init cm_t3x_common_init(void)
cm_t35_init_display();
omap_twl4030_audio_init("cm-t3x");
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
cm_t35_init_usbh();
cm_t35_init_camera();
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 12865af25d3a..77cade52b022 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -29,6 +29,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
@@ -622,6 +623,7 @@ static void __init devkit8000_init(void)
omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
usbhs_init(&usbhs_bdata);
board_nand_init(devkit8000_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 0f24cb84ba5a..15e58815a131 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -18,6 +18,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/usb/phy.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
@@ -625,6 +626,7 @@ static void __init igep_init(void)
omap_serial_init();
omap_sdrc_init(m65kxxxxam_sdrc_params,
m65kxxxxam_sdrc_params);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
igep_flash_init();
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 0869f4f3d3e1..3b5510a433f0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <asm/mach-types.h>
@@ -418,6 +419,7 @@ static void __init omap_ldp_init(void)
omap_ads7846_init(1, 54, 310, NULL);
omap_serial_init();
omap_sdrc_init(NULL, NULL);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
board_nand_init(ldp_nand_partitions, ARRAY_SIZE(ldp_nand_partitions),
ZOOM_NAND_CS, 0, nand_default_timings);
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 22c483d5dfa8..4616f9269d00 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -30,6 +30,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
@@ -519,6 +520,7 @@ static void __init omap3_beagle_init(void)
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
usbhs_init(&usbhs_bdata);
board_nand_init(omap3beagle_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 3985f35aee06..202389503457 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -41,6 +41,7 @@
#include <linux/regulator/machine.h>
#include <linux/mmc/host.h>
#include <linux/export.h>
+#include <linux/usb/phy.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -309,7 +310,7 @@ static struct omap2_hsmmc_info mmc[] = {
.gpio_wp = 63,
.deferred = true,
},
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
{
.name = "wl1271",
.mmc = 2,
@@ -450,7 +451,7 @@ static struct regulator_init_data omap3evm_vio = {
.consumer_supplies = omap3evm_vio_supply,
};
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
#define OMAP3EVM_WLAN_PMENA_GPIO (150)
#define OMAP3EVM_WLAN_IRQ_GPIO (149)
@@ -563,7 +564,7 @@ static struct omap_board_mux omap35x_board_mux[] __initdata = {
OMAP_PIN_OFF_NONE),
OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
/* WLAN IRQ - GPIO 149 */
OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
@@ -601,7 +602,7 @@ static struct omap_board_mux omap36x_board_mux[] __initdata = {
OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
/* WLAN IRQ - GPIO 149 */
OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
@@ -637,7 +638,7 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = {
static void __init omap3_evm_wl12xx_init(void)
{
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
int ret;
/* WL12xx WLAN Init */
@@ -734,6 +735,7 @@ static void __init omap3_evm_init(void)
omap_mux_init_gpio(135, OMAP_PIN_OUTPUT);
usbhs_bdata.reset_gpio_port[1] = 135;
}
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(&musb_board_data);
usbhs_init(&usbhs_bdata);
board_nand_init(omap3evm_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 2a065ba6eb58..9409eb897e2f 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -29,6 +29,7 @@
#include <linux/i2c/twl.h>
#include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -215,6 +216,7 @@ static void __init omap3logic_init(void)
board_mmc_init();
board_smsc911x_init();
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
/* Ensure SDRC pins are mux'd for self-refresh */
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index a53a6683c1b8..1ac3e81969e0 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -35,6 +35,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/regulator/fixed.h>
+#include <linux/usb/phy.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <asm/mach-types.h>
@@ -601,6 +602,7 @@ static void __init omap3pandora_init(void)
ARRAY_SIZE(omap3pandora_spi_board_info));
omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL);
usbhs_init(&usbhs_bdata);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
gpmc_nand_init(&pandora_nand_data, NULL);
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 53a6cbcf9747..63cb204e0811 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -33,6 +33,7 @@
#include <linux/interrupt.h>
#include <linux/smsc911x.h>
#include <linux/i2c/at24.h>
+#include <linux/usb/phy.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -404,6 +405,7 @@ static void __init omap3_stalker_init(void)
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
usbhs_init(&usbhs_bdata);
omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL);
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 263cb9cfbf37..6b22ce3581d7 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -28,6 +28,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <linux/spi/spi.h>
@@ -365,6 +366,7 @@ static void __init omap3_touchbook_init(void)
/* Touchscreen and accelerometer */
omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
usbhs_init(&usbhs_bdata);
board_nand_init(omap3touchbook_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 769c1feee1c4..40184cc494f9 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -30,6 +30,7 @@
#include <linux/regulator/fixed.h>
#include <linux/ti_wilink_st.h>
#include <linux/usb/musb.h>
+#include <linux/usb/phy.h>
#include <linux/wl12xx.h>
#include <linux/platform_data/omap-abe-twl6040.h>
@@ -447,6 +448,7 @@ static void __init omap4_panda_init(void)
omap_sdrc_init(NULL, NULL);
omap4_twl6030_hsmmc_init(mmc);
omap4_ehci_init();
+ usb_bind_phy("musb-hdrc.0.auto", 0, "omap-usb2.1.auto");
usb_musb_init(&musb_board_data);
omap4_panda_display_init();
}
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index c8fde3e56441..7e43ff3f704c 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -36,6 +36,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
#include <linux/platform_data/mtd-nand-omap2.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -499,6 +500,7 @@ static void __init overo_init(void)
mt46h32m32lf6_sdrc_params);
board_nand_init(overo_nand_partitions,
ARRAY_SIZE(overo_nand_partitions), NAND_CS, 0, NULL);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
usbhs_init(&usbhs_bdata);
overo_spi_init();
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index 0c777b75e484..f8a272c253f5 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -18,6 +18,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
+#include <linux/usb/phy.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -134,6 +135,7 @@ static void __init rm680_init(void)
sdrc_params = nokia_get_sdram_timings();
omap_sdrc_init(sdrc_params, sdrc_params);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
rm680_peripherals_init();
}
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index cf07e289b4ea..f3d075baebb6 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -42,7 +42,7 @@
#include <media/si4713.h>
#include <linux/leds-lp5523.h>
-#include <../drivers/staging/iio/light/tsl2563.h>
+#include <linux/platform_data/tsl2563.h>
#include <linux/lis3lv02d.h>
#if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 26e07addc9d7..dc5498b1b3a7 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -20,6 +20,7 @@
#include <linux/wl12xx.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/gpio-omap.h>
+#include <linux/usb/phy.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -298,6 +299,7 @@ void __init zoom_peripherals_init(void)
omap_hsmmc_init(mmc);
omap_i2c_init();
platform_device_register(&omap_vwlan_device);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
enable_board_wakeup_source();
omap_serial_init();
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 626f3ea3142f..b6cc233214d7 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -20,6 +20,7 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/omap4-keypad.h>
#include <linux/platform_data/omap_ocp2scp.h>
+#include <linux/usb/omap_control_usb.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
@@ -254,6 +255,49 @@ static inline void omap_init_camera(void)
#endif
}
+#if IS_ENABLED(CONFIG_OMAP_CONTROL_USB)
+static struct omap_control_usb_platform_data omap4_control_usb_pdata = {
+ .type = 1,
+};
+
+struct resource omap4_control_usb_res[] = {
+ {
+ .name = "control_dev_conf",
+ .start = 0x4a002300,
+ .end = 0x4a002303,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "otghs_control",
+ .start = 0x4a00233c,
+ .end = 0x4a00233f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device omap4_control_usb = {
+ .name = "omap-control-usb",
+ .id = -1,
+ .dev = {
+ .platform_data = &omap4_control_usb_pdata,
+ },
+ .num_resources = 2,
+ .resource = omap4_control_usb_res,
+};
+
+static inline void __init omap_init_control_usb(void)
+{
+ if (!cpu_is_omap44xx())
+ return;
+
+ if (platform_device_register(&omap4_control_usb))
+ pr_err("Error registering omap_control_usb device\n");
+}
+
+#else
+static inline void omap_init_control_usb(void) { }
+#endif /* CONFIG_OMAP_CONTROL_USB */
+
int __init omap4_keyboard_init(struct omap4_keypad_platform_data
*sdp4430_keypad_data, struct omap_board_data *bdata)
{
@@ -721,6 +765,7 @@ static int __init omap2_init_devices(void)
omap_init_mbox();
/* If dtb is there, the devices will be created dynamically */
if (!of_have_populated_dt()) {
+ omap_init_control_usb();
omap_init_dmic();
omap_init_mcpdm();
omap_init_mcspi();
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 8033cb747c86..64bac53da0e8 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1134,11 +1134,9 @@ static int gpmc_probe(struct platform_device *pdev)
phys_base = res->start;
mem_size = resource_size(res);
- gpmc_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!gpmc_base) {
- dev_err(&pdev->dev, "error: request memory / ioremap\n");
- return -EADDRNOTAVAIL;
- }
+ gpmc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpmc_base))
+ return PTR_ERR(gpmc_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL)
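The same conversion recurs below in tegra2_emc.c, plat-omap/dmtimer.c and plat-samsung/adc.c. A sketch of the idiom, with a hypothetical driver shown only for shape: devm_ioremap_resource() both requests and maps the region and prints its own diagnostics (including for a NULL resource), so the caller shrinks to an IS_ERR() check:

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* -EINVAL, -EBUSY or -ENOMEM */

            /* ... use base ... */
            return 0;
    }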
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 793f54ac7d14..624a7e84a685 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2702,13 +2702,6 @@ static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
.end = 0x4a0ae000,
.flags = IORESOURCE_MEM,
},
- {
- /* XXX: Remove this once control module driver is in place */
- .name = "ctrl_dev",
- .start = 0x4a002300,
- .end = 0x4a002303,
- .flags = IORESOURCE_MEM,
- },
{ }
};
@@ -6156,12 +6149,6 @@ static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = {
.pa_end = 0x4a0ab7ff,
.flags = ADDR_TYPE_RT
},
- {
- /* XXX: Remove this once control module driver is in place */
- .pa_start = 0x4a00233c,
- .pa_end = 0x4a00233f,
- .flags = ADDR_TYPE_RT
- },
{ }
};
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 7be3622cfc85..2d93d8b23835 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -351,12 +351,10 @@ static void omap3_pm_idle(void)
if (omap_irq_pending())
goto out;
- trace_power_start(POWER_CSTATE, 1, smp_processor_id());
trace_cpu_idle(1, smp_processor_id());
omap_sram_idle();
- trace_power_end(smp_processor_id());
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
out:
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 7b33b375fe77..9d27e3f8a09c 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -85,6 +85,9 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
+ if (cpu_is_omap44xx())
+ musb_plat.has_mailbox = true;
+
if (soc_is_am35xx()) {
oh_name = "am35x_otg_hs";
name = "musb-am35x";
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 616cb87b6179..69985b06c0da 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -53,17 +53,25 @@ static unsigned long ac97_reset_config[] = {
GPIO95_AC97_nRESET,
};
-void pxa27x_assert_ac97reset(int reset_gpio, int on)
+void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio)
{
+ /*
+ * This helper function is used to work around a bug in the pxa27x's
+ * ac97 controller during a warm reset. The configuration of the
+ * reset_gpio is changed as follows:
+ * to_gpio == true: configured to generic output gpio and driven high
+ * to_gpio == false: configured to ac97 controller alt fn AC97_nRESET
+ */
+
if (reset_gpio == 113)
- pxa2xx_mfp_config(on ? &ac97_reset_config[0] :
- &ac97_reset_config[1], 1);
+ pxa2xx_mfp_config(to_gpio ? &ac97_reset_config[0] :
+ &ac97_reset_config[1], 1);
if (reset_gpio == 95)
- pxa2xx_mfp_config(on ? &ac97_reset_config[2] :
- &ac97_reset_config[3], 1);
+ pxa2xx_mfp_config(to_gpio ? &ac97_reset_config[2] :
+ &ac97_reset_config[3], 1);
}
-EXPORT_SYMBOL_GPL(pxa27x_assert_ac97reset);
+EXPORT_SYMBOL_GPL(pxa27x_configure_ac97reset);
/* Crystal clock: 13MHz */
#define BASE_CLK 13000000
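A hypothetical warm-reset sequence built on the renamed helper; the delay and the surrounding driver context are assumptions, only the two calls come from this interface:

    pxa27x_configure_ac97reset(reset_gpio, true);   /* pin becomes a plain GPIO, driven high */
    udelay(10);                                     /* hold time: an assumption */
    pxa27x_configure_ac97reset(reset_gpio, false);  /* pin returns to the AC97_nRESET alt fn */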
diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h
index d6b5073692d2..44754230fdcc 100644
--- a/arch/arm/mach-realview/include/mach/irqs-eb.h
+++ b/arch/arm/mach-realview/include/mach/irqs-eb.h
@@ -115,7 +115,7 @@
/*
* Only define NR_IRQS if less than NR_IRQS_EB
*/
-#define NR_IRQS_EB (IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB (IRQ_EB_GIC_START + 128)
#if defined(CONFIG_MACH_REALVIEW_EB) \
&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index d1e80d0fd671..7079a70b1ab8 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -293,8 +293,8 @@ config MACH_JIVE
Say Y here if you are using the Logitech Jive.
config MACH_JIVE_SHOW_BOOTLOADER
- bool "Allow access to bootloader partitions in MTD (EXPERIMENTAL)"
- depends on MACH_JIVE && EXPERIMENTAL
+ bool "Allow access to bootloader partitions in MTD"
+ depends on MACH_JIVE
config MACH_S3C2413
bool
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 9a23739f7026..442497363db9 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/serial_core.h>
+#include <linux/platform_device.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 99ef190d0909..08294fa9e0d4 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -657,14 +657,8 @@ static struct platform_device lcdc_device = {
/* FSI */
#define IRQ_FSI evt2irq(0x1840)
static struct sh_fsi_platform_info fsi_info = {
- .port_a = {
- .flags = SH_FSI_BRS_INV,
- },
.port_b = {
- .flags = SH_FSI_BRS_INV |
- SH_FSI_BRM_INV |
- SH_FSI_LRS_INV |
- SH_FSI_CLK_CPG |
+ .flags = SH_FSI_CLK_CPG |
SH_FSI_FMT_SPDIF,
},
};
@@ -692,21 +686,21 @@ static struct platform_device fsi_device = {
},
};
-static struct asoc_simple_dai_init_info fsi2_ak4643_init_info = {
- .fmt = SND_SOC_DAIFMT_LEFT_J,
- .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM,
- .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
- .sysclk = 11289600,
-};
-
static struct asoc_simple_card_info fsi2_ak4643_info = {
.name = "AK4643",
.card = "FSI2A-AK4643",
- .cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi2",
- .codec_dai = "ak4642-hifi",
- .init = &fsi2_ak4643_init_info,
+ .daifmt = SND_SOC_DAIFMT_LEFT_J,
+ .cpu_dai = {
+ .name = "fsia-dai",
+ .fmt = SND_SOC_DAIFMT_CBS_CFS,
+ },
+ .codec_dai = {
+ .name = "ak4642-hifi",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM,
+ .sysclk = 11289600,
+ },
};
static struct platform_device fsi_ak4643_device = {
@@ -815,18 +809,18 @@ static struct platform_device lcdc1_device = {
},
};
-static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = {
- .cpu_daifmt = SND_SOC_DAIFMT_CBM_CFM,
-};
-
static struct asoc_simple_card_info fsi2_hdmi_info = {
.name = "HDMI",
.card = "FSI2B-HDMI",
- .cpu_dai = "fsib-dai",
.codec = "sh-mobile-hdmi",
.platform = "sh_fsi2",
- .codec_dai = "sh_mobile_hdmi-hifi",
- .init = &fsi2_hdmi_init_info,
+ .cpu_dai = {
+ .name = "fsib-dai",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF,
+ },
+ .codec_dai = {
+ .name = "sh_mobile_hdmi-hifi",
+ },
};
static struct platform_device fsi_hdmi_device = {
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 5353adf6b828..0679ca6bf1f6 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -806,21 +806,21 @@ static struct platform_device fsi_device = {
};
/* FSI-WM8978 */
-static struct asoc_simple_dai_init_info fsi_wm8978_init_info = {
- .fmt = SND_SOC_DAIFMT_I2S,
- .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF,
- .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
- .sysclk = 12288000,
-};
-
static struct asoc_simple_card_info fsi_wm8978_info = {
.name = "wm8978",
.card = "FSI2A-WM8978",
- .cpu_dai = "fsia-dai",
.codec = "wm8978.0-001a",
.platform = "sh_fsi2",
- .codec_dai = "wm8978-hifi",
- .init = &fsi_wm8978_init_info,
+ .daifmt = SND_SOC_DAIFMT_I2S,
+ .cpu_dai = {
+ .name = "fsia-dai",
+ .fmt = SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF,
+ },
+ .codec_dai = {
+ .name = "wm8978-hifi",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF,
+ .sysclk = 12288000,
+ },
};
static struct platform_device fsi_wm8978_device = {
@@ -832,18 +832,18 @@ static struct platform_device fsi_wm8978_device = {
};
/* FSI-HDMI */
-static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = {
- .cpu_daifmt = SND_SOC_DAIFMT_CBM_CFM,
-};
-
static struct asoc_simple_card_info fsi2_hdmi_info = {
.name = "HDMI",
.card = "FSI2B-HDMI",
- .cpu_dai = "fsib-dai",
.codec = "sh-mobile-hdmi",
.platform = "sh_fsi2",
- .codec_dai = "sh_mobile_hdmi-hifi",
- .init = &fsi2_hdmi_init_info,
+ .cpu_dai = {
+ .name = "fsib-dai",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM,
+ },
+ .codec_dai = {
+ .name = "sh_mobile_hdmi-hifi",
+ },
};
static struct platform_device fsi_hdmi_device = {
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index c02448d6847f..f41b71e8df3e 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -525,21 +525,21 @@ static struct platform_device fsi_device = {
},
};
-static struct asoc_simple_dai_init_info fsi2_ak4648_init_info = {
- .fmt = SND_SOC_DAIFMT_LEFT_J,
- .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM,
- .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
- .sysclk = 11289600,
-};
-
static struct asoc_simple_card_info fsi2_ak4648_info = {
.name = "AK4648",
.card = "FSI2A-AK4648",
- .cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi2",
- .codec_dai = "ak4642-hifi",
- .init = &fsi2_ak4648_init_info,
+ .daifmt = SND_SOC_DAIFMT_LEFT_J,
+ .cpu_dai = {
+ .name = "fsia-dai",
+ .fmt = SND_SOC_DAIFMT_CBS_CFS,
+ },
+ .codec_dai = {
+ .name = "ak4642-hifi",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM,
+ .sysclk = 11289600,
+ },
};
static struct platform_device fsi_ak4648_device = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 2fed62f66045..3fd716dae405 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -502,18 +502,18 @@ static struct platform_device hdmi_lcdc_device = {
},
};
-static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = {
- .cpu_daifmt = SND_SOC_DAIFMT_CBM_CFM,
-};
-
static struct asoc_simple_card_info fsi2_hdmi_info = {
.name = "HDMI",
.card = "FSI2B-HDMI",
- .cpu_dai = "fsib-dai",
.codec = "sh-mobile-hdmi",
.platform = "sh_fsi2",
- .codec_dai = "sh_mobile_hdmi-hifi",
- .init = &fsi2_hdmi_init_info,
+ .cpu_dai = {
+ .name = "fsib-dai",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF,
+ },
+ .codec_dai = {
+ .name = "sh_mobile_hdmi-hifi",
+ },
};
static struct platform_device fsi_hdmi_device = {
@@ -858,16 +858,12 @@ static struct platform_device leds_device = {
#define IRQ_FSI evt2irq(0x1840)
static struct sh_fsi_platform_info fsi_info = {
.port_a = {
- .flags = SH_FSI_BRS_INV,
.tx_id = SHDMA_SLAVE_FSIA_TX,
.rx_id = SHDMA_SLAVE_FSIA_RX,
},
.port_b = {
- .flags = SH_FSI_BRS_INV |
- SH_FSI_BRM_INV |
- SH_FSI_LRS_INV |
- SH_FSI_CLK_CPG |
- SH_FSI_FMT_SPDIF,
+ .flags = SH_FSI_CLK_CPG |
+ SH_FSI_FMT_SPDIF,
}
};
@@ -896,21 +892,21 @@ static struct platform_device fsi_device = {
},
};
-static struct asoc_simple_dai_init_info fsi2_ak4643_init_info = {
- .fmt = SND_SOC_DAIFMT_LEFT_J,
- .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM,
- .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
- .sysclk = 11289600,
-};
-
static struct asoc_simple_card_info fsi2_ak4643_info = {
.name = "AK4643",
.card = "FSI2A-AK4643",
- .cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi2",
- .codec_dai = "ak4642-hifi",
- .init = &fsi2_ak4643_init_info,
+ .daifmt = SND_SOC_DAIFMT_LEFT_J,
+ .cpu_dai = {
+ .name = "fsia-dai",
+ .fmt = SND_SOC_DAIFMT_CBS_CFS,
+ },
+ .codec_dai = {
+ .name = "ak4642-hifi",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM,
+ .sysclk = 11289600,
+ },
};
static struct platform_device fsi_ak4643_device = {
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 3fdd0085e306..8709a39bd34c 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -7,3 +7,4 @@ config ARCH_SUNXI
select PINCTRL
select SPARSE_IRQ
select SUNXI_TIMER
+ select PINCTRL_SUNXI
\ No newline at end of file
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index a74d3c7d2e26..a36a03d3c9a0 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- cpumask_copy(policy->related_cpus, cpu_possible_mask);
+ cpumask_copy(policy->cpus, cpu_possible_mask);
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c
index e18aa2f83ebf..ce7ce42a1ac9 100644
--- a/arch/arm/mach-tegra/tegra2_emc.c
+++ b/arch/arm/mach-tegra/tegra2_emc.c
@@ -312,11 +312,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- emc_regbase = devm_request_and_ioremap(&pdev->dev, res);
- if (!emc_regbase) {
- dev_err(&pdev->dev, "failed to remap registers\n");
- return -ENOMEM;
- }
+ emc_regbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(emc_regbase))
+ return PTR_ERR(emc_regbase);
pdata = pdev->dev.platform_data;
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 5dea90636d94..3e5bbd0e5b23 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -11,6 +11,7 @@ config UX500_SOC_COMMON
select COMMON_CLK
select PINCTRL
select PINCTRL_NOMADIK
+ select PINCTRL_ABX500
select PL310_ERRATA_753970 if CACHE_PL310
config UX500_SOC_DB8500
@@ -18,6 +19,11 @@ config UX500_SOC_DB8500
select CPU_FREQ_TABLE if CPU_FREQ
select MFD_DB8500_PRCMU
select PINCTRL_DB8500
+ select PINCTRL_DB8540
+ select PINCTRL_AB8500
+ select PINCTRL_AB8505
+ select PINCTRL_AB9540
+ select PINCTRL_AB8540
select REGULATOR
select REGULATOR_DB8500_PRCMU
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index d453522edb0d..b8781caa54b8 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -90,26 +90,8 @@ static struct platform_device snowball_gpio_en_3v3_regulator_dev = {
},
};
-static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
+static struct abx500_gpio_platform_data ab8500_gpio_pdata = {
.gpio_base = MOP500_AB8500_PIN_GPIO(1),
- .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE,
- /* config_reg is the initial configuration of ab8500 pins.
- * The pins can be configured as GPIO or alt functions based
- * on value present in GpioSel1 to GpioSel6 and AlternatFunction
- * register. This is the array of 7 configuration settings.
- * One has to compile time decide these settings. Below is the
- * explanation of these setting
- * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO
- * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO
- * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO
- * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO
- * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO
- * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO
- * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured
- * as GPIO then this register selectes the alternate fucntions
- */
- .config_reg = {0x00, 0x1E, 0x80, 0x01,
- 0x7A, 0x00, 0x00},
};
/* ab8500-codec */
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 5b286e06474c..b80ad9610e97 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -285,7 +285,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
/* Requires device name bindings. */
- OF_DEV_AUXDATA("stericsson,nmk_pinctrl", U8500_PRCMU_BASE,
+ OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
"pinctrl-db8500", NULL),
/* Requires clock name and DMA bindings. */
OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index 7d34c52798b5..d526dd8e87d3 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -38,15 +38,7 @@
#define MOP500_STMPE1601_IRQ_END \
MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS)
-/* AB8500 virtual gpio IRQ */
-#define AB8500_VIR_GPIO_NR_IRQS 16
-
-#define MOP500_AB8500_VIR_GPIO_IRQ_BASE \
- MOP500_STMPE1601_IRQ_END
-#define MOP500_AB8500_VIR_GPIO_IRQ_END \
- (MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS)
-
-#define MOP500_NR_IRQS MOP500_AB8500_VIR_GPIO_IRQ_END
+#define MOP500_NR_IRQS MOP500_STMPE1601_IRQ_END
#define MOP500_IRQ_END MOP500_NR_IRQS
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 60c092cfdae3..43478c299cc8 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -36,6 +36,7 @@
#include <linux/gfp.h>
#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
+#include <linux/bitops.h>
#include <asm/irq.h>
#include <asm/hardware/arm_timer.h>
@@ -65,16 +66,28 @@
#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE)
#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE)
+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+ BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+ BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+ BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+ BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+ BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+ BIT(SIC_INT_PCI3)
#if 1
#define IRQ_MMCI0A IRQ_VICSOURCE22
#define IRQ_AACI IRQ_VICSOURCE24
#define IRQ_ETH IRQ_VICSOURCE25
#define PIC_MASK 0xFFD00000
+#define PIC_VALID PIC_VALID_ALL
#else
#define IRQ_MMCI0A IRQ_SIC_MMCI0A
#define IRQ_AACI IRQ_SIC_AACI
#define IRQ_ETH IRQ_SIC_ETH
#define PIC_MASK 0
+#define PIC_VALID PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+ BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+ BIT(SIC_INT_ETH)
#endif
/* Lookup table for finding a DT node that represents the vic instance */
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
VERSATILE_SIC_BASE);
fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
- IRQ_VICSOURCE31, ~PIC_MASK, np);
+ IRQ_VICSOURCE31, PIC_VALID, np);
/*
* Interrupts on secondary controller from 0 to 8 are routed to
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 2f84f4094f13..e92e5e0705bc 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
int irq;
/* slot, pin, irq
- * 24 1 27
- * 25 1 28
- * 26 1 29
- * 27 1 30
+ * 24 1 IRQ_SIC_PCI0
+ * 25 1 IRQ_SIC_PCI1
+ * 26 1 IRQ_SIC_PCI2
+ * 27 1 IRQ_SIC_PCI3
*/
- irq = 27 + ((slot - 24 + pin - 1) & 3);
+ irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
return irq;
}
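A quick check of the swizzle against the comment table (worked values only, no new code):

    slot 24, pin 1: (24 - 24 + 1 - 1) & 3 = 0  ->  IRQ_SIC_PCI0
    slot 25, pin 1: (25 - 24 + 1 - 1) & 3 = 1  ->  IRQ_SIC_PCI1
    slot 27, pin 4: (27 - 24 + 4 - 1) & 3 = 2  ->  IRQ_SIC_PCI2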
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3fd629d5a513..025d17328730 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,8 +629,9 @@ config ARM_THUMBEE
make use of it. Say N for code that can run on CPUs without ThumbEE.
config ARM_VIRT_EXT
- bool "Native support for the ARM Virtualization Extensions"
- depends on MMU && CPU_V7
+ bool
+ depends on MMU
+ default y if CPU_V7
help
Enable the kernel to make use of the ARM Virtualization
Extensions to install hypervisors without run-time firmware
@@ -640,11 +641,6 @@ config ARM_VIRT_EXT
use of this feature. Refer to Documentation/arm/Booting for
details.
- It is safe to enable this option even if the kernel may not be
- booted in HYP mode, may not have support for the
- virtualization extensions, or may be booted with a
- non-compliant bootloader.
-
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
depends on !CPU_USE_DOMAINS && CPU_V7
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 8a9c4cb50a93..4e333fa2756f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
iomap.o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
- mmap.o pgd.o mmu.o vmregion.o
+ mmap.o pgd.o mmu.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb78..7a0511191f6b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
* The ASID is used to tag entries in the CPU caches and TLBs.
* The context ID is used by debuggers and trace logic, and
* should be unique within all running processes.
+ *
+ * In big-endian operation, the two 32-bit words are swapped if accessed by
+ * non-64-bit operations.
*/
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 076c26d43864..dda3904dc64c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (is_coherent || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
- else if (gfp & GFP_ATOMIC)
+ else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 99db769307ec..2dffc010cc41 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,4 +1,6 @@
+#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
@@ -6,6 +8,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>
+#include <asm/virt.h>
pgd_t *idmap_pgd;
@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
-static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+ const char *text_end, unsigned long prot)
{
- unsigned long prot, next;
+ unsigned long addr, end;
+ unsigned long next;
+
+ addr = virt_to_phys(text_start);
+ end = virt_to_phys(text_end);
+
+ prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
- prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
prot |= PMD_BIT4;
@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
} while (pgd++, addr = next, addr != end);
}
+#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
+pgd_t *hyp_pgd;
+
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static int __init init_static_idmap_hyp(void)
+{
+ hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ if (!hyp_pgd)
+ return -ENOMEM;
+
+ pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
+ __hyp_idmap_text_start, __hyp_idmap_text_end);
+ identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
+ __hyp_idmap_text_end, PMD_SECT_AP1);
+
+ return 0;
+}
+#else
+static int __init init_static_idmap_hyp(void)
+{
+ return 0;
+}
+#endif
+
extern char __idmap_text_start[], __idmap_text_end[];
static int __init init_static_idmap(void)
{
- phys_addr_t idmap_start, idmap_end;
+ int ret;
idmap_pgd = pgd_alloc(&init_mm);
if (!idmap_pgd)
return -ENOMEM;
- /* Add an identity mapping for the physical address of the section. */
- idmap_start = virt_to_phys((void *)__idmap_text_start);
- idmap_end = virt_to_phys((void *)__idmap_text_end);
+ pr_info("Setting up static identity map for 0x%p - 0x%p\n",
+ __idmap_text_start, __idmap_text_end);
+ identity_mapping_add(idmap_pgd, __idmap_text_start,
+ __idmap_text_end, 0);
- pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
- (long long)idmap_start, (long long)idmap_end);
- identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+ ret = init_static_idmap_hyp();
/* Flush L1 for the hardware to see this page table content */
flush_cache_louis();
- return 0;
+ return ret;
}
early_initcall(init_static_idmap);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88fd86cf3d9a..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -39,6 +39,70 @@
#include <asm/mach/pci.h>
#include "mm.h"
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+ size_t size, unsigned int mtype)
+{
+ struct static_vm *svm;
+ struct vm_struct *vm;
+
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
+ if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ continue;
+ if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+ continue;
+
+ if (vm->phys_addr > paddr ||
+ paddr + size - 1 > vm->phys_addr + vm->size - 1)
+ continue;
+
+ return svm;
+ }
+
+ return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+ struct static_vm *svm;
+ struct vm_struct *vm;
+
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
+
+ /* static_vmlist is in ascending order */
+ if (vm->addr > vaddr)
+ break;
+
+ if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+ return svm;
+ }
+
+ return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+ struct static_vm *curr_svm;
+ struct vm_struct *vm;
+ void *vaddr;
+
+ vm = &svm->vm;
+ vm_area_add_early(vm);
+ vaddr = vm->addr;
+
+ list_for_each_entry(curr_svm, &static_vmlist, list) {
+ vm = &curr_svm->vm;
+
+ if (vm->addr > vaddr)
+ break;
+ }
+ list_add_tail(&svm->list, &curr_svm->list);
+}
+
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
const struct mem_type *type;
int err;
unsigned long addr;
- struct vm_struct * area;
+ struct vm_struct *area;
+ phys_addr_t paddr = __pfn_to_phys(pfn);
#ifndef CONFIG_ARM_LPAE
/*
* High mappings must be supersection aligned
*/
- if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+ if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
return NULL;
#endif
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
/*
* Try to reuse one of the static mapping whenever possible.
*/
- read_lock(&vmlist_lock);
- for (area = vmlist; area; area = area->next) {
- if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
- break;
- if (!(area->flags & VM_ARM_STATIC_MAPPING))
- continue;
- if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
- continue;
- if (__phys_to_pfn(area->phys_addr) > pfn ||
- __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
- continue;
- /* we can drop the lock here as we know *area is static */
- read_unlock(&vmlist_lock);
- addr = (unsigned long)area->addr;
- addr += __pfn_to_phys(pfn) - area->phys_addr;
- return (void __iomem *) (offset + addr);
+ if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+ struct static_vm *svm;
+
+ svm = find_static_vm_paddr(paddr, size, mtype);
+ if (svm) {
+ addr = (unsigned long)svm->vm.addr;
+ addr += paddr - svm->vm.phys_addr;
+ return (void __iomem *) (offset + addr);
+ }
}
- read_unlock(&vmlist_lock);
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
if (!area)
return NULL;
addr = (unsigned long)area->addr;
- area->phys_addr = __pfn_to_phys(pfn);
+ area->phys_addr = paddr;
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
if (DOMAIN_IO == 0 &&
(((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
cpu_is_xsc3()) && pfn >= 0x100000 &&
- !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+ !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
area->flags |= VM_ARM_SECTION_MAPPING;
err = remap_area_supersections(addr, pfn, size, type);
- } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+ } else if (!((paddr | size | addr) & ~PMD_MASK)) {
area->flags |= VM_ARM_SECTION_MAPPING;
err = remap_area_sections(addr, pfn, size, type);
} else
#endif
- err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+ err = ioremap_page_range(addr, addr + size, paddr,
__pgprot(type->prot_pte));
if (err) {
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
- struct vm_struct *vm;
+ struct static_vm *svm;
+
+ /* If this is a static mapping, we must leave it alone */
+ svm = find_static_vm_vaddr(addr);
+ if (svm)
+ return;
- read_lock(&vmlist_lock);
- for (vm = vmlist; vm; vm = vm->next) {
- if (vm->addr > addr)
- break;
- if (!(vm->flags & VM_IOREMAP))
- continue;
- /* If this is a static mapping we must leave it alone */
- if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
- (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
- read_unlock(&vmlist_lock);
- return;
- }
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+ {
+ struct vm_struct *vm;
+
+ vm = find_vm_area(addr);
+
/*
* If this is a section based mapping we need to handle it
* specially as the VM subsystem does not know how to handle
* such a beast.
*/
- if ((vm->addr == addr) &&
- (vm->flags & VM_ARM_SECTION_MAPPING)) {
+ if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
unmap_area_sections((unsigned long)vm->addr, vm->size);
- break;
- }
-#endif
}
- read_unlock(&vmlist_lock);
+#endif
vunmap(addr);
}
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a8ee92da3544..d5a4e9ad8f0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,4 +1,6 @@
#ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* consistent regions used by dma_alloc_attrs() */
#define VM_ARM_DMA_CONSISTENT 0x20000000
+
+struct static_vm {
+ struct vm_struct vm;
+ struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
#endif
#ifdef CONFIG_ZONE_DMA
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7f5c94..e95a996ab78f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
unsigned int cr_mask;
pmdval_t pmd;
pteval_t pte;
+ pteval_t pte_s2;
};
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy) policy
+#else
+#define s2_policy(policy) 0
+#endif
+
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
.pte = L_PTE_MT_UNCACHED,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
.pte = L_PTE_MT_BUFFERABLE,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
.pte = L_PTE_MT_WRITETHROUGH,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
.pte = L_PTE_MT_WRITEBACK,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
.pte = L_PTE_MT_WRITEALLOC,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}
};
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
struct cachepolicy *cp;
unsigned int cr = get_cr();
pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+ pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
*/
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+ s2_pgprot = cp->pte_s2;
+ hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
/*
* ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
+ s2_pgprot |= L_PTE_SHARED;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | kern_pgprot);
+ pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+ pgprot_s2_device = __pgprot(s2_device_pgprot);
+ pgprot_hyp_device = __pgprot(hyp_device_pgprot);
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -757,21 +779,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
{
struct map_desc *md;
struct vm_struct *vm;
+ struct static_vm *svm;
if (!nr)
return;
- vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+ svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
+
+ vm = &svm->vm;
vm->addr = (void *)(md->virtual & PAGE_MASK);
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
vm->phys_addr = __pfn_to_phys(md->pfn);
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
vm->flags |= VM_ARM_MTYPE(md->type);
vm->caller = iotable_init;
- vm_area_add_early(vm++);
+ add_static_vm_early(svm++);
}
}
@@ -779,13 +804,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
void *caller)
{
struct vm_struct *vm;
+ struct static_vm *svm;
+
+ svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
- vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+ vm = &svm->vm;
vm->addr = (void *)addr;
vm->size = size;
vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = caller;
- vm_area_add_early(vm);
+ add_static_vm_early(svm);
}
#ifndef CONFIG_ARM_LPAE
@@ -810,14 +838,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
static void __init fill_pmd_gaps(void)
{
+ struct static_vm *svm;
struct vm_struct *vm;
unsigned long addr, next = 0;
pmd_t *pmd;
- /* we're still single threaded hence no lock needed here */
- for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
- continue;
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
addr = (unsigned long)vm->addr;
if (addr < next)
continue;
@@ -857,19 +884,12 @@ static void __init fill_pmd_gaps(void)
#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
- struct vm_struct *vm;
- unsigned long addr;
+ struct static_vm *svm;
- /* we're still single threaded hence no lock needed here */
- for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
- continue;
- addr = (unsigned long)vm->addr;
- addr &= ~(SZ_2M - 1);
- if (addr == PCI_IO_VIRT_BASE)
- return;
+ svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+ if (svm)
+ return;
- }
vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index eb6aa73bc8b7..f9a0aa725ea9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -38,9 +38,14 @@
/*
* mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64-bit, so in big-endian the two words are swapped too.
*/
.macro mmid, rd, rn
+#ifdef __ARMEB__
+ ldr \rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
ldr \rd, [\rn, #MM_CONTEXT_ID]
+#endif
.endm
/*
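The offset exists because mm->context.id is now a 64-bit field: on a 32-bit big-endian kernel its low word lives at byte offset 4, not 0. A stand-alone illustration (plain C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint64_t id = 0x1122334455667788ull;
            unsigned char b[8];

            memcpy(b, &id, sizeof(b));
            /* little endian: b[0] == 0x88, low 32 bits start at offset 0
             * big endian:    b[0] == 0x11, low 32 bits start at offset 4 */
            printf("first byte = 0x%02x\n", b[0]);
            return 0;
    }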
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233f4dfc..bcaaa8de9325 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
- ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ mmid r1, r1 @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 6d98c13ab827..78f520bc0e99 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
- ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ mmid r1, r1 @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 7b56386f9496..50bf1dafc9ea 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -47,7 +47,7 @@
*/
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
- ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ mmid r1, r1 @ get mm->context.id
and r3, r1, #0xff
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
deleted file mode 100644
index a631016e1f8f..000000000000
--- a/arch/arm/mm/vmregion.c
+++ /dev/null
@@ -1,205 +0,0 @@
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include "vmregion.h"
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- * struct vm_struct {
- * struct vmregion region;
- * unsigned long flags;
- * struct page **pages;
- * unsigned int nr_pages;
- * unsigned long phys_addr;
- * };
- *
- * get_vm_area() would then call vmregion_alloc with an appropriate
- * struct vmregion head (eg):
- *
- * struct vmregion vmalloc_head = {
- * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
- * .vm_start = VMALLOC_START,
- * .vm_end = VMALLOC_END,
- * };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.) I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vmregion_alloc().
- */
-
-struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
- size_t size, gfp_t gfp, const void *caller)
-{
- unsigned long start = head->vm_start, addr = head->vm_end;
- unsigned long flags;
- struct arm_vmregion *c, *new;
-
- if (head->vm_end - head->vm_start < size) {
- printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
- __func__, size);
- goto out;
- }
-
- new = kmalloc(sizeof(struct arm_vmregion), gfp);
- if (!new)
- goto out;
-
- new->caller = caller;
-
- spin_lock_irqsave(&head->vm_lock, flags);
-
- addr = rounddown(addr - size, align);
- list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
- if (addr >= c->vm_end)
- goto found;
- addr = rounddown(c->vm_start - size, align);
- if (addr < start)
- goto nospc;
- }
-
- found:
- /*
- * Insert this entry after the one we found.
- */
- list_add(&new->vm_list, &c->vm_list);
- new->vm_start = addr;
- new->vm_end = addr + size;
- new->vm_active = 1;
-
- spin_unlock_irqrestore(&head->vm_lock, flags);
- return new;
-
- nospc:
- spin_unlock_irqrestore(&head->vm_lock, flags);
- kfree(new);
- out:
- return NULL;
-}
-
-static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
- struct arm_vmregion *c;
-
- list_for_each_entry(c, &head->vm_list, vm_list) {
- if (c->vm_active && c->vm_start == addr)
- goto out;
- }
- c = NULL;
- out:
- return c;
-}
-
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
- struct arm_vmregion *c;
- unsigned long flags;
-
- spin_lock_irqsave(&head->vm_lock, flags);
- c = __arm_vmregion_find(head, addr);
- spin_unlock_irqrestore(&head->vm_lock, flags);
- return c;
-}
-
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
-{
- struct arm_vmregion *c;
- unsigned long flags;
-
- spin_lock_irqsave(&head->vm_lock, flags);
- c = __arm_vmregion_find(head, addr);
- if (c)
- c->vm_active = 0;
- spin_unlock_irqrestore(&head->vm_lock, flags);
- return c;
-}
-
-void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&head->vm_lock, flags);
- list_del(&c->vm_list);
- spin_unlock_irqrestore(&head->vm_lock, flags);
-
- kfree(c);
-}
-
-#ifdef CONFIG_PROC_FS
-static int arm_vmregion_show(struct seq_file *m, void *p)
-{
- struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
-
- seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
- c->vm_end - c->vm_start);
- if (c->caller)
- seq_printf(m, " %pS", (void *)c->caller);
- seq_putc(m, '\n');
- return 0;
-}
-
-static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
-{
- struct arm_vmregion_head *h = m->private;
- spin_lock_irq(&h->vm_lock);
- return seq_list_start(&h->vm_list, *pos);
-}
-
-static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
-{
- struct arm_vmregion_head *h = m->private;
- return seq_list_next(p, &h->vm_list, pos);
-}
-
-static void arm_vmregion_stop(struct seq_file *m, void *p)
-{
- struct arm_vmregion_head *h = m->private;
- spin_unlock_irq(&h->vm_lock);
-}
-
-static const struct seq_operations arm_vmregion_ops = {
- .start = arm_vmregion_start,
- .stop = arm_vmregion_stop,
- .next = arm_vmregion_next,
- .show = arm_vmregion_show,
-};
-
-static int arm_vmregion_open(struct inode *inode, struct file *file)
-{
- struct arm_vmregion_head *h = PDE(inode)->data;
- int ret = seq_open(file, &arm_vmregion_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = h;
- }
- return ret;
-}
-
-static const struct file_operations arm_vmregion_fops = {
- .open = arm_vmregion_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
- proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
- return 0;
-}
-#else
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
- return 0;
-}
-#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
deleted file mode 100644
index 0f5a5f2a2c7b..000000000000
--- a/arch/arm/mm/vmregion.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef VMREGION_H
-#define VMREGION_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-struct page;
-
-struct arm_vmregion_head {
- spinlock_t vm_lock;
- struct list_head vm_list;
- unsigned long vm_start;
- unsigned long vm_end;
-};
-
-struct arm_vmregion {
- struct list_head vm_list;
- unsigned long vm_start;
- unsigned long vm_end;
- int vm_active;
- const void *caller;
-};
-
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
-void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
-
-int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
-
-#endif
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a34f1e214116..6828ef6ce80e 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -341,10 +341,17 @@ static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
- emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx);
- emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx);
- emit(ARM_LSL_I(r_dst, r_dst, 8), ctx);
- emit(ARM_LSL_R(r_dst, r_dst, 8), ctx);
+ /* r_dst = (r_src << 8) | (r_src >> 8) */
+ emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
+ emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
+
+ /*
+ * we need to mask out the bits set in r_dst[23:16] due to
+ * the first shift instruction.
+ *
+ * note that 0x8ff is the encoded immediate 0x00ff0000.
+ */
+ emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}
#else /* ARMv6+ */
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 7b433f3bddca..a0daa2fb5de6 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -808,11 +808,9 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return -ENOMEM;
}
- timer->io_base = devm_request_and_ioremap(dev, mem);
- if (!timer->io_base) {
- dev_err(dev, "%s: region already claimed.\n", __func__);
- return -ENOMEM;
- }
+ timer->io_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(timer->io_base))
+ return PTR_ERR(timer->io_base);
if (dev->of_node) {
if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 2d676ab50f73..ca07cb1b155a 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -386,11 +386,9 @@ static int s3c_adc_probe(struct platform_device *pdev)
return -ENXIO;
}
- adc->regs = devm_request_and_ioremap(dev, regs);
- if (!adc->regs) {
- dev_err(dev, "failed to map registers\n");
- return -ENXIO;
- }
+ adc->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(adc->regs))
+ return PTR_ERR(adc->regs);
ret = regulator_enable(adc->vdd);
if (ret)
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index d088afa034e8..71d58ddea9c1 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -19,7 +19,8 @@
#include <mach/dma.h>
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
- struct samsung_dma_req *param)
+ struct samsung_dma_req *param,
+ struct device *dev, char *ch_name)
{
dma_cap_mask_t mask;
void *filter_param;
@@ -33,7 +34,12 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
*/
filter_param = (dma_ch == DMACH_DT_PROP) ?
(void *)param->dt_dmach_prop : (void *)dma_ch;
- return (unsigned)dma_request_channel(mask, pl330_filter, filter_param);
+
+ if (dev->of_node)
+ return (unsigned)dma_request_slave_channel(dev, ch_name);
+ else
+ return (unsigned)dma_request_channel(mask, pl330_filter,
+ filter_param);
}
static int samsung_dmadev_release(unsigned ch, void *param)
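With the widened request() hook, DT systems resolve the channel by name while non-DT systems keep the pl330 filter path. A hedged caller sketch; ops, req and the "tx" channel name are assumed context, not part of this patch:

    /* DT boot:  resolved via dma_request_slave_channel(dev, "tx")
     * non-DT:   falls back to dma_request_channel() + pl330_filter */
    unsigned ch = ops->request(DMACH_DT_PROP, &req, &pdev->dev, "tx");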
diff --git a/arch/arm/plat-samsung/include/plat/adc.h b/arch/arm/plat-samsung/include/plat/adc.h
index b258a08de591..2fc89315553f 100644
--- a/arch/arm/plat-samsung/include/plat/adc.h
+++ b/arch/arm/plat-samsung/include/plat/adc.h
@@ -15,6 +15,7 @@
#define __ASM_PLAT_ADC_H __FILE__
struct s3c_adc_client;
+struct platform_device;
extern int s3c_adc_start(struct s3c_adc_client *client,
unsigned int channel, unsigned int nr_samples);
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
index f5144cdd3001..114178268b75 100644
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -39,7 +39,8 @@ struct samsung_dma_config {
};
struct samsung_dma_ops {
- unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param);
+ unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param,
+ struct device *dev, char *ch_name);
int (*release)(unsigned ch, void *param);
int (*config)(unsigned ch, struct samsung_dma_config *param);
int (*prepare)(unsigned ch, struct samsung_dma_prep *param);
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
index f99448c48d30..0cc40aea3f5a 100644
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -36,7 +36,8 @@ static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
}
static unsigned s3c_dma_request(enum dma_ch dma_ch,
- struct samsung_dma_req *param)
+ struct samsung_dma_req *param,
+ struct device *dev, char *ch_name)
{
struct cb_data *data;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f8f362aafee9..7c43569e3141 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2,6 +2,7 @@ config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select ARCH_WANT_FRAME_POINTERS
select ARM_AMBA
select CLONE_BACKWARDS
select COMMON_CLK
@@ -21,7 +22,6 @@ config ARM64
select HAVE_GENERIC_DMA_COHERENT
select HAVE_GENERIC_HARDIRQS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
- select HAVE_IRQ_WORK
select HAVE_MEMBLOCK
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index d7553f2bda66..51493430f142 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -24,4 +24,21 @@ config DEBUG_STACK_USAGE
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T output.
+config EARLY_PRINTK
+ bool "Early printk support"
+ default y
+ help
+ Say Y here if you want to have an early console using the
+ earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It
+ is assumed that the early console device has been initialised
+ by the boot loader prior to starting the Linux kernel.
+
+config PID_IN_CONTEXTIDR
+ bool "Write the current PID to the CONTEXTIDR register"
+ help
+ Enabling this option causes the kernel to write the current PID to
+ the CONTEXTIDR register, at the expense of some additional
+ instructions during context switch. Say Y here only if you are
+ planning to use hardware trace tools with this kernel.
+
endmenu
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 14a9d5a2b85b..e5fe4f99fe10 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mman.h
@@ -48,3 +49,4 @@ generic-y += trace_clock.h
generic-y += types.h
generic-y += unaligned.h
generic-y += user.h
+generic-y += xor.h
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 407717ba060e..836364468571 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v)
int result;
asm volatile("// atomic_add\n"
-"1: ldxr %w0, [%3]\n"
-" add %w0, %w0, %w4\n"
-" stxr %w1, %w0, [%3]\n"
+"1: ldxr %w0, %2\n"
+" add %w0, %w0, %w3\n"
+" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, [%3]\n"
-" add %w0, %w0, %w4\n"
-" stlxr %w1, %w0, [%3]\n"
+"1: ldaxr %w0, %2\n"
+" add %w0, %w0, %w3\n"
+" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub\n"
-"1: ldxr %w0, [%3]\n"
-" sub %w0, %w0, %w4\n"
-" stxr %w1, %w0, [%3]\n"
+"1: ldxr %w0, %2\n"
+" sub %w0, %w0, %w3\n"
+" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, [%3]\n"
-" sub %w0, %w0, %w4\n"
-" stlxr %w1, %w0, [%3]\n"
+"1: ldaxr %w0, %2\n"
+" sub %w0, %w0, %w3\n"
+" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
int oldval;
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, [%3]\n"
-" cmp %w1, %w4\n"
+"1: ldaxr %w1, %2\n"
+" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w5, [%3]\n"
+" stlxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
- : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
- : "r" (&ptr->counter), "Ir" (old), "r" (new)
- : "cc");
+ : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
+ : "Ir" (old), "r" (new)
+ : "cc", "memory");
return oldval;
}
@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
unsigned long tmp, tmp2;
asm volatile("// atomic_clear_mask\n"
-"1: ldxr %0, [%3]\n"
-" bic %0, %0, %4\n"
-" stxr %w1, %0, [%3]\n"
+"1: ldxr %0, %2\n"
+" bic %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
- : "r" (addr), "Ir" (mask)
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+ : "Ir" (mask)
: "cc");
}
@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add\n"
-"1: ldxr %0, [%3]\n"
-" add %0, %0, %4\n"
-" stxr %w1, %0, [%3]\n"
+"1: ldxr %0, %2\n"
+" add %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, [%3]\n"
-" add %0, %0, %4\n"
-" stlxr %w1, %0, [%3]\n"
+"1: ldaxr %0, %2\n"
+" add %0, %0, %3\n"
+" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub\n"
-"1: ldxr %0, [%3]\n"
-" sub %0, %0, %4\n"
-" stxr %w1, %0, [%3]\n"
+"1: ldxr %0, %2\n"
+" sub %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, [%3]\n"
-" sub %0, %0, %4\n"
-" stlxr %w1, %0, [%3]\n"
+"1: ldaxr %0, %2\n"
+" sub %0, %0, %3\n"
+" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
unsigned long res;
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, [%3]\n"
-" cmp %1, %4\n"
+"1: ldaxr %1, %2\n"
+" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %5, [%3]\n"
+" stlxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
- : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
- : "r" (&ptr->counter), "Ir" (old), "r" (new)
- : "cc");
+ : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
+ : "Ir" (old), "r" (new)
+ : "cc", "memory");
return oldval;
}
@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, [%3]\n"
+"1: ldaxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
-" stlxr %w1, %0, [%3]\n"
+" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
"2:"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ :
+ : "cc", "memory");
return result;
}
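The recurring change in these blocks is the operand constraint: instead of passing &v->counter in a register and dereferencing it as [%3], with only an "o" output telling the compiler about the memory, the counter is now handed to GCC directly as a "+Q" operand, i.e. a read-write memory operand addressed by a single base register with no offset, which is exactly the addressing mode ldxr/stxr accept. The ordered *_return and cmpxchg variants also gain a "memory" clobber so they keep acting as full barriers. A minimal sketch of the same idiom (assuming an AArch64 compiler; not part of the patch):

static inline void atomic_or(unsigned long mask, unsigned long *p)
{
	unsigned long tmp;
	unsigned int res;

	asm volatile(
	"1:	ldxr	%0, %2\n"
	"	orr	%0, %0, %3\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b"
	: "=&r" (tmp), "=&r" (res), "+Q" (*p)
	: "r" (mask)
	: "cc");
}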
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index e0e65b069d9e..968b5cbfc260 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
asm volatile("// __xchg1\n"
- "1: ldaxrb %w0, [%3]\n"
- " stlxrb %w1, %w2, [%3]\n"
+ "1: ldaxrb %w0, %2\n"
+ " stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
case 2:
asm volatile("// __xchg2\n"
- "1: ldaxrh %w0, [%3]\n"
- " stlxrh %w1, %w2, [%3]\n"
+ "1: ldaxrh %w0, %2\n"
+ " stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
case 4:
asm volatile("// __xchg4\n"
- "1: ldaxr %w0, [%3]\n"
- " stlxr %w1, %w2, [%3]\n"
+ "1: ldaxr %w0, %2\n"
+ " stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
case 8:
asm volatile("// __xchg8\n"
- "1: ldaxr %0, [%3]\n"
- " stlxr %w1, %2, [%3]\n"
+ "1: ldaxr %0, %2\n"
+ " stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
default:
BUILD_BUG();
@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 1:
do {
asm volatile("// __cmpxchg1\n"
- " ldxrb %w1, [%2]\n"
+ " ldxrb %w1, %2\n"
" mov %w0, #0\n"
" cmp %w1, %w3\n"
" b.ne 1f\n"
- " stxrb %w0, %w4, [%2]\n"
+ " stxrb %w0, %w4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+ : "Ir" (old), "r" (new)
: "cc");
} while (res);
break;
@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 2:
do {
asm volatile("// __cmpxchg2\n"
- " ldxrh %w1, [%2]\n"
+ " ldxrh %w1, %2\n"
" mov %w0, #0\n"
" cmp %w1, %w3\n"
" b.ne 1f\n"
- " stxrh %w0, %w4, [%2]\n"
+ " stxrh %w0, %w4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
- : "memory", "cc");
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+ : "Ir" (old), "r" (new)
+ : "cc");
} while (res);
break;
case 4:
do {
asm volatile("// __cmpxchg4\n"
- " ldxr %w1, [%2]\n"
+ " ldxr %w1, %2\n"
" mov %w0, #0\n"
" cmp %w1, %w3\n"
" b.ne 1f\n"
- " stxr %w0, %w4, [%2]\n"
+ " stxr %w0, %w4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+ : "Ir" (old), "r" (new)
: "cc");
} while (res);
break;
@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 8:
do {
asm volatile("// __cmpxchg8\n"
- " ldxr %1, [%2]\n"
+ " ldxr %1, %2\n"
" mov %w0, #0\n"
" cmp %1, %3\n"
" b.ne 1f\n"
- " stxr %w0, %4, [%2]\n"
+ " stxr %w0, %4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+ : "Ir" (old), "r" (new)
: "cc");
} while (res);
break;
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 3468ae8439fa..c582fa316366 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -39,7 +39,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc")
+ : "cc", "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index d2f05a608274..57f12c991de2 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -230,6 +230,9 @@ extern void __iounmap(volatile void __iomem *addr);
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
+#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+
#define ARCH_HAS_IOREMAP_WC
#include <asm-generic/iomap.h>
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 1cac16a001cb..381f556b664e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -43,6 +43,7 @@
#define PAGE_OFFSET UL(0xffffffc000000000)
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
+#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
#define VA_BITS (39)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index d4f7fd5b9e33..2494fc01896a 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -26,5 +26,6 @@ typedef struct {
extern void paging_init(void);
extern void setup_mm_for_reboot(void);
+extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f68465dee026..e2bc385adb6b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid;
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+static inline void contextidr_thread_switch(struct task_struct *next)
+{
+ asm(
+ " msr contextidr_el1, %0\n"
+ " isb"
+ :
+ : "r" (task_pid_nr(next)));
+}
+#else
+static inline void contextidr_thread_switch(struct task_struct *next)
+{
+}
+#endif
+
/*
* Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
*/
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index a6fffd511c5e..d26d1d53c0d7 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,11 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
-/* It's quiet around here... */
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
#endif
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
new file mode 100644
index 000000000000..0604237ecd99
--- /dev/null
+++ b/arch/arm64/include/asm/psci.h
@@ -0,0 +1,38 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2013 ARM Limited
+ */
+
+#ifndef __ASM_PSCI_H
+#define __ASM_PSCI_H
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
+int psci_init(void);
+
+#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 4ce845f8ee1c..41a71ee4c3df 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,16 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
+#define COMPAT_PSR_F_BIT 0x00000040
+#define COMPAT_PSR_I_BIT 0x00000080
+#define COMPAT_PSR_A_BIT 0x00000100
+#define COMPAT_PSR_E_BIT 0x00000200
+#define COMPAT_PSR_J_BIT 0x01000000
+#define COMPAT_PSR_Q_BIT 0x08000000
+#define COMPAT_PSR_V_BIT 0x10000000
+#define COMPAT_PSR_C_BIT 0x20000000
+#define COMPAT_PSR_Z_BIT 0x40000000
+#define COMPAT_PSR_N_BIT 0x80000000
#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 7e34295f78e3..4b8023c5d146 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+struct device_node;
+
+struct smp_enable_ops {
+ const char *name;
+ int (*init_cpu)(struct device_node *, int);
+ int (*prepare_cpu)(int);
+};
+
+extern const struct smp_enable_ops smp_spin_table_ops;
+extern const struct smp_enable_ops smp_psci_ops;
+
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 41112fe2f8b1..7065e920149d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
asm volatile(
" sevl\n"
"1: wfe\n"
- "2: ldaxr %w0, [%1]\n"
+ "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n"
- : "=&r" (tmp)
- : "r" (&lock->lock), "r" (1)
- : "memory");
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned int tmp;
asm volatile(
- " ldaxr %w0, [%1]\n"
+ " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
"1:\n"
- : "=&r" (tmp)
- : "r" (&lock->lock), "r" (1)
- : "memory");
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
return !tmp;
}
@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
- " stlr %w1, [%0]\n"
- : : "r" (&lock->lock), "r" (0) : "memory");
+ " stlr %w1, %0\n"
+ : "=Q" (lock->lock) : "r" (0) : "memory");
}
/*
@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
asm volatile(
" sevl\n"
"1: wfe\n"
- "2: ldaxr %w0, [%1]\n"
+ "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "memory");
+ : "=&r" (tmp), "+Q" (rw->lock)
+ : "r" (0x80000000)
+ : "cc", "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
unsigned int tmp;
asm volatile(
- " ldaxr %w0, [%1]\n"
+ " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
"1:\n"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "memory");
+ : "=&r" (tmp), "+Q" (rw->lock)
+ : "r" (0x80000000)
+ : "cc", "memory");
return !tmp;
}
@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
asm volatile(
- " stlr %w1, [%0]\n"
- : : "r" (&rw->lock), "r" (0) : "memory");
+ " stlr %w1, %0\n"
+ : "=Q" (rw->lock) : "r" (0) : "memory");
}
/* write_can_lock - would write_trylock() succeed? */
@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
asm volatile(
" sevl\n"
"1: wfe\n"
- "2: ldaxr %w0, [%2]\n"
+ "2: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
" tbnz %w0, #31, 1b\n"
- " stxr %w1, %w0, [%2]\n"
+ " stxr %w1, %w0, %2\n"
" cbnz %w1, 2b\n"
- : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock)
- : "memory");
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+ :
+ : "cc", "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
unsigned int tmp, tmp2;
asm volatile(
- "1: ldxr %w0, [%2]\n"
+ "1: ldxr %w0, %2\n"
" sub %w0, %w0, #1\n"
- " stlxr %w1, %w0, [%2]\n"
+ " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock)
- : "memory");
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+ :
+ : "cc", "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
unsigned int tmp, tmp2 = 1;
asm volatile(
- " ldaxr %w0, [%2]\n"
+ " ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
" tbnz %w0, #31, 1f\n"
- " stxr %w1, %w0, [%2]\n"
+ " stxr %w1, %w0, %2\n"
"1:\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "memory");
+ : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+ :
+ : "cc", "memory");
return !tmp2;
}
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index ca5b65f75c7b..e4b78bdca19e 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -1,11 +1,14 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += kvm_para.h
+
header-y += auxvec.h
header-y += bitsperlong.h
header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
+header-y += kvm_para.h
header-y += param.h
header-y += ptrace.h
header-y += setup.h
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 74239c31e25a..7b4b564961d4 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,14 +9,15 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o
+ hyp-stub.o psci.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
+arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
new file mode 100644
index 000000000000..7e320a2edb9b
--- /dev/null
+++ b/arch/arm64/kernel/early_printk.c
@@ -0,0 +1,118 @@
+/*
+ * Earlyprintk support.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+
+#include <linux/amba/serial.h>
+
+static void __iomem *early_base;
+static void (*printch)(char ch);
+
+/*
+ * PL011 single character TX.
+ */
+static void pl011_printch(char ch)
+{
+ while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF)
+ ;
+ writeb_relaxed(ch, early_base + UART01x_DR);
+ while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY)
+ ;
+}
+
+struct earlycon_match {
+ const char *name;
+ void (*printch)(char ch);
+};
+
+static const struct earlycon_match earlycon_match[] __initconst = {
+ { .name = "pl011", .printch = pl011_printch, },
+ {}
+};
+
+static void early_write(struct console *con, const char *s, unsigned n)
+{
+ while (n-- > 0) {
+ if (*s == '\n')
+ printch('\r');
+ printch(*s);
+ s++;
+ }
+}
+
+static struct console early_console = {
+ .name = "earlycon",
+ .write = early_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+/*
+ * Parse earlyprintk=... parameter in the format:
+ *
+ * <name>[,<addr>][,<options>]
+ *
+ * and register the early console. It is assumed that the UART has been
+ * initialised by the bootloader already.
+ */
+static int __init setup_early_printk(char *buf)
+{
+ const struct earlycon_match *match = earlycon_match;
+ phys_addr_t paddr = 0;
+
+ if (!buf) {
+ pr_warning("No earlyprintk arguments passed.\n");
+ return 0;
+ }
+
+ while (match->name) {
+ size_t len = strlen(match->name);
+ if (!strncmp(buf, match->name, len)) {
+ buf += len;
+ break;
+ }
+ match++;
+ }
+ if (!match->name) {
+ pr_warning("Unknown earlyprintk arguments: %s\n", buf);
+ return 0;
+ }
+
+ /* I/O address */
+ if (!strncmp(buf, ",0x", 3)) {
+ char *e;
+ paddr = simple_strtoul(buf + 1, &e, 16);
+ buf = e;
+ }
+ /* no options parsing yet */
+
+ if (paddr)
+ early_base = early_io_map(paddr, EARLYCON_IOBASE);
+
+ printch = match->printch;
+ register_console(&early_console);
+
+ return 0;
+}
+
+early_param("earlyprintk", setup_early_printk);
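As a usage example (the address is hypothetical and board-specific), booting with earlyprintk=pl011,0x1c090000 selects the PL011 printch routine and maps the UART registers at EARLYCON_IOBASE through early_io_map(); if the address is omitted, early_base is never set, so the parameter is only useful on platforms that pass one.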
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 368ad1f7c36c..0a0a49756826 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -82,10 +82,8 @@
#ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
-#define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS
#else
#define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
-#define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS
#endif
/*
@@ -368,6 +366,7 @@ ENDPROC(__calc_phys_offset)
* - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled, including the FDT blob (TTBR1)
+ * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
*/
__create_page_tables:
pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
@@ -420,6 +419,15 @@ __create_page_tables:
sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6
1:
+#ifdef CONFIG_EARLY_PRINTK
+ /*
+ * Create the pgd entry for the UART mapping. The full mapping is done
+ * later based on the earlyprintk kernel parameter.
+ */
+ ldr x5, =EARLYCON_IOBASE // UART virtual address
+ add x0, x26, #2 * PAGE_SIZE // section table address
+ create_pgd_entry x26, x0, x5, x6, x7
+#endif
ret
ENDPROC(__create_page_tables)
.ltorg
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f7073c7b1ca9..1e49e5eb81e9 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1331,6 +1331,11 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
{
struct frame_tail __user *tail;
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < PERF_MAX_STACK_DEPTH &&
@@ -1355,8 +1360,40 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
{
struct stackframe frame;
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
walk_stackframe(&frame, callchain_trace, entry);
}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ return perf_guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ int misc = 0;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ return misc;
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index cb0956bc96ed..0337cdb0667b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -45,9 +45,10 @@
#include <asm/compat.h>
#include <asm/cacheflush.h>
+#include <asm/fpsimd.h>
+#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
-#include <asm/fpsimd.h>
static void setup_restart(void)
{
@@ -97,14 +98,9 @@ static void default_idle(void)
local_irq_enable();
}
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL_GPL(pm_idle);
-
/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -122,10 +118,10 @@ void cpu_idle(void)
local_irq_disable();
if (!need_resched()) {
stop_critical_timings();
- pm_idle();
+ default_idle();
start_critical_timings();
/*
- * pm_idle functions should always return
+ * default_idle functions should always return
* with IRQs enabled.
*/
WARN_ON(irqs_disabled());
@@ -319,6 +315,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
/* the actual thread switch */
last = cpu_switch_to(prev, next);
+ contextidr_thread_switch(next);
return last;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
new file mode 100644
index 000000000000..14f73c445ff5
--- /dev/null
+++ b/arch/arm64/kernel/psci.c
@@ -0,0 +1,211 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <asm/compiler.h>
+#include <asm/errno.h>
+#include <asm/psci.h>
+
+struct psci_operations psci_ops;
+
+static int (*invoke_psci_fn)(u64, u64, u64, u64);
+
+enum psci_function {
+ PSCI_FN_CPU_SUSPEND,
+ PSCI_FN_CPU_ON,
+ PSCI_FN_CPU_OFF,
+ PSCI_FN_MIGRATE,
+ PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+#define PSCI_RET_SUCCESS 0
+#define PSCI_RET_EOPNOTSUPP -1
+#define PSCI_RET_EINVAL -2
+#define PSCI_RET_EPERM -3
+
+static int psci_to_linux_errno(int errno)
+{
+ switch (errno) {
+ case PSCI_RET_SUCCESS:
+ return 0;
+ case PSCI_RET_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case PSCI_RET_EINVAL:
+ return -EINVAL;
+ case PSCI_RET_EPERM:
+ return -EPERM;
+ };
+
+ return -EINVAL;
+}
+
+#define PSCI_POWER_STATE_ID_MASK 0xffff
+#define PSCI_POWER_STATE_ID_SHIFT 0
+#define PSCI_POWER_STATE_TYPE_MASK 0x1
+#define PSCI_POWER_STATE_TYPE_SHIFT 16
+#define PSCI_POWER_STATE_AFFL_MASK 0x3
+#define PSCI_POWER_STATE_AFFL_SHIFT 24
+
+static u32 psci_power_state_pack(struct psci_power_state state)
+{
+ return ((state.id & PSCI_POWER_STATE_ID_MASK)
+ << PSCI_POWER_STATE_ID_SHIFT) |
+ ((state.type & PSCI_POWER_STATE_TYPE_MASK)
+ << PSCI_POWER_STATE_TYPE_SHIFT) |
+ ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
+ << PSCI_POWER_STATE_AFFL_SHIFT);
+}
+
+/*
+ * The following two functions are invoked via the invoke_psci_fn pointer
+ * and will not be inlined, allowing us to piggyback on the AAPCS.
+ */
+static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
+ u64 arg2)
+{
+ asm volatile(
+ __asmeq("%0", "x0")
+ __asmeq("%1", "x1")
+ __asmeq("%2", "x2")
+ __asmeq("%3", "x3")
+ "hvc #0\n"
+ : "+r" (function_id)
+ : "r" (arg0), "r" (arg1), "r" (arg2));
+
+ return function_id;
+}
+
+static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
+ u64 arg2)
+{
+ asm volatile(
+ __asmeq("%0", "x0")
+ __asmeq("%1", "x1")
+ __asmeq("%2", "x2")
+ __asmeq("%3", "x3")
+ "smc #0\n"
+ : "+r" (function_id)
+ : "r" (arg0), "r" (arg1), "r" (arg2));
+
+ return function_id;
+}
+
+static int psci_cpu_suspend(struct psci_power_state state,
+ unsigned long entry_point)
+{
+ int err;
+ u32 fn, power_state;
+
+ fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+ power_state = psci_power_state_pack(state);
+ err = invoke_psci_fn(fn, power_state, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(struct psci_power_state state)
+{
+ int err;
+ u32 fn, power_state;
+
+ fn = psci_function_id[PSCI_FN_CPU_OFF];
+ power_state = psci_power_state_pack(state);
+ err = invoke_psci_fn(fn, power_state, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_ON];
+ err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_MIGRATE];
+ err = invoke_psci_fn(fn, cpuid, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", },
+ {},
+};
+
+int __init psci_init(void)
+{
+ struct device_node *np;
+ const char *method;
+ u32 id;
+ int err = 0;
+
+ np = of_find_matching_node(NULL, psci_of_match);
+ if (!np)
+ return -ENODEV;
+
+ pr_info("probing function IDs from device-tree\n");
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warning("missing \"method\" property\n");
+ err = -ENXIO;
+ goto out_put_node;
+ }
+
+ if (!strcmp("hvc", method)) {
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ } else if (!strcmp("smc", method)) {
+ invoke_psci_fn = __invoke_psci_fn_smc;
+ } else {
+ pr_warning("invalid \"method\" property: %s\n", method);
+ err = -EINVAL;
+ goto out_put_node;
+ }
+
+ if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+ }
+
+ if (!of_property_read_u32(np, "cpu_off", &id)) {
+ psci_function_id[PSCI_FN_CPU_OFF] = id;
+ psci_ops.cpu_off = psci_cpu_off;
+ }
+
+ if (!of_property_read_u32(np, "cpu_on", &id)) {
+ psci_function_id[PSCI_FN_CPU_ON] = id;
+ psci_ops.cpu_on = psci_cpu_on;
+ }
+
+ if (!of_property_read_u32(np, "migrate", &id)) {
+ psci_function_id[PSCI_FN_MIGRATE] = id;
+ psci_ops.migrate = psci_migrate;
+ }
+
+out_put_node:
+ of_node_put(np);
+ return err;
+}
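To make the power-state packing concrete: the id lands in bits [15:0], the type in bit 16 and the affinity level in bits [25:24]. A small worked example:

	struct psci_power_state state = {
		.id		= 1,
		.type		= PSCI_POWER_STATE_TYPE_POWER_DOWN,
		.affinity_level	= 2,
	};

	/* psci_power_state_pack(state) == 0x02010001 */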
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7665a9bfdb1e..113db863f832 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -39,6 +39,7 @@
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#include <asm/cputype.h>
#include <asm/elf.h>
@@ -49,6 +50,7 @@
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
+#include <asm/psci.h>
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
@@ -260,6 +262,8 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
+ psci_init();
+
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
@@ -289,6 +293,13 @@ static int __init topology_init(void)
}
subsys_initcall(topology_init);
+static int __init arm64_device_probe(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ return 0;
+}
+device_initcall(arm64_device_probe);
+
static const char *hwcap_str[] = {
"fp",
"asimd",
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index a4db3d22aac4..41db148a7eb9 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -76,7 +76,7 @@ struct compat_sigcontext {
struct compat_ucontext {
compat_ulong_t uc_flags;
- struct compat_ucontext *uc_link;
+ compat_uptr_t uc_link;
compat_stack_t uc_stack;
struct compat_sigcontext uc_mcontext;
compat_sigset_t uc_sigmask;
@@ -703,7 +703,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
err |= copy_siginfo_to_user32(&frame->info, info);
__put_user_error(0, &frame->sig.uc.uc_flags, err);
- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
+ __put_user_error(0, &frame->sig.uc.uc_link, err);
memset(&stack, 0, sizeof(stack));
stack.ss_sp = (compat_uptr_t)current->sas_ss_sp;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 538300f2273d..bdd34597254b 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -233,7 +233,28 @@ void __init smp_prepare_boot_cpu(void)
}
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-static phys_addr_t cpu_release_addr[NR_CPUS];
+
+static const struct smp_enable_ops *enable_ops[] __initconst = {
+ &smp_spin_table_ops,
+ &smp_psci_ops,
+ NULL,
+};
+
+static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+
+static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+{
+ const struct smp_enable_ops **ops = enable_ops;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+
+ ops++;
+ }
+
+ return NULL;
+}
/*
* Enumerate the possible CPU set from the device tree.
@@ -252,22 +273,22 @@ void __init smp_init_cpus(void)
* We currently support only the "spin-table" enable-method.
*/
enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method || strcmp(enable_method, "spin-table")) {
- pr_err("CPU %d: missing or invalid enable-method property: %s\n",
- cpu, enable_method);
+ if (!enable_method) {
+ pr_err("CPU %d: missing enable-method property\n", cpu);
goto next;
}
- /*
- * Determine the address from which the CPU is polling.
- */
- if (of_property_read_u64(dn, "cpu-release-addr",
- &cpu_release_addr[cpu])) {
- pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
- cpu);
+ smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
+
+ if (!smp_enable_ops[cpu]) {
+ pr_err("CPU %d: invalid enable-method property: %s\n",
+ cpu, enable_method);
goto next;
}
+ if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ goto next;
+
set_cpu_possible(cpu, true);
next:
cpu++;
@@ -281,8 +302,7 @@ next:
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu;
- void **release_addr;
+ int cpu, err;
unsigned int ncores = num_possible_cpus();
/*
@@ -291,30 +311,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (max_cpus > ncores)
max_cpus = ncores;
+ /* Don't bother if we're effectively UP */
+ if (max_cpus <= 1)
+ return;
+
/*
* Initialise the present map (which describes the set of CPUs
* actually populated at the present time) and release the
* secondaries from the bootloader.
+ *
+ * Make sure we online at most (max_cpus - 1) additional CPUs.
*/
+ max_cpus--;
for_each_possible_cpu(cpu) {
if (max_cpus == 0)
break;
- if (!cpu_release_addr[cpu])
+ if (cpu == smp_processor_id())
+ continue;
+
+ if (!smp_enable_ops[cpu])
continue;
- release_addr = __va(cpu_release_addr[cpu]);
- release_addr[0] = (void *)__pa(secondary_holding_pen);
- __flush_dcache_area(release_addr, sizeof(release_addr[0]));
+ err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ if (err)
+ continue;
set_cpu_present(cpu, true);
max_cpus--;
}
-
- /*
- * Send an event to wake up the secondaries.
- */
- sev();
}
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
new file mode 100644
index 000000000000..112091684c22
--- /dev/null
+++ b/arch/arm64/kernel/smp_psci.c
@@ -0,0 +1,52 @@
+/*
+ * PSCI SMP initialisation
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/psci.h>
+
+static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
+{
+ return 0;
+}
+
+static int __init smp_psci_prepare_cpu(int cpu)
+{
+ int err;
+
+ if (!psci_ops.cpu_on) {
+ pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen));
+ if (err) {
+ pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct smp_enable_ops smp_psci_ops __initconst = {
+ .name = "psci",
+ .init_cpu = smp_psci_init_cpu,
+ .prepare_cpu = smp_psci_prepare_cpu,
+};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
new file mode 100644
index 000000000000..7c35fa682f76
--- /dev/null
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -0,0 +1,66 @@
+/*
+ * Spin Table SMP initialisation
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+static phys_addr_t cpu_release_addr[NR_CPUS];
+
+static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+{
+ /*
+ * Determine the address from which the CPU is polling.
+ */
+ if (of_property_read_u64(dn, "cpu-release-addr",
+ &cpu_release_addr[cpu])) {
+ pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
+ cpu);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int __init smp_spin_table_prepare_cpu(int cpu)
+{
+ void **release_addr;
+
+ if (!cpu_release_addr[cpu])
+ return -ENODEV;
+
+ release_addr = __va(cpu_release_addr[cpu]);
+ release_addr[0] = (void *)__pa(secondary_holding_pen);
+ __flush_dcache_area(release_addr, sizeof(release_addr[0]));
+
+ /*
+ * Send an event to wake up the secondary CPU.
+ */
+ sev();
+
+ return 0;
+}
+
+const struct smp_enable_ops smp_spin_table_ops __initconst = {
+ .name = "spin-table",
+ .init_cpu = smp_spin_table_init_cpu,
+ .prepare_cpu = smp_spin_table_prepare_cpu,
+};
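The prepare_cpu() handshake only makes sense next to what the secondary side is doing: the boot loader parks each CPU in a loop that polls the release address with caches disabled, which is why the pen address must be cleaned to the point of coherency before sev() is issued. A rough sketch of that loop (illustrative pseudocode only; the real code lives in the boot loader and in the secondary_holding_pen assembly):

	/* secondary CPU, run by the boot loader (illustrative only) */
	while (*release_addr == 0)
		wfe();			/* woken by the primary's sev() */
	branch_to(*release_addr);	/* enters secondary_holding_pen */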
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a6885d896ab6..f4dd585898c5 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -25,6 +25,7 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
+#include <linux/io.h>
#include <asm/cputype.h>
#include <asm/sections.h>
@@ -251,6 +252,47 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
} while (pgd++, addr = next, addr != end);
}
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Create an early I/O mapping using the pgd/pmd entries already populated
+ * in head.S as this function is called too early to allocate any memory. The
+ * mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
+ */
+void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
+{
+ unsigned long size, mask;
+ bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ /*
+ * No early pte entries with the !ARM64_64K_PAGES configuration, so use
+ * sections (pmd) instead.
+ */
+ size = page64k ? PAGE_SIZE : SECTION_SIZE;
+ mask = ~(size - 1);
+
+ pgd = pgd_offset_k(virt);
+ pud = pud_offset(pgd, virt);
+ if (pud_none(*pud))
+ return NULL;
+ pmd = pmd_offset(pud, virt);
+
+ if (page64k) {
+ if (pmd_none(*pmd))
+ return NULL;
+ pte = pte_offset_kernel(pmd, virt);
+ set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
+ } else {
+ set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
+ }
+
+ return (void __iomem *)((virt & mask) + (phys & ~mask));
+}
+#endif
+
static void __init map_mem(void)
{
struct memblock_region *reg;
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index aaf5199d8fcb..b3d18f9f3e8d 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
#endif /* __ASM_AVR32_DMA_MAPPING_H */
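The same stanza is added to several architectures in this series; it simply routes the new dma_mmap_coherent()/dma_get_sgtable() interfaces to the common implementations in drivers/base/dma-mapping.c. A hedged sketch of a driver-side user (the foo_dev fields are hypothetical):

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *fdev = file->private_data;	/* hypothetical */

	/* Map a buffer previously obtained with dma_alloc_coherent()
	 * into user space via the generic helper.
	 */
	return dma_mmap_coherent(fdev->dev, vma, fdev->vaddr,
				 fdev->dma_handle, fdev->size);
}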
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 486df68abeec..51c6401582ea 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -22,7 +22,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -70,4 +70,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index b6f3ad5441c5..e98f3248c8aa 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -24,7 +24,6 @@ config BLACKFIN
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_IDE
- select HAVE_IRQ_WORK
select HAVE_KERNEL_GZIP if RAMKERNEL
select HAVE_KERNEL_BZIP2 if RAMKERNEL
select HAVE_KERNEL_LZMA if RAMKERNEL
@@ -38,7 +37,6 @@ config BLACKFIN
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
- select IRQ_PER_CPU if SMP
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
@@ -562,8 +560,7 @@ choice
accurate - This option is therefore marked experimental.
config BFIN_KERNEL_CLOCK_MEMINIT_CALC
- bool "Calculate Timings (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Calculate Timings"
config BFIN_KERNEL_CLOCK_MEMINIT_SPEC
bool "Provide accurate Timings based on target SCLK"
@@ -1120,7 +1117,7 @@ endchoice
comment "Memory Protection Unit"
config MPU
- bool "Enable the memory protection unit (EXPERIMENTAL)"
+ bool "Enable the memory protection unit"
default n
help
Use the processor's MPU to protect applications from accessing
@@ -1442,7 +1439,6 @@ config BFIN_CPU_FREQ
config CPU_VOLTAGE
bool "CPU Voltage scaling"
- depends on EXPERIMENTAL
depends on CPU_FREQ
default n
help
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index bbf461076a0a..054d9ec57d9d 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
_dma_sync((dma_addr_t)vaddr, size, dir);
}
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
#endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e16ad9b0a99..8061426b7df5 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -39,12 +39,6 @@ int nr_l1stack_tasks;
void *l1_stack_base;
unsigned long l1_stack_len;
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void) = NULL;
-EXPORT_SYMBOL(pm_idle);
-
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
@@ -81,7 +75,6 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 3c694065030f..88bd0d899bdb 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index de43aadcdbc4..af4a486dadcd 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -680,7 +680,7 @@ config ETRAX_SPI_MMC_BOARD
config SPI_ETRAX_SSER
tristate
- depends on SPI_MASTER && ETRAX_ARCH_V32 && EXPERIMENTAL
+ depends on SPI_MASTER && ETRAX_ARCH_V32
select SPI_BITBANG
help
This enables using an synchronous serial (sser) port as a
@@ -689,7 +689,7 @@ config SPI_ETRAX_SSER
config SPI_ETRAX_GPIO
tristate
- depends on SPI_MASTER && ETRAX_ARCH_V32 && EXPERIMENTAL
+ depends on SPI_MASTER && ETRAX_ARCH_V32
select SPI_BITBANG
help
This enables using GPIO pins port as a SPI master controller
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 8588b2ccf854..2f0f654f1b44 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
#endif
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index b681b043f6c8..50692b738c75 100644
--- a/arch/cris/include/uapi/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
@@ -24,7 +24,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -72,6 +72,8 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 7f65be6f7f17..104ff4dd9b98 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -54,11 +54,6 @@ void enable_hlt(void)
EXPORT_SYMBOL(enable_hlt);
-/*
- * The following aren't currently used.
- */
-void (*pm_idle)(void);
-
extern void default_idle(void);
void (*pm_power_off)(void);
@@ -77,16 +72,12 @@ void cpu_idle (void)
while (1) {
rcu_idle_enter();
while (!need_resched()) {
- void (*idle)(void);
/*
* Mark this as an RCU critical section so that
* synchronize_kernel() in the unload path waits
* for our completion.
*/
- idle = pm_idle;
- if (!idle)
- idle = default_idle;
- idle();
+ default_idle();
}
rcu_idle_exit();
schedule_preempt_disabled();
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 9d262645f667..17df48fc8f44 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -3,7 +3,6 @@ config FRV
default y
select HAVE_IDE
select HAVE_ARCH_TRACEHOOK
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_UID16
select HAVE_GENERIC_HARDIRQS
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index dfb811002c64..1746a2b8e6e7 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
flush_write_buffers();
}
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index 871f89b7fbda..595391f0f98c 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -22,7 +22,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -70,5 +70,7 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
index 90a2e573c7e6..43e32621da7d 100644
--- a/arch/h8300/include/uapi/asm/socket.h
+++ b/arch/h8300/include/uapi/asm/socket.h
@@ -22,7 +22,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -70,4 +70,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 0744f7d7b1fd..e4decc6b8947 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -12,9 +12,7 @@ config HEXAGON
# select ARCH_WANT_OPTIONAL_GPIOLIB
# select ARCH_REQUIRE_GPIOLIB
# select HAVE_CLK
- # select IRQ_PER_CPU
# select GENERIC_PENDING_IRQ if SMP
- select HAVE_IRQ_WORK
select GENERIC_ATOMIC64
select HAVE_PERF_EVENTS
select HAVE_GENERIC_HARDIRQS
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3279646120e3..c1b80fb6938d 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -29,7 +29,6 @@ config IA64
select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
- select IRQ_PER_CPU
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -375,8 +374,8 @@ config NR_CPUS
performance hit.
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
- depends on SMP && EXPERIMENTAL
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
select HOTPLUG
default n
---help---
@@ -555,8 +554,8 @@ config IA64_HP_AML_NFW
source "drivers/sn/Kconfig"
config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+ bool "kexec system call"
+ depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c
index 6192f7188654..916ffe770bcf 100644
--- a/arch/ia64/hp/common/aml_nfw.c
+++ b/arch/ia64/hp/common/aml_nfw.c
@@ -191,7 +191,7 @@ static int aml_nfw_add(struct acpi_device *device)
return aml_nfw_add_global_handler();
}
-static int aml_nfw_remove(struct acpi_device *device, int type)
+static int aml_nfw_remove(struct acpi_device *device)
{
return aml_nfw_remove_global_handler();
}
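The signature change tracks the ACPI core dropping the unused `type` argument from the .remove callback. A minimal driver skeleton against the new prototype, with hypothetical names:

	#include <acpi/acpi_bus.h>

	static int demo_acpi_add(struct acpi_device *device)
	{
		return 0;
	}

	static int demo_acpi_remove(struct acpi_device *device)	/* no `int type` */
	{
		return 0;
	}

	static struct acpi_driver demo_acpi_driver = {
		.name = "demo",
		.class = "demo",
		.ops = {
			.add = demo_acpi_add,
			.remove = demo_acpi_remove,
		},
	};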
diff --git a/arch/ia64/hp/sim/Kconfig b/arch/ia64/hp/sim/Kconfig
index 8d513a8c5266..d84707d55203 100644
--- a/arch/ia64/hp/sim/Kconfig
+++ b/arch/ia64/hp/sim/Kconfig
@@ -8,6 +8,7 @@ config HP_SIMETH
config HP_SIMSERIAL
bool "Simulated serial driver support"
+ depends on TTY
config HP_SIMSERIAL_CONSOLE
bool "Console for HP simulator"
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index fc3924d18c1f..da2f319fb71d 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -53,7 +53,7 @@ struct tty_driver *hp_simserial_driver;
static struct console *console;
-static void receive_chars(struct tty_struct *tty)
+static void receive_chars(struct tty_port *port)
{
unsigned char ch;
static unsigned char seen_esc = 0;
@@ -81,10 +81,10 @@ static void receive_chars(struct tty_struct *tty)
}
seen_esc = 0;
- if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+ if (tty_insert_flip_char(port, ch, TTY_NORMAL) == 0)
break;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
/*
@@ -93,18 +93,9 @@ static void receive_chars(struct tty_struct *tty)
static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
{
struct serial_state *info = dev_id;
- struct tty_struct *tty = tty_port_tty_get(&info->port);
- if (!tty) {
- printk(KERN_INFO "%s: tty=0 problem\n", __func__);
- return IRQ_NONE;
- }
- /*
- * pretty simple in our case, because we only get interrupts
- * on inbound traffic
- */
- receive_chars(tty);
- tty_kref_put(tty);
+ receive_chars(&info->port);
+
return IRQ_HANDLED;
}
@@ -435,7 +426,7 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
struct tty_port *port = &info->port;
tty->driver_data = info;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
/*
* figure out which console to use (should be one already)
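The shape of this conversion, which repeats below for pdc_cons and mn10300-serial: receive paths feed a struct tty_port directly, so the interrupt handler no longer has to take and drop a tty_struct reference. A generic sketch, with hypothetical names:

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static void demo_rx(struct tty_port *port, const unsigned char *buf, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (tty_insert_flip_char(port, buf[i], TTY_NORMAL) == 0)
				break;			/* flip buffer full */
		tty_flip_buffer_push(port);		/* no tty_kref_put() needed */
	}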
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 359e68a03ca3..faa1bf0da815 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -52,10 +52,6 @@
/* Asm macros */
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()
static inline int
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fcf7f08ab06..e2d3f5baf265 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -11,99 +11,19 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
* Otherwise we measure cpu time in jiffies using the generic definitions.
*/
#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-#include <asm-generic/cputime.h>
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+# include <asm-generic/cputime.h>
#else
-
-#include <linux/time.h>
-#include <linux/jiffies.h>
-#include <asm/processor.h>
-
-typedef u64 __nocast cputime_t;
-typedef u64 __nocast cputime64_t;
-
-#define cputime_one_jiffy jiffies_to_cputime(1)
-
-/*
- * Convert cputime <-> jiffies (HZ)
- */
-#define cputime_to_jiffies(__ct) \
- ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif) \
- (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct) \
- ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif) \
- (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
-
-/*
- * Convert cputime <-> microseconds
- */
-#define cputime_to_usecs(__ct) \
- ((__force u64)(__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs) \
- (__force cputime_t)((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs) \
- (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
-
-/*
- * Convert cputime <-> seconds
- */
-#define cputime_to_secs(__ct) \
- ((__force u64)(__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs) \
- (__force cputime_t)((__secs) * NSEC_PER_SEC)
-
-/*
- * Convert cputime <-> timespec (nsec)
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *val)
-{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
- return (__force cputime_t) ret;
-}
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-{
- val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
- val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
-}
-
-/*
- * Convert cputime <-> timeval (msec)
- */
-static inline cputime_t timeval_to_cputime(struct timeval *val)
-{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
- return (__force cputime_t) ret;
-}
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
-{
- val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
- val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
-}
-
-/*
- * Convert cputime <-> clock (USER_HZ)
- */
-#define cputime_to_clock_t(__ct) \
- ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x) \
- (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct) \
- cputime_to_clock_t((__force cputime_t)__ct)
-
+# include <asm/processor.h>
+# include <asm-generic/cputime_nsecs.h>
extern void arch_vtime_task_switch(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */
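What the replacement header provides, restated as a standalone demo. This is a userspace approximation; the real helpers in asm-generic/cputime_nsecs.h use a __nocast cputime_t and the kernel's HZ.

	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define NSEC_PER_USEC	1000ULL
	#define HZ		250	/* assumed tick rate for the demo */

	/* cputime carried in nanoseconds, as the generic header assumes */
	static unsigned long long cputime_to_usecs(unsigned long long ct)
	{
		return ct / NSEC_PER_USEC;
	}

	static unsigned long long jiffies_to_cputime(unsigned long long jif)
	{
		return jif * (NSEC_PER_SEC / HZ);
	}

	int main(void)
	{
		/* one second of jiffies round-trips to 1000000 usecs */
		printf("%llu usecs\n", cputime_to_usecs(jiffies_to_cputime(HZ)));
		return 0;
	}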
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff2ae4136584..020d655ed082 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -31,7 +31,7 @@ struct thread_info {
mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */
struct restart_block restart_block;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
__u64 ac_stamp;
__u64 ac_leave;
__u64 ac_stime;
@@ -69,7 +69,7 @@ struct thread_info {
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define setup_thread_stack(p, org) \
*task_thread_info(p) = *task_thread_info(org); \
task_thread_info(p)->ac_stime = 0; \
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index c57fa910f2c9..00cf03e0cb82 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,5 +1,5 @@
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
#define XEN_ACCOUNT_GET_STAMP \
MOV_FROM_ITC(pUStk, p6, r20, r2);
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 23d6759bb57b..c567adc8bea5 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -31,7 +31,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -79,4 +79,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index a48bd9a9927b..46c9e3007315 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -41,7 +41,7 @@ void foo(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6bfd8429ee0f..7a53530f22c2 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
#endif
.global __paravirt_work_processed_syscall;
__paravirt_work_processed_syscall:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
adds r2=PT(LOADRS)+16,r12
MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -762,7 +762,7 @@ __paravirt_work_processed_syscall:
ld8 r29=[r2],16 // M0|1 load cr.ipsr
ld8 r28=[r3],16 // M0|1 load cr.iip
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
;;
ld8 r30=[r2],16 // M0|1 load cr.ifs
@@ -793,7 +793,7 @@ __paravirt_work_processed_syscall:
ld8.fill r1=[r3],16 // M0|1 load r1
(pUStk) mov r17=1 // A
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) st1 [r15]=r17 // M2|3
#else
(pUStk) st1 [r14]=r17 // M2|3
@@ -813,7 +813,7 @@ __paravirt_work_processed_syscall:
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
COVER // B add current frame into dirty partition & set cr.ifs
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
mov r19=ar.bsp // M2 get new backing store pointer
st8 [r14]=r22 // M save time at leave
mov f10=f0 // F clear f10
@@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
adds r16=PT(CR_IPSR)+16,r12
adds r17=PT(CR_IIP)+16,r12
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
.pred.rel.mutex pUStk,pKStk
MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave
@@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
;;
ld8.fill r12=[r16],16
ld8.fill r13=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
#else
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
@@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
;;
ld8 r20=[r16],16 // ar.fpsr
ld8.fill r15=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred
#endif
;;
@@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
ld8.fill r2=[r17]
(pUStk) mov r17=1
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
// mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;;
// mib : mov add br -> mib : ld8 add br
// bbb_ : br nop cover;; mbb_ : mov br cover;;
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index e662f178b990..c4cd45d97749 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -529,7 +529,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
nop.i 0
;;
mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting
#else
nop.m 0
@@ -555,7 +555,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1
br.call.sptk.many b7=ia64_syscall_setup // B
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
// mov.m r30=ar.itc is called in advance
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 4738ff7bd66a..9be4e497f3d3 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock)
sched_clock = ia64_native_sched_clock
#endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
@@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
br.ret.sptk.many rp
END(cycle_to_cputime)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_IA64_BRL_EMU
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index fa25689fc453..689ffcaa284e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -784,7 +784,7 @@ ENTRY(break_fault)
(p8) adds r28=16,r28 // A switch cr.iip to next bundle
(p9) adds r8=1,r8 // A increment ei to next slot
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
;;
mov b6=r30 // I0 setup syscall handler branch reg early
#else
@@ -801,7 +801,7 @@ ENTRY(break_fault)
//
///////////////////////////////////////////////////////////////////////
st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting
#else
mov b6=r30 // I0 setup syscall handler branch reg early
@@ -817,7 +817,7 @@ ENTRY(break_fault)
cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
br.call.sptk.many b7=ia64_syscall_setup // B
1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
// mov.m r30=ar.itc is called in advance, and r13 is current
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A
@@ -1043,7 +1043,7 @@ END(ia64_syscall_setup)
DBG_FAULT(16)
FAULT(16)
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
/*
* There is no particular reason for this code to be here, other than
* that there happens to be space here that would go unused otherwise.
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index d56753a11636..cc82a7d744c9 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -4,7 +4,7 @@
#include "entry.h"
#include "paravirt_inst.h"
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
#define ACCOUNT_GET_STAMP \
(pUStk) mov.m r20=ar.itc;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 31360cbbd5f8..e34f565f595a 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-void (*pm_idle) (void);
-EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
@@ -301,7 +299,6 @@ cpu_idle (void)
if (mark_idle)
(*mark_idle)(1);
- idle = pm_idle;
if (!idle)
idle = default_idle;
(*idle)();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index aaefd9b94f2f..2029cc0d2fc6 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1051,7 +1051,6 @@ cpu_init (void)
max_num_phys_stacked = num_phys_stacked;
}
platform_cpu_init();
- pm_idle = default_idle;
}
void __init
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 88a794536bc0..fbaac1afb844 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = {
};
static struct clocksource *itc_clocksource;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <linux/kernel_stat.h>
@@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk)
account_system_time(tsk, 0, delta, delta);
}
+EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk)
{
account_idle_time(vtime_delta(tsk));
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index e7947528aee6..2cd225f8c68d 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -20,7 +20,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on BROKEN
- depends on HAVE_KVM && MODULES && EXPERIMENTAL
+ depends on HAVE_KVM && MODULES
# for device assignment:
depends on PCI
depends on BROKEN
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
index 515e0826803a..5d8a06b0ddf7 100644
--- a/arch/ia64/xen/Kconfig
+++ b/arch/ia64/xen/Kconfig
@@ -5,7 +5,7 @@
config XEN
bool "Xen hypervisor support"
default y
- depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL
+ depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB
select XEN_XENCOMM
select NO_IDLE_HZ
# the following are required to save/restore.
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 5e7088a26726..519afa2755db 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -22,7 +22,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -70,4 +70,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 765d0f57c787..bde899e155d3 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -44,36 +44,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return tsk->thread.lr;
}
-/*
- * Powermanagement idle function, if any..
- */
-static void (*pm_idle)(void) = NULL;
-
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
/*
- * We use this if we don't have any better
- * idle routine..
- */
-static void default_idle(void)
-{
- /* M32R_FIXME: Please use "cpu_sleep" mode. */
- cpu_relax();
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
- /* M32R_FIXME */
- cpu_relax();
-}
-
-/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
@@ -84,14 +58,8 @@ void cpu_idle (void)
/* endless idle loop with no priority at all */
while (1) {
rcu_idle_enter();
- while (!need_resched()) {
- void (*idle)(void) = pm_idle;
-
- if (!idle)
- idle = default_idle;
-
- idle();
- }
+ while (!need_resched())
+ cpu_relax();
rcu_idle_exit();
schedule_preempt_disabled();
}
@@ -120,21 +88,6 @@ void machine_power_off(void)
/* M32R_FIXME */
}
-static int __init idle_setup (char *str)
-{
- if (!strncmp(str, "poll", 4)) {
- printk("using poll in idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strncmp(str, "sleep", 4)) {
- printk("using sleep in idle threads.\n");
- pm_idle = default_idle;
- }
-
- return 1;
-}
-
-__setup("idle=", idle_setup);
-
void show_regs(struct pt_regs * regs)
{
printk("\n");
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index c4cdfe444c64..4bc945dfe467 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -41,7 +41,7 @@ config NFBLOCK
config NFCON
tristate "NatFeat console driver"
- depends on NATFEAT
+ depends on TTY && NATFEAT
help
Say Y to include support for the ARAnyM NatFeat console driver
which allows the console output to be redirected to the stderr
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 3e6b8445af6a..05aa53594d49 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -5,7 +5,6 @@
struct scatterlist;
-#ifndef CONFIG_MMU_SUN3
static inline int dma_supported(struct device *dev, u64 mask)
{
return 1;
@@ -111,8 +110,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
return 0;
}
-#else
-#include <asm-generic/dma-mapping-broken.h>
-#endif
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index ae700f49e51d..b0768a657920 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
#define start_thread(_regs, _pc, _usp) \
do { \
(_regs)->pc = (_pc); \
- ((struct switch_stack *)(_regs))[-1].a6 = 0; \
setframeformat(_regs); \
if (current->mm) \
(_regs)->d5 = current->mm->start_data; \
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 068ad49210d6..655347d80780 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -20,7 +20,5 @@ obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
obj-$(CONFIG_PCI) += pcibios.o
-ifndef CONFIG_MMU_SUN3
-obj-y += dma.o
-endif
+obj-$(CONFIG_HAS_DMA) += dma.o
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a5b74f729e5b..6ff2dcff3410 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -41,7 +41,6 @@ void show_regs(struct pt_regs *regs)
regs->msr, regs->ear, regs->esr, regs->fsr);
}
-void (*pm_idle)(void);
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
@@ -98,8 +97,6 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
-
if (!idle)
idle = default_idle;
diff --git a/arch/microblaze/platform/Kconfig.platform b/arch/microblaze/platform/Kconfig.platform
index 669c7eec293e..b1747211b8b1 100644
--- a/arch/microblaze/platform/Kconfig.platform
+++ b/arch/microblaze/platform/Kconfig.platform
@@ -20,7 +20,7 @@ endchoice
config SELFMOD
bool "Use self modified code for intc/timer"
- depends on EXPERIMENTAL && NO_MMU
+ depends on NO_MMU
default n
help
This choice enables self-modified code for interrupt controller
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2ac626ab9d43..5d7170bfeb28 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,7 +4,6 @@ config MIPS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
select HAVE_OPROFILE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_ARCH_KGDB
@@ -170,7 +169,7 @@ config MACH_DECSTATION
select SYS_HAS_CPU_R3000
select SYS_HAS_CPU_R4X00
select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
+ select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_128HZ
select SYS_SUPPORTS_256HZ
@@ -206,7 +205,7 @@ config MACH_JAZZ
select ISA
select SYS_HAS_CPU_R4X00
select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
+ select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_100HZ
help
This is a family of machines based on the MIPS R4030 chipset which was
@@ -396,7 +395,6 @@ config PNX8550_STB810
config PMC_MSP
bool "PMC-Sierra MSP chipsets"
- depends on EXPERIMENTAL
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
@@ -495,8 +493,7 @@ config SGI_IP27
here.
config SGI_IP28
- bool "SGI IP28 (Indigo2 R10k) (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "SGI IP28 (Indigo2 R10k)"
select FW_ARC
select FW_ARC64
select BOOT_ELF64
@@ -554,7 +551,6 @@ config SGI_IP32
config SIBYTE_CRHINE
bool "Sibyte BCM91120C-CRhine"
- depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select SIBYTE_BCM1120
@@ -565,7 +561,6 @@ config SIBYTE_CRHINE
config SIBYTE_CARMEL
bool "Sibyte BCM91120x-Carmel"
- depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select SIBYTE_BCM1120
@@ -576,7 +571,6 @@ config SIBYTE_CARMEL
config SIBYTE_CRHONE
bool "Sibyte BCM91125C-CRhone"
- depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select SIBYTE_BCM1125
@@ -588,7 +582,6 @@ config SIBYTE_CRHONE
config SIBYTE_RHONE
bool "Sibyte BCM91125E-Rhone"
- depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select SIBYTE_BCM1125H
@@ -613,7 +606,6 @@ config SIBYTE_SWARM
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
- depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select HAVE_PATA_PLATFORM
@@ -627,7 +619,6 @@ config SIBYTE_LITTLESUR
config SIBYTE_SENTOSA
bool "Sibyte BCM91250E-Sentosa"
- depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select NR_CPUS_DEFAULT_2
@@ -676,7 +667,7 @@ config SNI_RM
select R5000_CPU_SCACHE
select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
+ select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -780,7 +771,6 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
config NLM_XLR_BOARD
bool "Netlogic XLR/XLS based systems"
- depends on EXPERIMENTAL
select BOOT_ELF32
select NLM_COMMON
select SYS_HAS_CPU_XLR
@@ -809,7 +799,6 @@ config NLM_XLR_BOARD
config NLM_XLP_BOARD
bool "Netlogic XLP based systems"
- depends on EXPERIMENTAL
select BOOT_ELF32
select NLM_COMMON
select SYS_HAS_CPU_XLP
@@ -1375,7 +1364,6 @@ config CPU_R5500
config CPU_R6000
bool "R6000"
- depends on EXPERIMENTAL
depends on SYS_HAS_CPU_R6000
select CPU_SUPPORTS_32BIT_KERNEL
help
@@ -1393,7 +1381,6 @@ config CPU_NEVADA
config CPU_R8000
bool "R8000"
- depends on EXPERIMENTAL
depends on SYS_HAS_CPU_R8000
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_64BIT_KERNEL
@@ -1768,7 +1755,7 @@ config PAGE_SIZE_4KB
config PAGE_SIZE_8KB
bool "8kB"
- depends on (EXPERIMENTAL && CPU_R8000) || CPU_CAVIUM_OCTEON
+ depends on CPU_R8000 || CPU_CAVIUM_OCTEON
help
Using 8kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available
@@ -1795,7 +1782,7 @@ config PAGE_SIZE_32KB
config PAGE_SIZE_64KB
bool "64kB"
- depends on EXPERIMENTAL && !CPU_R3000 && !CPU_TX39XX
+ depends on !CPU_R3000 && !CPU_TX39XX
help
Using 64kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available on
@@ -2161,7 +2148,6 @@ source "mm/Kconfig"
config SMP
bool "Multi-Processing support"
depends on SYS_SUPPORTS_SMP
- select IRQ_PER_CPU
select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
@@ -2312,8 +2298,7 @@ config HZ
source "kernel/Kconfig.preempt"
config KEXEC
- bool "Kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Kexec system call"
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/mips/bcm47xx/serial.c b/arch/mips/bcm47xx/serial.c
index 57981e4fe2bc..b8ef965705cf 100644
--- a/arch/mips/bcm47xx/serial.c
+++ b/arch/mips/bcm47xx/serial.c
@@ -62,7 +62,7 @@ static int __init uart8250_init_bcma(void)
p->mapbase = (unsigned int) bcma_port->regs;
p->membase = (void *) bcma_port->regs;
- p->irq = bcma_port->irq + 2;
+ p->irq = bcma_port->irq;
p->uartclk = bcma_port->baud_base;
p->regshift = bcma_port->reg_shift;
p->iotype = UPIO_MEM;
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 17307ab90474..3e68bfbda6bc 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -28,9 +28,7 @@
#define SO_LINGER 0x0080 /* Block on close of a reliable
socket to transmit pending data. */
#define SO_OOBINLINE 0x0100 /* Receive out-of-band data in-band. */
-#if 0
-To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
-#endif
+#define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
#define SO_TYPE 0x1008 /* Compatible name for SO_STYLE. */
#define SO_STYLE SO_TYPE /* Synonym */
@@ -90,5 +88,6 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/jazz/Kconfig b/arch/mips/jazz/Kconfig
index 1f372b0d2559..fb1e072da30b 100644
--- a/arch/mips/jazz/Kconfig
+++ b/arch/mips/jazz/Kconfig
@@ -1,6 +1,6 @@
config ACER_PICA_61
- bool "Support for Acer PICA 1 chipset (EXPERIMENTAL)"
- depends on MACH_JAZZ && EXPERIMENTAL
+ bool "Support for Acer PICA 1 chipset"
+ depends on MACH_JAZZ
select DMA_NONCOHERENT
select SYS_SUPPORTS_LITTLE_ENDIAN
help
@@ -13,7 +13,7 @@ config MIPS_MAGNUM_4000
bool "Support for MIPS Magnum 4000"
depends on MACH_JAZZ
select DMA_NONCOHERENT
- select SYS_SUPPORTS_BIG_ENDIAN if EXPERIMENTAL
+ select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a machine with an R4000 100 MHz CPU. To compile a Linux
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index e44a1866653f..08f7ebd9c774 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
@@ -223,8 +224,8 @@ ltq_dma_init(struct platform_device *pdev)
panic("Failed to get dma resource");
/* remap dma register range */
- ltq_dma_membase = devm_request_and_ioremap(&pdev->dev, res);
- if (!ltq_dma_membase)
+ ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ltq_dma_membase))
panic("Failed to remap dma resource");
/* power up and reset the dma engine */
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index e30b1ed1b936..9861c8669fab 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -150,11 +150,9 @@ static int gptu_probe(struct platform_device *pdev)
}
/* remap gptu register range */
- gptu_membase = devm_request_and_ioremap(&pdev->dev, res);
- if (!gptu_membase) {
- dev_err(&pdev->dev, "Failed to remap resource\n");
- return -ENOMEM;
- }
+ gptu_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gptu_membase))
+ return PTR_ERR(gptu_membase);
/* enable our clock */
clk = clk_get(&pdev->dev, NULL);
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 95681789b51e..910fb4c20b9e 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -214,13 +214,13 @@ static int ltq_pci_probe(struct platform_device *pdev)
return -EINVAL;
}
- ltq_pci_membase = devm_request_and_ioremap(&pdev->dev, res_bridge);
- ltq_pci_mapped_cfg = devm_request_and_ioremap(&pdev->dev, res_cfg);
+ ltq_pci_membase = devm_ioremap_resource(&pdev->dev, res_bridge);
+ if (IS_ERR(ltq_pci_membase))
+ return PTR_ERR(ltq_pci_membase);
- if (!ltq_pci_membase || !ltq_pci_mapped_cfg) {
- dev_err(&pdev->dev, "failed to remap resources\n");
- return -ENOMEM;
- }
+ ltq_pci_mapped_cfg = devm_ioremap_resource(&pdev->dev, res_cfg);
+ if (IS_ERR(ltq_pci_mapped_cfg))
+ return PTR_ERR(ltq_pci_mapped_cfg);
ltq_pci_startup(pdev);
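All three lantiq conversions above reduce to the same shape. A minimal probe sketch (hypothetical driver) of the devm_ioremap_resource() idiom, which requests and maps the region in one call, logs its own errors, and returns an ERR_PTR on failure:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* helper already printed why */

		return 0;
	}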
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
index 4b2ea282b9c7..4d8705a65e4a 100644
--- a/arch/mips/sgi-ip27/Kconfig
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -13,7 +13,6 @@ config SGI_SN_M_MODE
config SGI_SN_N_MODE
bool "IP27 N-Mode"
- depends on EXPERIMENTAL
help
The nodes of Origin, Onyx, Fuel and Tezro systems can be configured
in either N-Mode, which allows for more nodes, or M-Mode, which allows
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index 9cb9d43a3a0e..e05ad4da44d9 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -118,7 +118,7 @@ static struct resource sc26xx_rsrc[] = {
}
};
-#include <linux/platform_data/sccnxp.h>
+#include <linux/platform_data/serial-sccnxp.h>
static struct sccnxp_pdata sccnxp_data = {
.reg_shift = 2,
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index c1be4397b1ed..a18abfc558eb 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
mn10300_dcache_flush_inv();
}
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index af5366bbfe62..5c7c7c988544 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -22,7 +22,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -70,4 +70,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 81d5cb9b6569..bf6e949a2f87 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -524,7 +524,7 @@ static int mask_test_and_clear(volatile u8 *ptr, u8 mask)
static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
{
struct uart_icount *icount = &port->uart.icount;
- struct tty_struct *tty = port->uart.state->port.tty;
+ struct tty_port *tport = &port->uart.state->port;
unsigned ix;
int count;
u8 st, ch, push, status, overrun;
@@ -534,10 +534,10 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
push = 0;
count = CIRC_CNT(port->rx_inp, port->rx_outp, MNSC_BUFFER_SIZE);
- count = tty_buffer_request_room(tty, count);
+ count = tty_buffer_request_room(tport, count);
if (count == 0) {
- if (!tty->low_latency)
- tty_flip_buffer_push(tty);
+ if (!tport->low_latency)
+ tty_flip_buffer_push(tport);
return;
}
@@ -545,8 +545,8 @@ try_again:
/* pull chars out of the hat */
ix = ACCESS_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
- if (push && !tty->low_latency)
- tty_flip_buffer_push(tty);
+ if (push && !tport->low_latency)
+ tty_flip_buffer_push(tport);
return;
}
@@ -666,19 +666,19 @@ insert:
else
flag = TTY_NORMAL;
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
}
/* overrun is special, since it's reported immediately, and doesn't
* affect the current character
*/
if (overrun)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
count--;
if (count <= 0) {
- if (!tty->low_latency)
- tty_flip_buffer_push(tty);
+ if (!tport->low_latency)
+ tty_flip_buffer_push(tport);
return;
}
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index eb09f5a552ff..84f4e97e3074 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -37,12 +37,6 @@
#include "internal.h"
/*
- * power management idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-/*
* return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
@@ -113,7 +107,6 @@ void cpu_idle(void)
void (*idle)(void);
smp_rmb();
- idle = pm_idle;
if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
idle = poll_idle;
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index 7d618feb1b72..5e8a3b6d6bc6 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -39,11 +39,6 @@
void (*powersave) (void) = NULL;
-static inline void pm_idle(void)
-{
- barrier();
-}
-
void cpu_idle(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b77feffbadea..c600f39aa453 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -9,20 +9,19 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
select HAVE_GENERIC_HARDIRQS
select BROKEN_RODATA
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP
- select IRQ_PER_CPU
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
+ select TTY # Needed for pdc_cons.c
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
@@ -194,12 +193,12 @@ config PARISC_PAGE_SIZE_4KB
If you don't know what to do, choose 4KB.
config PARISC_PAGE_SIZE_16KB
- bool "16KB (EXPERIMENTAL)"
- depends on PA8X00 && EXPERIMENTAL
+ bool "16KB"
+ depends on PA8X00
config PARISC_PAGE_SIZE_64KB
- bool "64KB (EXPERIMENTAL)"
- depends on PA8X00 && EXPERIMENTAL
+ bool "64KB"
+ depends on PA8X00
endchoice
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 467bbd510eac..106b395688e1 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
/* At the moment, we panic on error for IOMMU resource exhaustion */
#define dma_mapping_error(dev, x) 0
+/* This API cannot be supported on PA-RISC */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index d9ff4731253b..526e4b9aece0 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -13,7 +13,7 @@
#define SO_BROADCAST 0x0020
#define SO_LINGER 0x0080
#define SO_OOBINLINE 0x0100
-/* To add :#define SO_REUSEPORT 0x0200 */
+#define SO_REUSEPORT 0x0200
#define SO_SNDBUF 0x1001
#define SO_RCVBUF 0x1002
#define SO_SNDBUFFORCE 0x100a
@@ -69,6 +69,7 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 0x4024
+#define SO_LOCK_FILTER 0x4025
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index efc5e7d30530..d5cae55195ec 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -138,23 +138,17 @@ static const struct tty_operations pdc_console_tty_ops = {
static void pdc_console_poll(unsigned long unused)
{
int data, count = 0;
- struct tty_struct *tty = tty_port_tty_get(&tty_port);
-
- if (!tty)
- return;
while (1) {
data = pdc_console_poll_key(NULL);
if (data == -1)
break;
- tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ tty_insert_flip_char(&tty_port, data & 0xFF, TTY_NORMAL);
count ++;
}
if (count)
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&tty_port);
if (pdc_cons.flags & CON_ENABLED)
mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 17903f1f356b..e7fb8edb629b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -118,14 +118,12 @@ config PPC
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
select HAVE_GENERIC_HARDIRQS
select ARCH_WANT_IPC_PARSE_VERSION
select SPARSE_IRQ
- select IRQ_PER_CPU
select IRQ_DOMAIN
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -334,7 +332,7 @@ config SWIOTLB
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
- depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || \
+ depends on SMP && HOTPLUG && (PPC_PSERIES || \
PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
---help---
Say Y here to be able to disable and re-enable individual
@@ -356,8 +354,8 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
def_bool y
config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
- depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
+ bool "kexec system call"
+ depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
@@ -852,8 +850,8 @@ config LOWMEM_CAM_NUM
default 3
config DYNAMIC_MEMSTART
- bool "Enable page aligned dynamic load address for kernel (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x)
+ bool "Enable page aligned dynamic load address for kernel"
+ depends on ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x)
select NONSTATIC_KERNEL
help
This option enables the kernel to be loaded at any page aligned
@@ -870,8 +868,8 @@ config DYNAMIC_MEMSTART
This option is overridden by CONFIG_RELOCATABLE
config RELOCATABLE
- bool "Build a relocatable kernel (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && 44x
+ bool "Build a relocatable kernel"
+ depends on ADVANCED_OPTIONS && FLATMEM && 44x
select NONSTATIC_KERNEL
help
This builds a kernel image that is capable of running at the
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index 29bb11ec6c64..4f35fc462385 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -1,6 +1,6 @@
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=256
CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 88fa5c46f66f..f7df8362911f 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -1,6 +1,6 @@
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 840a2c2d0430..bcedeea0df89 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -1,6 +1,6 @@
CONFIG_PPC64=y
CONFIG_ALTIVEC=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 483733bd06d4..607559ab271f 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
* the same units as the timebase. Otherwise we measure cpu time
* in jiffies using the generic definitions.
*/
@@ -16,7 +16,7 @@
#ifndef __POWERPC_CPUTIME_H
#define __POWERPC_CPUTIME_H
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm-generic/cputime.h>
#ifdef __KERNEL__
static inline void setup_cputime_one_jiffy(void) { }
@@ -231,5 +231,5 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
#endif /* __KERNEL__ */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 531fe0c3108f..b1e7f2af1016 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -145,7 +145,7 @@ struct dtl_entry {
extern struct kmem_cache *dtl_cache;
/*
- * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
* reading from the dispatch trace log. If other code wants to consume
* DTL entries, it can set this pointer to a function that will get
* called once for each DTL entry that gets processed.
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9710be3a2d17..136bba62efa4 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
+#include <linux/device.h>
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
@@ -35,6 +36,7 @@ struct power_pmu {
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
+ const struct attribute_group **attr_groups;
int n_generic;
int *generic_events;
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
@@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
* If an event_id is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
+
+extern ssize_t power_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page);
+
+/*
+ * EVENT_VAR() is the same as PMU_EVENT_VAR, but with a suffix.
+ *
+ * Having a suffix allows us to have aliases in sysfs - eg: the generic
+ * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
+ * 'PM_CYC' where the latter is the name by which the event is known in
+ * POWER CPU specification.
+ */
+#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
+#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
+
+#define EVENT_ATTR(_name, _id, _suffix) \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \
+ power_events_sysfs_show)
+
+#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
+#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
+
+#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p)
+#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
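For orientation, roughly what one of these expands to (PMU_EVENT_ATTR is the generic helper in <linux/perf_event.h>; the expansion is illustrative, not verbatim). GENERIC_EVENT_ATTR(cpu-cycles, CYC) becomes approximately:

	static struct perf_pmu_events_attr event_attr_CYC_g = {
		.attr = __ATTR(cpu-cycles, 0444, power_events_sysfs_show, NULL),
		.id   = PME_PM_CYC,
	};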
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index ea2a86e8ff95..2d0e1f5d8339 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -24,7 +24,7 @@
* user_time and system_time fields in the paca.
*/
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ra, rb)
#define ACCOUNT_STOLEN_TIME
@@ -70,7 +70,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_PPC_SPLPAR */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
* Macros for storing registers into and loading registers from
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index eb0b1864d400..a26dcaece509 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -29,7 +29,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_RCVLOWAT 16
#define SO_SNDLOWAT 17
#define SO_RCVTIMEO 18
@@ -77,4 +77,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3d990d3bd8ba..ac057013f9fd 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -94,7 +94,7 @@ system_call_common:
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
beq 33f
/* if from user, see if there are any DTL entries to process */
@@ -110,7 +110,7 @@ BEGIN_FW_FTR_SECTION
addi r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
/*
* A syscall should always be called with interrupts enabled
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 127361e093f4..f77fa22754bc 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Factors for converting from cputime_t (timebase ticks) to
* jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk)
if (stolen)
account_steal_time(stolen);
}
+EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk)
{
@@ -377,7 +378,7 @@ void vtime_account_user(struct task_struct *tsk)
account_user_time(tsk, utime, utimescaled);
}
-#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif
@@ -668,7 +669,7 @@ int update_persistent_clock(struct timespec now)
struct rtc_time tm;
if (!ppc_md.set_rtc_time)
- return 0;
+ return -ENODEV;
to_tm(now.tv_sec + 1 + timezone_offset, &tm);
tm.tm_year -= 1900;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 4730c953f435..63c67ec72e43 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -41,7 +41,7 @@ config KVM_BOOK3S_PR
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
- depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
+ depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
select KVM
select KVM_BOOK3S_32_HANDLER
select KVM_BOOK3S_PR
@@ -56,7 +56,7 @@ config KVM_BOOK3S_32
config KVM_BOOK3S_64
tristate "KVM support for PowerPC book3s_64 processors"
- depends on EXPERIMENTAL && PPC_BOOK3S_64
+ depends on PPC_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
---help---
@@ -97,7 +97,7 @@ config KVM_BOOKE_HV
config KVM_440
bool "KVM support for PowerPC 440 processors"
- depends on EXPERIMENTAL && 44x
+ depends on 44x
select KVM
select KVM_MMIO
---help---
@@ -122,7 +122,7 @@ config KVM_EXIT_TIMING
config KVM_E500V2
bool "KVM support for PowerPC E500v2 processors"
- depends on EXPERIMENTAL && E500 && !PPC_E500MC
+ depends on E500 && !PPC_E500MC
select KVM
select KVM_MMIO
select MMU_NOTIFIER
@@ -137,7 +137,7 @@ config KVM_E500V2
config KVM_E500MC
bool "KVM support for PowerPC E500MC/E5500 processors"
- depends on EXPERIMENTAL && PPC_E500MC
+ depends on PPC_E500MC
select KVM
select KVM_MMIO
select KVM_BOOKE_HV
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 56585086413a..7443481a315c 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
sldi r29,r5,SID_SHIFT - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
-
- /* Calculate hash value for primary slot and store it in r28 */
- rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
- rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
- xor r28,r5,r0
+ /*
+ * Calculate hash value for primary slot and store it in r28
+ * r3 = va, r5 = vsid
+ * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+ */
+ rldicl r0,r3,64-12,48
+ xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
+ * r3 = va, r5 = vsid
*/
- rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
- clrldi r5,r5,40 /* vsid & 0xffffff */
- rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
- xor r28,r28,r5
+ sldi r28,r5,25 /* vsid << 25 */
+ /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+ rldicl r0,r3,64-12,36
+ xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
*/
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
-
- /* Calculate hash value for primary slot and store it in r28 */
- rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
- rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
- xor r28,r5,r0
+ /*
+ * Calculate hash value for primary slot and store it in r28
+ * r3 = va, r5 = vsid
+ * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+ */
+ rldicl r0,r3,64-12,48
+ xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/*
* Calculate hash value for primary slot and
* store it in r28 for 1T segment
+ * r3 = va, r5 = vsid
*/
- rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
- clrldi r5,r5,40 /* vsid & 0xffffff */
- rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
- xor r28,r28,r5
+ sldi r28,r5,25 /* vsid << 25 */
+ /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+ rldicl r0,r3,64-12,36
+ xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
- /* Calculate hash value for primary slot and store it in r28 */
- rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
- rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */
- xor r28,r5,r0
+ /* Calculate hash value for primary slot and store it in r28
+ * r3 = va, r5 = vsid
+ * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
+ */
+ rldicl r0,r3,64-16,52
+ xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
or r29,r28,r29
-
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
+ * r3 = va, r5 = vsid
*/
- rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
- clrldi r5,r5,40 /* vsid & 0xffffff */
- rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
- xor r28,r28,r5
+ sldi r28,r5,25 /* vsid << 25 */
+ /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
+ rldicl r0,r3,64-16,40
+ xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
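The same computation in C, following the new comments in the 4K base-page hunks above (the 64K-page variant shifts the VA by 16 instead of 12). Illustrative only:

	#include <stdio.h>

	/* 256MB segment: hash = vsid ^ ((va >> 12) & ((1ul << (28-12)) - 1)) */
	static unsigned long hash_256M(unsigned long vsid, unsigned long va)
	{
		return vsid ^ ((va >> 12) & ((1UL << (28 - 12)) - 1));
	}

	/* 1T segment: hash = (vsid << 25) ^ vsid ^ masked VA bits */
	static unsigned long hash_1T(unsigned long vsid, unsigned long va)
	{
		return (vsid << 25) ^ vsid ^
		       ((va >> 12) & ((1UL << (40 - 12)) - 1));
	}

	int main(void)
	{
		printf("%lx %lx\n", hash_256M(0x123456, 0xdeadb000),
		       hash_1T(0x123456, 0xdeadb000));
		return 0;
	}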
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index aa2465e21f1a..fa476d50791f 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event)
return event->hw.idx;
}
+ssize_t power_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
@@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu)
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
+ power_pmu.attr_groups = ppmu->attr_groups;
+
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
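From userspace the new attributes surface under the standard perf event_source sysfs tree. A small check, assuming that path and the format string above:

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/bus/event_source/devices/cpu/events/cpu-cycles",
				"r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("%s", buf);	/* expect "event=0x1e" on POWER7 */
		fclose(f);
		return 0;
	}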
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 2ee01e38d5e2..b554879bd31e 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -51,6 +51,18 @@
#define MMCR1_PMCSEL_MSK 0xff
/*
+ * Power7 event codes.
+ */
+#define PME_PM_CYC 0x1e
+#define PME_PM_GCT_NOSLOT_CYC 0x100f8
+#define PME_PM_CMPLU_STALL 0x4000a
+#define PME_PM_INST_CMPL 0x2
+#define PME_PM_LD_REF_L1 0xc880
+#define PME_PM_LD_MISS_L1 0x400f0
+#define PME_PM_BRU_FIN 0x10068
+#define PME_PM_BRU_MPRED 0x400f6
+
+/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
}
static int power7_generic_events[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */
- [PERF_COUNT_HW_INSTRUCTIONS] = 2,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/
- [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */
+ [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -362,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
+
+GENERIC_EVENT_ATTR(cpu-cycles, CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED);
+
+POWER_EVENT_ATTR(CYC, CYC);
+POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
+POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
+POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
+POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
+POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
+POWER_EVENT_ATTR(BRU_FIN, BRU_FIN);
+POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED);
+
+static struct attribute *power7_events_attr[] = {
+ GENERIC_EVENT_PTR(CYC),
+ GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(CMPLU_STALL),
+ GENERIC_EVENT_PTR(INST_CMPL),
+ GENERIC_EVENT_PTR(LD_REF_L1),
+ GENERIC_EVENT_PTR(LD_MISS_L1),
+ GENERIC_EVENT_PTR(BRU_FIN),
+ GENERIC_EVENT_PTR(BRU_MPRED),
+
+ POWER_EVENT_PTR(CYC),
+ POWER_EVENT_PTR(GCT_NOSLOT_CYC),
+ POWER_EVENT_PTR(CMPLU_STALL),
+ POWER_EVENT_PTR(INST_CMPL),
+ POWER_EVENT_PTR(LD_REF_L1),
+ POWER_EVENT_PTR(LD_MISS_L1),
+ POWER_EVENT_PTR(BRU_FIN),
+ POWER_EVENT_PTR(BRU_MPRED),
+ NULL
+};
+
+
+static struct attribute_group power7_pmu_events_group = {
+ .name = "events",
+ .attrs = power7_events_attr,
+};
+
+static const struct attribute_group *power7_pmu_attr_groups[] = {
+ &power7_pmu_events_group,
+ NULL,
+};
+
static struct power_pmu power7_pmu = {
.name = "POWER7",
.n_counter = 6,
@@ -373,6 +436,7 @@ static struct power_pmu power7_pmu = {
.get_alternatives = power7_get_alternatives,
.disable_pmc = power7_disable_pmc,
.flags = PPMU_ALT_SIPR,
+ .attr_groups = power7_pmu_attr_groups,
.n_generic = ARRAY_SIZE(power7_generic_events),
.generic_events = power7_generic_events,
.cache_events = &power7_cache_events,
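Once register_power_pmu() copies ppmu->attr_groups into power_pmu (the core-book3s.c hunk above), the "events" group is published through sysfs and perf can resolve these names symbolically. Assuming the PMU registers under the usual "cpu" name and POWER_EVENT_ATTR exposes the PM_* spelling, reading /sys/bus/event_source/devices/cpu/events/PM_CYC should return "event=0x1e", and "perf stat -e cpu/PM_CYC/" uses the same mapping.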
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 02d02a09942d..92ab60a62711 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -277,7 +277,6 @@ config P5040_DS
config PPC_QEMU_E500
bool "QEMU generic e500 platform"
- depends on EXPERIMENTAL
select DEFAULT_UIMAGE
help
This option enables support for running as a QEMU guest using
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 2e7ff0c5cf42..53aaefeb3386 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -124,7 +124,7 @@ config CBE_CPUFREQ
config CBE_CPUFREQ_PMI_ENABLE
bool "CBE frequency scaling using PMI interface"
- depends on CBE_CPUFREQ && EXPERIMENTAL
+ depends on CBE_CPUFREQ
default n
help
Select this, if you want to use the PMI interface
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 25db92a8e1cf..49318385d4fa 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 46b7f0232523..e87c19473973 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -48,7 +48,7 @@ config PS3_HTAB_SIZE
system will have optimal runtime performance.
config PS3_DYNAMIC_DMA
- depends on PPC_PS3 && EXPERIMENTAL
+ depends on PPC_PS3
bool "PS3 Platform dynamic DMA page table management"
default n
help
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a7648543c59e..0cc0ac07a55d 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7;
*/
static int dtl_buf_entries = N_DISPATCH_LOG;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
u64 write_index;
struct dtl_entry *write_ptr;
@@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl)
return per_cpu(dtl_rings, dtl->cpu).write_index;
}
-#else /* CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_start(struct dtl *dtl)
{
@@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl)
{
return lppaca_of(dtl->cpu).dtl_idx;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_enable(struct dtl *dtl)
{
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ca55882465d6..527e12c9573b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -281,7 +281,7 @@ static struct notifier_block pci_dn_reconfig_nb = {
struct kmem_cache *dtl_cache;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Allocate space for the dispatch trace log for all possible cpus
* and register the buffers with the hypervisor. This is used for
@@ -332,12 +332,12 @@ static int alloc_dispatch_logs(void)
return 0;
}
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
return 0;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int alloc_dispatch_log_kmem_cache(void)
{
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index d9130630f7ef..81c331481336 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -414,7 +414,7 @@ static int mpc52xx_bcom_probe(struct platform_device *op)
goto error_sramclean;
}
- if (!request_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma),
+ if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
DRIVER_NAME)) {
printk(KERN_ERR DRIVER_NAME ": "
"Can't request registers region\n");
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b5ea38c25647..27c91c38d1a1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -78,7 +78,6 @@ config S390
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_DEBUG_KMEMLEAK
@@ -718,8 +717,8 @@ source "arch/s390/kvm/Kconfig"
config S390_GUEST
def_bool y
- prompt "s390 support for virtio devices (EXPERIMENTAL)"
- depends on 64BIT && EXPERIMENTAL
+ prompt "s390 support for virtio devices"
+ depends on 64BIT
select VIRTUALIZATION
select VIRTIO
select VIRTIO_CONSOLE
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 436d07c23be8..f99eea7fff0f 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -28,7 +28,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -76,4 +76,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a5f4f5a1d24b..0aa98db8a80d 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
do_div(nsecs, 125);
S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+ /* Program the maximum value if we have an overflow (== year 2042) */
+ if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+ S390_lowcore.clock_comparator = -1ULL;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
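The guard works because the comparator is an unsigned 64-bit sum: if adding the delta wraps past 2^64, the result is numerically smaller than the base, so the code pins the comparator at the maximum rather than programming a time in the past. The same check in isolation:

    #include <stdint.h>

    /* Saturate an unsigned deadline instead of letting it wrap. */
    static uint64_t clamp_deadline(uint64_t base, uint64_t delta)
    {
            uint64_t deadline = base + delta;

            if (deadline < base)            /* sum wrapped past 2^64 */
                    deadline = UINT64_MAX;  /* latest programmable time */
            return deadline;
    }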
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b68444a..ce9cc5aa2033 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk)
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system;
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk)
virt_timer_forward(system);
}
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
void vtime_account_system(struct task_struct *tsk)
-__attribute__((alias("vtime_account")));
+__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);
void __kprobes vtime_stop_cpu(void)
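Note that vtime_account_system gains no body of its own: the alias attribute makes it a second linker-level name for vtime_account_irq_enter, so both exported symbols resolve to the same code. A minimal illustration of the GCC/Clang extension:

    #include <stdio.h>

    static void real_impl(void) { puts("hello"); }

    /* other_name shares real_impl's address; no wrapper call is emitted. */
    void other_name(void) __attribute__((alias("real_impl")));

    int main(void)
    {
            other_name();   /* prints "hello" */
            return 0;
    }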
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index b58dd869cb32..60f9f8ae0fc8 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -18,7 +18,7 @@ if VIRTUALIZATION
config KVM
def_tristate y
prompt "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && EXPERIMENTAL
+ depends on HAVE_KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index babc2b826c5c..87f720037ff7 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,7 +11,6 @@ config SUPERH
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
select ARCH_HAVE_CUSTOM_GPIO_H
@@ -91,9 +90,6 @@ config GENERIC_CSUM
config GENERIC_HWEIGHT
def_bool y
-config IRQ_PER_CPU
- def_bool y
-
config GENERIC_GPIO
def_bool n
@@ -648,7 +644,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
- depends on SUPERH32 && EXPERIMENTAL && MMU
+ depends on SUPERH32 && MMU
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
@@ -665,7 +661,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
- depends on SUPERH32 && EXPERIMENTAL && BROKEN_ON_SMP
+ depends on SUPERH32 && BROKEN_ON_SMP
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
@@ -679,7 +675,7 @@ config CRASH_DUMP
config KEXEC_JUMP
bool "kexec jump (EXPERIMENTAL)"
- depends on SUPERH32 && KEXEC && HIBERNATION && EXPERIMENTAL
+ depends on SUPERH32 && KEXEC && HIBERNATION
help
Jump between original kernel and kexeced kernel and invoke
code via KEXEC
@@ -713,7 +709,7 @@ config SECCOMP
config CC_STACKPROTECTOR
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
- depends on SUPERH32 && EXPERIMENTAL
+ depends on SUPERH32
help
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
@@ -764,7 +760,7 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
- depends on SMP && HOTPLUG && EXPERIMENTAL
+ depends on SMP && HOTPLUG
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
@@ -843,7 +839,7 @@ config ENTRY_OFFSET
config ROMIMAGE_MMCIF
bool "Include MMCIF loader in romImage (EXPERIMENTAL)"
- depends on CPU_SUBTYPE_SH7724 && EXPERIMENTAL
+ depends on CPU_SUBTYPE_SH7724
help
Say Y here to include experimental MMCIF loading code in
romImage. With this enabled it is possible to write the romImage
@@ -929,7 +925,6 @@ source "fs/Kconfig.binfmt"
endmenu
menu "Power management options (EXPERIMENTAL)"
-depends on EXPERIMENTAL
source "kernel/power/Kconfig"
diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu
index 770ff2d5b94d..05b518e90cf7 100644
--- a/arch/sh/Kconfig.cpu
+++ b/arch/sh/Kconfig.cpu
@@ -33,7 +33,7 @@ config SH64_FPU_DENORM_FLUSH
config SH_FPU_EMU
def_bool n
prompt "FPU emulation support"
- depends on !SH_FPU && EXPERIMENTAL
+ depends on !SH_FPU
help
Selecting this option will enable support for software FPU emulation.
Most SH-3 users will want to say Y here, whereas most SH-4 users will
@@ -68,7 +68,6 @@ config SH_STORE_QUEUES
config SPECULATIVE_EXECUTION
bool "Speculative subroutine return"
- depends on EXPERIMENTAL
depends on CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || CPU_SUBTYPE_SH7786
help
This enables support for a speculative instruction fetch for
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index a0fa5791cd44..aaff7671101b 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -887,12 +887,6 @@ static struct platform_device camera_devices[] = {
};
/* FSI */
-static struct sh_fsi_platform_info fsi_info = {
- .port_b = {
- .flags = SH_FSI_BRS_INV,
- },
-};
-
static struct resource fsi_resources[] = {
[0] = {
.name = "FSI",
@@ -911,25 +905,22 @@ static struct platform_device fsi_device = {
.id = 0,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
- .dev = {
- .platform_data = &fsi_info,
- },
-};
-
-static struct asoc_simple_dai_init_info fsi_da7210_init_info = {
- .fmt = SND_SOC_DAIFMT_I2S,
- .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM,
- .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
};
static struct asoc_simple_card_info fsi_da7210_info = {
.name = "DA7210",
.card = "FSIB-DA7210",
- .cpu_dai = "fsib-dai",
.codec = "da7210.0-001a",
.platform = "sh_fsi.0",
- .codec_dai = "da7210-hifi",
- .init = &fsi_da7210_init_info,
+ .daifmt = SND_SOC_DAIFMT_I2S,
+ .cpu_dai = {
+ .name = "fsib-dai",
+ .fmt = SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF,
+ },
+ .codec_dai = {
+ .name = "da7210-hifi",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM,
+ },
};
static struct platform_device fsi_da7210_device = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 35f6efa3ac0e..4010e63e82d8 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -279,12 +279,6 @@ static struct platform_device ceu1_device = {
/* FSI */
/* change J20, J21, J22 pin to 1-2 connection to use slave mode */
-static struct sh_fsi_platform_info fsi_info = {
- .port_a = {
- .flags = SH_FSI_BRS_INV,
- },
-};
-
static struct resource fsi_resources[] = {
[0] = {
.name = "FSI",
@@ -303,26 +297,23 @@ static struct platform_device fsi_device = {
.id = 0,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
- .dev = {
- .platform_data = &fsi_info,
- },
-};
-
-static struct asoc_simple_dai_init_info fsi2_ak4642_init_info = {
- .fmt = SND_SOC_DAIFMT_LEFT_J,
- .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM,
- .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
- .sysclk = 11289600,
};
static struct asoc_simple_card_info fsi_ak4642_info = {
.name = "AK4642",
.card = "FSIA-AK4642",
- .cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi.0",
- .codec_dai = "ak4642-hifi",
- .init = &fsi2_ak4642_init_info,
+ .daifmt = SND_SOC_DAIFMT_LEFT_J,
+ .cpu_dai = {
+ .name = "fsia-dai",
+ .fmt = SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF,
+ },
+ .codec_dai = {
+ .name = "ak4642-hifi",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM,
+ .sysclk = 11289600,
+ },
};
static struct platform_device fsi_ak4642_device = {
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0c910163caa3..3d5a1b387cc0 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
#include <asm/smp.h>
#include <asm/bl_bit.h>
-void (*pm_idle)(void);
+static void (*sh_idle)(void);
static int hlt_counter;
@@ -103,9 +103,9 @@ void cpu_idle(void)
/* Don't trace irqs off for idle */
stop_critical_timings();
if (cpuidle_idle_call())
- pm_idle();
+ sh_idle();
/*
- * Sanity check to ensure that pm_idle() returns
+ * Sanity check to ensure that sh_idle() returns
* with IRQs enabled
*/
WARN_ON(irqs_disabled());
@@ -123,13 +123,13 @@ void __init select_idle_routine(void)
/*
* If a platform has set its own idle routine, leave it alone.
*/
- if (pm_idle)
+ if (sh_idle)
return;
if (hlt_works())
- pm_idle = default_idle;
+ sh_idle = default_idle;
else
- pm_idle = poll_idle;
+ sh_idle = poll_idle;
}
void stop_this_cpu(void *unused)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 0f7c852f355c..5a43a871e097 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -83,7 +83,7 @@ config 32BIT
config PMB
bool "Support 32-bit physical addressing through PMB"
- depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+ depends on MMU && CPU_SH4A && !CPU_SH4AL_DSP
select 32BIT
select UNCACHED_MAPPING
help
@@ -110,7 +110,7 @@ config VSYSCALL
config NUMA
bool "Non Uniform Memory Access (NUMA) Support"
- depends on MMU && SYS_SUPPORTS_NUMA && EXPERIMENTAL
+ depends on MMU && SYS_SUPPORTS_NUMA
select ARCH_WANT_NUMA_VARIABLE_LOCALITY
default n
help
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9f2edb5c5551..9bff3db17c8c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -23,7 +23,6 @@ config SPARC
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select RTC_CLASS
select RTC_DRV_M48T59
- select HAVE_IRQ_WORK
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL
@@ -61,6 +60,7 @@ config SPARC64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_SYSCALL_WRAPPERS
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 9661e9bc7bb6..7eb57d245044 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -12,7 +12,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
- hugetlb_setup(mm);
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 4b39f74d6ca0..e15538899f3d 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -27,8 +27,8 @@
#ifndef __ASSEMBLY__
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-struct mm_struct;
-extern void hugetlb_setup(struct mm_struct *mm);
+struct pt_regs;
+extern void hugetlb_setup(struct pt_regs *regs);
#endif
#define WANT_PAGE_VIRTUAL
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 7870be0f5adc..08fcce90316b 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -71,7 +71,6 @@
#define PMD_PADDR _AC(0xfffffffe,UL)
#define PMD_PADDR_SHIFT _AC(11,UL)
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_ISHUGE _AC(0x00000001,UL)
/* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge
@@ -86,7 +85,6 @@
#define PMD_HUGE_ACCESSED _AC(0x00000080,UL)
#define PMD_HUGE_EXEC _AC(0x00000040,UL)
#define PMD_HUGE_SPLITTING _AC(0x00000020,UL)
-#endif
/* PGDs point to PMD tables which are 8K aligned. */
#define PGD_PADDR _AC(0xfffffffc,UL)
@@ -628,6 +626,12 @@ static inline unsigned long pte_special(pte_t pte)
return pte_val(pte) & _PAGE_SPECIAL;
}
+static inline int pmd_large(pmd_t pmd)
+{
+ return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+ (PMD_ISHUGE | PMD_HUGE_PRESENT);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_young(pmd_t pmd)
{
@@ -646,12 +650,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
}
-static inline int pmd_large(pmd_t pmd)
-{
- return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
- (PMD_ISHUGE | PMD_HUGE_PRESENT);
-}
-
static inline int pmd_trans_splitting(pmd_t pmd)
{
return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index c1e01914fd98..2c7baa4c4505 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -118,6 +118,7 @@ extern unsigned long get_wchan(struct task_struct *);
extern struct task_struct *last_task_used_math;
#define cpu_relax() barrier()
+extern void (*sparc_idle)(void);
#endif
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index b4c258de4443..e696432b950d 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -157,17 +157,26 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
andn REG2, 0x7, REG2; \
add REG1, REG2, REG1;
- /* This macro exists only to make the PMD translator below easier
- * to read. It hides the ELF section switch for the sun4v code
- * patching.
+ /* These macros exist only to make the PMD translator below
+ * easier to read. They hide the ELF section switch for the
+ * sun4v code patching.
*/
-#define OR_PTE_BIT(REG, NAME) \
+#define OR_PTE_BIT_1INSN(REG, NAME) \
661: or REG, _PAGE_##NAME##_4U, REG; \
.section .sun4v_1insn_patch, "ax"; \
.word 661b; \
or REG, _PAGE_##NAME##_4V, REG; \
.previous;
+#define OR_PTE_BIT_2INSN(REG, TMP, NAME) \
+661: sethi %hi(_PAGE_##NAME##_4U), TMP; \
+ or REG, TMP, REG; \
+ .section .sun4v_2insn_patch, "ax"; \
+ .word 661b; \
+ mov -1, TMP; \
+ or REG, _PAGE_##NAME##_4V, REG; \
+ .previous;
+
/* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */
#define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \
661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \
@@ -214,12 +223,13 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
andn REG1, PMD_HUGE_PROTBITS, REG2; \
sllx REG2, PMD_PADDR_SHIFT, REG2; \
/* REG2 now holds PFN << PAGE_SHIFT */ \
- andcc REG1, PMD_HUGE_EXEC, %g0; \
- bne,a,pt %xcc, 1f; \
- OR_PTE_BIT(REG2, EXEC); \
-1: andcc REG1, PMD_HUGE_WRITE, %g0; \
+ andcc REG1, PMD_HUGE_WRITE, %g0; \
bne,a,pt %xcc, 1f; \
- OR_PTE_BIT(REG2, W); \
+ OR_PTE_BIT_1INSN(REG2, W); \
+1: andcc REG1, PMD_HUGE_EXEC, %g0; \
+ be,pt %xcc, 1f; \
+ nop; \
+ OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \
/* REG1 can now be clobbered, build final PTE */ \
1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \
ba,pt %xcc, PTE_LABEL; \
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index c83a937ead00..cbbad74b2e06 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -15,7 +15,7 @@
#define SO_PEERCRED 0x0040
#define SO_LINGER 0x0080
#define SO_OOBINLINE 0x0100
-/* To add :#define SO_REUSEPORT 0x0200 */
+#define SO_REUSEPORT 0x0200
#define SO_BSDCOMPAT 0x0400
#define SO_RCVLOWAT 0x0800
#define SO_SNDLOWAT 0x1000
@@ -66,6 +66,7 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 0x0027
+#define SO_LOCK_FILTER 0x0028
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 348fa1aeabce..eefda32b595e 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -20,6 +20,7 @@
#include <asm/uaccess.h>
#include <asm/auxio.h>
#include <asm/apc.h>
+#include <asm/processor.h>
/* Debugging
*
@@ -158,7 +159,7 @@ static int apc_probe(struct platform_device *op)
/* Assign power management IDLE handler */
if (!apc_no_idle)
- pm_idle = apc_swift_idle;
+ sparc_idle = apc_swift_idle;
printk(KERN_INFO "%s: power management initialized%s\n",
APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 291bb5de9ce0..a702d9ab019c 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -48,6 +48,10 @@ extern void sun4m_init_IRQ(void);
extern void sun4m_unmask_profile_irq(void);
extern void sun4m_clear_profile_irq(int cpu);
+/* sun4m_smp.c */
+void sun4m_cpu_pre_starting(void *arg);
+void sun4m_cpu_pre_online(void *arg);
+
/* sun4d_irq.c */
extern spinlock_t sun4d_imsk_lock;
@@ -60,6 +64,14 @@ extern int show_sun4d_interrupts(struct seq_file *, void *);
extern void sun4d_distribute_irqs(void);
extern void sun4d_free_irq(unsigned int irq, void *dev_id);
+/* sun4d_smp.c */
+void sun4d_cpu_pre_starting(void *arg);
+void sun4d_cpu_pre_online(void *arg);
+
+/* leon_smp.c */
+void leon_cpu_pre_starting(void *arg);
+void leon_cpu_pre_online(void *arg);
+
/* head_32.S */
extern unsigned int t_nmi[];
extern unsigned int linux_trap_ipi15_sun4d[];
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 2e424a576a36..dcf210811af4 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -5,6 +5,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
+#include <linux/sched.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 4e174321097d..708bca435219 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -9,6 +9,7 @@
#include <asm/leon_amba.h>
#include <asm/cpu_type.h>
#include <asm/leon.h>
+#include <asm/processor.h>
/* List of Systems that need fixup instructions around power-down instruction */
unsigned int pmc_leon_fixup_ids[] = {
@@ -69,9 +70,9 @@ static int __init leon_pmc_install(void)
if (sparc_cpu_model == sparc_leon) {
/* Assign power management IDLE handler */
if (pmc_leon_need_fixup())
- pm_idle = pmc_leon_idle_fixup;
+ sparc_idle = pmc_leon_idle_fixup;
else
- pm_idle = pmc_leon_idle;
+ sparc_idle = pmc_leon_idle;
printk(KERN_INFO "leon: power management initialized\n");
}
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 0f3fb6d9c8ef..9b40c9c12a0c 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -69,31 +69,19 @@ static inline unsigned long do_swap(volatile unsigned long *ptr,
return val;
}
-void __cpuinit leon_callin(void)
+void __cpuinit leon_cpu_pre_starting(void *arg)
{
- int cpuid = hard_smp_processor_id();
-
- local_ops->cache_all();
- local_ops->tlb_all();
leon_configure_cache_smp();
+}
- notify_cpu_starting(cpuid);
-
- /* Get our local ticker going. */
- register_percpu_ce(cpuid);
-
- calibrate_delay();
- smp_store_cpu_info(cpuid);
-
- local_ops->cache_all();
- local_ops->tlb_all();
+void __cpuinit leon_cpu_pre_online(void *arg)
+{
+ int cpuid = hard_smp_processor_id();
- /*
- * Unblock the master CPU _only_ when the scheduler state
- * of all secondary CPUs will be up-to-date, so after
- * the SMP initialization the master will be just allowed
- * to call the scheduler code.
- * Allow master to continue.
+ /* Allow master to continue. The master will then give us the
+ * go-ahead by setting the smp_commenced_mask and will wait without
+ * timeouts until our setup is completed fully (signified by
+ * our bit being set in the cpu_online_mask).
*/
do_swap(&cpu_callin_map[cpuid], 1);
@@ -110,9 +98,6 @@ void __cpuinit leon_callin(void)
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
mb();
-
- local_irq_enable();
- set_cpu_online(cpuid, true);
}
/*
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index dcbb62f63068..8b7297faca79 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -17,6 +17,7 @@
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/auxio.h>
+#include <asm/processor.h>
/* Debug
*
@@ -63,7 +64,7 @@ static int pmc_probe(struct platform_device *op)
#ifndef PMC_NO_IDLE
/* Assign power management IDLE handler */
- pm_idle = pmc_swift_idle;
+ sparc_idle = pmc_swift_idle;
#endif
printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index be8e862badaf..62eede13831a 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -43,8 +43,7 @@
* Power management idle function
* Set in pm platform drivers (apc.c and pmc.c)
*/
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
+void (*sparc_idle)(void);
/*
* Power-off handler instantiation for pm.h compliance
@@ -75,8 +74,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
for (;;) {
while (!need_resched()) {
- if (pm_idle)
- (*pm_idle)();
+ if (sparc_idle)
+ (*sparc_idle)();
else
cpu_relax();
}
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 1303021748c8..9f20566b0773 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -64,7 +64,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
- write_lock(&devtree_lock);
+ raw_spin_lock(&devtree_lock);
prevp = &dp->properties;
while (*prevp) {
struct property *prop = *prevp;
@@ -91,7 +91,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
}
prevp = &(*prevp)->next;
}
- write_unlock(&devtree_lock);
+ raw_spin_unlock(&devtree_lock);
mutex_unlock(&of_set_property_mutex);
/* XXX Update procfs if necessary... */
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 1271b3a27d4e..be5bdf93c767 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -554,10 +554,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
regs = pr->phys_addr;
iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
- if (!iommu)
- goto fatal_memory_error;
strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
- if (!strbuf)
+ if (!iommu || !strbuf)
goto fatal_memory_error;
op->dev.archdata.iommu = iommu;
@@ -656,6 +654,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
return;
fatal_memory_error:
+ kfree(iommu);
+ kfree(strbuf);
prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
}
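The error-path consolidation above leans on kfree(NULL) being a no-op: both allocations can funnel into a single label that frees whichever pointers were set. The idiom in user-space terms (free() gives the same NULL guarantee):

    #include <stdlib.h>

    static int setup_pair(char **pa, char **pb)
    {
            char *a = malloc(64);
            char *b = malloc(64);

            if (!a || !b)
                    goto fail;      /* one exit path covers both failures */
            *pa = a;
            *pb = b;
            return 0;
    fail:
            free(a);                /* free(NULL) is defined to do nothing */
            free(b);
            return -1;
    }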
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 79db45e5134a..9e7e6d718367 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -20,6 +20,7 @@
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
+#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -32,8 +33,10 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
+#include <asm/timer.h>
#include <asm/leon.h>
+#include "kernel.h"
#include "irq.h"
volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
@@ -294,6 +297,89 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
return ret;
}
+void __cpuinit arch_cpu_pre_starting(void *arg)
+{
+ local_ops->cache_all();
+ local_ops->tlb_all();
+
+ switch(sparc_cpu_model) {
+ case sun4m:
+ sun4m_cpu_pre_starting(arg);
+ break;
+ case sun4d:
+ sun4d_cpu_pre_starting(arg);
+ break;
+ case sparc_leon:
+ leon_cpu_pre_starting(arg);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __cpuinit arch_cpu_pre_online(void *arg)
+{
+ unsigned int cpuid = hard_smp_processor_id();
+
+ register_percpu_ce(cpuid);
+
+ calibrate_delay();
+ smp_store_cpu_info(cpuid);
+
+ local_ops->cache_all();
+ local_ops->tlb_all();
+
+ switch(sparc_cpu_model) {
+ case sun4m:
+ sun4m_cpu_pre_online(arg);
+ break;
+ case sun4d:
+ sun4d_cpu_pre_online(arg);
+ break;
+ case sparc_leon:
+ leon_cpu_pre_online(arg);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __cpuinit sparc_start_secondary(void *arg)
+{
+ unsigned int cpu;
+
+ /*
+ * SMP booting is extremely fragile in some architectures. So run
+ * the cpu initialization code first before anything else.
+ */
+ arch_cpu_pre_starting(arg);
+
+ preempt_disable();
+ cpu = smp_processor_id();
+
+ /* Invoke the CPU_STARTING notifier callbacks */
+ notify_cpu_starting(cpu);
+
+ arch_cpu_pre_online(arg);
+
+ /* Set the CPU in the cpu_online_mask */
+ set_cpu_online(cpu, true);
+
+ /* Enable local interrupts now */
+ local_irq_enable();
+
+ wmb();
+ cpu_idle();
+
+ /* We should never reach here! */
+ BUG();
+}
+
+void __cpuinit smp_callin(void)
+{
+ sparc_start_secondary(NULL);
+}
+
void smp_bogo(struct seq_file *m)
{
int i;
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index ddaea31de586..c9eb82f23d92 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -50,10 +50,9 @@ static inline void show_leds(int cpuid)
"i" (ASI_M_CTL));
}
-void __cpuinit smp4d_callin(void)
+void __cpuinit sun4d_cpu_pre_starting(void *arg)
{
int cpuid = hard_smp_processor_id();
- unsigned long flags;
/* Show we are alive */
cpu_leds[cpuid] = 0x6;
@@ -61,26 +60,20 @@ void __cpuinit smp4d_callin(void)
/* Enable level15 interrupt, disable level14 interrupt for now */
cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
+}
- local_ops->cache_all();
- local_ops->tlb_all();
+void __cpuinit sun4d_cpu_pre_online(void *arg)
+{
+ unsigned long flags;
+ int cpuid;
- notify_cpu_starting(cpuid);
- /*
- * Unblock the master CPU _only_ when the scheduler state
+ cpuid = hard_smp_processor_id();
+
+ /* Unblock the master CPU _only_ when the scheduler state
* of all secondary CPUs will be up-to-date, so after
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
- /* Get our local ticker going. */
- register_percpu_ce(cpuid);
-
- calibrate_delay();
- smp_store_cpu_info(cpuid);
- local_ops->cache_all();
- local_ops->tlb_all();
-
- /* Allow master to continue. */
sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
local_ops->cache_all();
local_ops->tlb_all();
@@ -106,16 +99,12 @@ void __cpuinit smp4d_callin(void)
local_ops->cache_all();
local_ops->tlb_all();
- local_irq_enable(); /* We don't allow PIL 14 yet */
-
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
barrier();
spin_lock_irqsave(&sun4d_imsk_lock, flags);
cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
- set_cpu_online(cpuid, true);
-
}
/*
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 128af7304288..8a65f158153d 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -34,30 +34,19 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
return val;
}
-void __cpuinit smp4m_callin(void)
+void __cpuinit sun4m_cpu_pre_starting(void *arg)
{
- int cpuid = hard_smp_processor_id();
-
- local_ops->cache_all();
- local_ops->tlb_all();
-
- notify_cpu_starting(cpuid);
-
- register_percpu_ce(cpuid);
-
- calibrate_delay();
- smp_store_cpu_info(cpuid);
+}
- local_ops->cache_all();
- local_ops->tlb_all();
+void __cpuinit sun4m_cpu_pre_online(void *arg)
+{
+ int cpuid = hard_smp_processor_id();
- /*
- * Unblock the master CPU _only_ when the scheduler state
- * of all secondary CPUs will be up-to-date, so after
- * the SMP initialization the master will be just allowed
- * to call the scheduler code.
+ /* Allow master to continue. The master will then give us the
+ * go-ahead by setting the smp_commenced_mask and will wait without
+ * timeouts until our setup is completed fully (signified by
+ * our bit being set in the cpu_online_mask).
*/
- /* Allow master to continue. */
swap_ulong(&cpu_callin_map[cpuid], 1);
/* XXX: What's up with all the flushes? */
@@ -75,10 +64,6 @@ void __cpuinit smp4m_callin(void)
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
mb();
-
- local_irq_enable();
-
- set_cpu_online(cpuid, true);
}
/*
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index af27acab4486..6cdb08cdabf0 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -79,18 +79,15 @@ cpu3_startup:
nop
/* Start this processor. */
- call smp4m_callin
+ call smp_callin
nop
- b,a smp_do_cpu_idle
+ b,a smp_panic
.text
.align 4
-smp_do_cpu_idle:
- call cpu_idle
- mov 0, %o0
-
+smp_panic:
call cpu_panic
nop
@@ -144,10 +141,10 @@ sun4d_cpu_startup:
nop
/* Start this processor. */
- call smp4d_callin
+ call smp_callin
nop
- b,a smp_do_cpu_idle
+ b,a smp_panic
__CPUINIT
.align 4
@@ -201,7 +198,7 @@ leon_smp_cpu_startup:
nop
/* Start this processor. */
- call leon_callin
+ call smp_callin
nop
- b,a smp_do_cpu_idle
+ b,a smp_panic
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index d4bdc7a62375..a313e4a9399b 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -136,12 +136,43 @@ tsb_miss_page_table_walk_sun4v_fastpath:
nop
/* It is a huge page, use huge page TSB entry address we
- * calculated above.
+ * calculated above. If the huge page TSB has not been
+ * allocated, setup a trap stack and call hugetlb_setup()
+ * to do so, then return from the trap to replay the TLB
+ * miss.
+ *
+ * This is necessary to handle the case of transparent huge
+ * pages where we don't really have a non-atomic context
+ * in which to allocate the hugepage TSB hash table. When
+ * the 'mm' faults in the hugepage for the first time, we
+ * thus handle it here. This also makes sure that we can
+ * allocate the TSB hash table on the correct NUMA node.
*/
TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
- ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
- cmp %g2, -1
- movne %xcc, %g2, %g1
+ ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
+ cmp %g1, -1
+ bne,pt %xcc, 60f
+ nop
+
+661: rdpr %pstate, %g5
+ wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
+ .section .sun4v_2insn_patch, "ax"
+ .word 661b
+ SET_GL(1)
+ nop
+ .previous
+
+ rdpr %tl, %g3
+ cmp %g3, 1
+ bne,pn %xcc, winfix_trampoline
+ nop
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call hugetlb_setup
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ nop
+
60:
#endif
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 097aee763af3..5062ff389e83 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -472,8 +472,13 @@ good_area:
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss = mm->context.huge_pte_count;
if (unlikely(mm_rss >
- mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
- tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+ mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
+ tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+ else
+ hugetlb_setup(regs);
+
+ }
#endif
return;
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 42c55df3aec3..01ee23dd724d 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
return 1;
}
+static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages,
+ int *nr)
+{
+ struct page *head, *page, *tail;
+ u32 mask;
+ int refs;
+
+ mask = PMD_HUGE_PRESENT;
+ if (write)
+ mask |= PMD_HUGE_WRITE;
+ if ((pmd_val(pmd) & mask) != mask)
+ return 0;
+
+ refs = 0;
+ head = pmd_page(pmd);
+ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ /* Any tail pages need their mapcount reference taken before we
+ * return.
+ */
+ while (refs--) {
+ if (PageTail(tail))
+ get_huge_page_tail(tail);
+ tail++;
+ }
+
+ return 1;
+}
+
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
pmd_t pmd = *pmdp;
next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+ if (unlikely(pmd_large(pmd))) {
+ if (!gup_huge_pmd(pmdp, pmd, addr, next,
+ write, pages, nr))
+ return 0;
+ } else if (!gup_pte_range(pmd, addr, next, write,
+ pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
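gup_huge_pmd() follows the usual lockless-GUP protocol: take the references on the compound head speculatively, then re-read the PMD and back everything out if it changed underneath. The shape of that protocol, reduced to a self-contained sketch (try_get_refs()/put_refs() stand in for page_cache_add_speculative() and put_page()):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int refcount = 1;         /* stands in for the page count */

    static bool try_get_refs(int refs)
    {
            int old = atomic_load(&refcount);

            do {
                    if (old == 0)           /* page already being freed */
                            return false;
            } while (!atomic_compare_exchange_weak(&refcount, &old, old + refs));
            return true;
    }

    static void put_refs(int refs)
    {
            atomic_fetch_sub(&refcount, refs);
    }

    /* Pin speculatively, then confirm the entry did not change under us. */
    static bool grab_speculative(_Atomic unsigned long *entry,
                                 unsigned long snapshot, int refs)
    {
            if (!try_get_refs(refs))
                    return false;
            if (atomic_load(entry) != snapshot) {
                    put_refs(refs);         /* lost the race: undo the pin */
                    return false;
            }
            return true;
    }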
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c3b72423c846..82bbf048a5b0 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
unsigned long tag;
+ if (unlikely(!tsb))
+ return;
+
tsb += ((address >> tsb_hash_shift) &
(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
tag = (address >> 22UL);
tsb_insert(tsb, tag, tte);
}
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+ if ((tlb_type == hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+ (tlb_type != hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+ return true;
+ return false;
+}
+#endif
+
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- unsigned long tsb_index, tsb_hash_shift, flags;
struct mm_struct *mm;
+ unsigned long flags;
pte_t pte = *ptep;
if (tlb_type != hypervisor) {
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
mm = vma->vm_mm;
- tsb_index = MM_TSB_BASE;
- tsb_hash_shift = PAGE_SHIFT;
-
spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
- if ((tlb_type == hypervisor &&
- (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
- (tlb_type != hypervisor &&
- (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
- tsb_index = MM_TSB_HUGE;
- tsb_hash_shift = HPAGE_SHIFT;
- }
- }
+ if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+ __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+ address, pte_val(pte));
+ else
#endif
-
- __update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
- address, pte_val(pte));
+ __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+ address, pte_val(pte));
spin_unlock_irqrestore(&mm->context.lock, flags);
}
@@ -2712,14 +2718,28 @@ static void context_reload(void *__data)
load_secondary_context(mm);
}
-void hugetlb_setup(struct mm_struct *mm)
+void hugetlb_setup(struct pt_regs *regs)
{
- struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+ struct mm_struct *mm = current->mm;
+ struct tsb_config *tp;
- if (likely(tp->tsb != NULL))
- return;
+ if (in_atomic() || !mm) {
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
+ regs->tpc = entry->fixup;
+ regs->tnpc = regs->tpc + 4;
+ return;
+ }
+ pr_alert("Unexpected HugeTLB setup in atomic context.\n");
+ die_if_kernel("HugeTSB in atomic", regs);
+ }
+
+ tp = &mm->context.tsb_block[MM_TSB_HUGE];
+ if (likely(tp->tsb == NULL))
+ tsb_grow(mm, MM_TSB_HUGE, 0);
- tsb_grow(mm, MM_TSB_HUGE, 0);
tsb_context_switch(mm);
smp_tsb_sync(mm);
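The in_atomic()/!mm branch is the exception-table pattern: if the trapping PC has a registered fixup, hugetlb_setup() rewinds tpc/tnpc to that fixup instead of attempting a sleeping allocation it cannot make. Conceptually (a sketch of the mechanism with hypothetical addresses, not the sparc implementation):

    /* Build-time table mapping "instruction that may fault" to "resume here". */
    struct exception_entry {
            unsigned long faulting_pc;
            unsigned long fixup_pc;
    };

    static const struct exception_entry ex_table[] = {
            { 0x1000, 0x2000 },     /* hypothetical addresses */
    };

    static unsigned long search_fixup(unsigned long pc)
    {
            for (unsigned int i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
                    if (ex_table[i].faulting_pc == pc)
                            return ex_table[i].fixup_pc;
            return 0;               /* no fixup: the fault is fatal */
    }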
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3e8fec391fe0..ba6ae7ffdc2c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm->context.huge_pte_count++;
else
mm->context.huge_pte_count--;
- if (mm->context.huge_pte_count == 1)
- hugetlb_setup(mm);
+
+ /* Do not try to allocate the TSB hash table if we
+ * don't have one already. We have various locks held
+ * and thus we'll end up doing a GFP_KERNEL allocation
+ * in an atomic context.
+ *
+ * Instead, we let the first TLB miss on a hugepage
+ * take care of this.
+ */
}
if (!pmd_none(orig)) {
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 7f6474347491..428982b9becf 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -314,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
retry_tsb_alloc:
gfp_flags = GFP_KERNEL;
if (new_size > (PAGE_SIZE * 2))
- gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
+ gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
gfp_flags, numa_node_id());
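The one-character change is the point here: the old code assigned __GFP_NOWARN | __GFP_NORETRY for multi-page TSBs, silently dropping GFP_KERNEL and with it the normal reclaim behaviour; "|=" keeps the base flags and merely adds the fail-quietly modifiers for large orders.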
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 875d008828b8..36136e81f5d8 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -121,6 +121,7 @@ config DEBUG_COPY_FROM_USER
def_bool n
config HVC_TILE
+ depends on TTY
select HVC_DRIVER
def_bool y
@@ -140,6 +141,8 @@ config ARCH_DEFCONFIG
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
menu "Tilera-specific configuration"
config NR_CPUS
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 2a9b293fece6..31672918064c 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
#define iowrite32 writel
#define iowrite64 writeq
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+
+static inline void memset_io(volatile void *dst, int val, size_t len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
writel(*(u32 *)(src + x), dst + x);
}
+#endif
+
/*
* The Tile architecture does not support IOPORT, even with PCI.
* Unfortunately we can't yet simply not declare these methods,
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index b4e96fef2cf8..241c0bb60b12 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -18,32 +18,20 @@
#include <arch/interrupts.h>
#include <arch/chip.h>
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
-
/*
* The set of interrupts we want to allow when interrupts are nominally
* disabled. The remainder are effectively "NMI" interrupts from
* the point of view of the generic Linux code. Note that synchronous
* interrupts (aka "non-queued") are not blocked by the mask in any case.
*/
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-
-#else
-
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
- (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
#define LINUX_MASKABLE_INTERRUPTS \
- (~(INT_MASK(INT_PERF_COUNT)))
-#endif
+ (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+ (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
#endif
#ifndef __ASSEMBLY__
@@ -126,7 +114,7 @@
* to know our current state.
*/
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
/* Disable interrupts. */
#define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
- (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+ (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
- (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+ (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
{ \
- movei tmp0, -1; \
+ movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \
{ \
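Writing the masks as (1ULL << n) instead of INT_MASK() keeps this header independent of the macro that the uapi header below guards with #ifndef __KERNEL__, and it sidesteps a classic C pitfall: shifting a plain int by 32 or more is undefined, and the _HI variant above subtracts 32 from both PERF_COUNT interrupts, so they sit at bit 32 or higher on split-SPR chips. For example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int bit = 49;                    /* an interrupt above bit 31 */
            uint64_t ok = 1ULL << bit;       /* well-defined 64-bit shift */
            /* uint64_t bad = 1 << bit; */   /* undefined: int is 32 bits */

            printf("mask = %#llx\n", (unsigned long long)ok);
            return 0;
    }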
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h
index 96b5710505b6..2efe3f68b2d6 100644
--- a/arch/tile/include/uapi/arch/interrupts_32.h
+++ b/arch/tile/include/uapi/arch/interrupts_32.h
@@ -15,6 +15,7 @@
#ifndef __ARCH_INTERRUPTS_H__
#define __ARCH_INTERRUPTS_H__
+#ifndef __KERNEL__
/** Mask for an interrupt. */
/* Note: must handle breaking interrupts into high and low words manually. */
#define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
#ifndef __ASSEMBLER__
#define INT_MASK(intno) (1ULL << (intno))
#endif
+#endif
/** Where a given interrupt executes */
@@ -92,216 +94,216 @@
#ifndef __ASSEMBLER__
#define QUEUED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#define NONQUEUED_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define CRITICAL_MASKED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#define CRITICAL_UNMASKED_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define MASKABLE_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#define UNMASKABLE_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define SYNC_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define NON_SYNC_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#endif /* !__ASSEMBLER__ */
#endif /* !__ARCH_INTERRUPTS_H__ */
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h
index 5bb58b2e4e6f..13c9f9182348 100644
--- a/arch/tile/include/uapi/arch/interrupts_64.h
+++ b/arch/tile/include/uapi/arch/interrupts_64.h
@@ -15,6 +15,7 @@
#ifndef __ARCH_INTERRUPTS_H__
#define __ARCH_INTERRUPTS_H__
+#ifndef __KERNEL__
/** Mask for an interrupt. */
#ifdef __ASSEMBLER__
/* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
#else
#define INT_MASK(intno) (1ULL << (intno))
#endif
+#endif
/** Where a given interrupt executes */
@@ -85,192 +87,192 @@
#ifndef __ASSEMBLER__
#define QUEUED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#define NONQUEUED_INTERRUPTS ( \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
0)
#define CRITICAL_MASKED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
0)
#define CRITICAL_UNMASKED_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#define MASKABLE_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
0)
#define UNMASKABLE_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#define SYNC_INTERRUPTS ( \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
0)
#define NON_SYNC_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#endif /* !__ASSEMBLER__ */
#endif /* !__ARCH_INTERRUPTS_H__ */
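Note on the two tile header conversions above: the interrupts_64.h hunk hides the INT_MASK() helper from kernel builds behind the new #ifndef __KERNEL__ guard, and every kernel-side user is rewritten to the open-coded (1ULL << INT_xxx), which is exactly the value INT_MASK() produced in C. A standalone sketch of the equivalence and of the mask test that the stack.c hunk later in this series performs on p->faultnum; the interrupt numbers here are illustrative, not the real tile encodings:

        /* Standalone sketch; compile with any C compiler. */
        #include <stdio.h>

        #define INT_MEM_ERROR   0       /* illustrative numbering only */
        #define INT_TILE_TIMER  23

        #define INT_MASK(intno) (1ULL << (intno))       /* old spelling */

        #define QUEUED_INTERRUPTS ( \
                (1ULL << INT_MEM_ERROR) | \
                (1ULL << INT_TILE_TIMER) | \
                0)

        int main(void)
        {
                int faultnum = INT_TILE_TIMER;

                /* INT_MASK(n) and (1ULL << n) are the same 64-bit value,
                 * so the open-coded form can test a fault number directly. */
                if (INT_MASK(faultnum) == (1ULL << faultnum) &&
                    ((1ULL << faultnum) & QUEUED_INTERRUPTS))
                        printf("interrupt %d is queued\n", faultnum);
                return 0;
        }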
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 54bc9a6678e8..4ea080902654 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1035,7 +1035,9 @@ handle_syscall:
/* Ensure that the syscall number is within the legal range. */
{
moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
blbs r30, .Lcompat_syscall
+#endif
}
{
cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
j .Lresume_userspace /* jump into middle of interrupt_return */
}
+#ifdef CONFIG_COMPAT
.Lcompat_syscall:
/*
* Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
{ move r15, r4; addxi r4, r4, 0 }
{ move r16, r5; addxi r5, r5, 0 }
j .Lload_syscall_pointer
+#endif
.Linvalid_syscall:
/* Report an invalid syscall back to the user program */
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 0e5661e7d00d..caf93ae11793 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p)
{
- struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+ struct pt_regs *childregs = task_pt_regs(p);
unsigned long ksp;
unsigned long *callee_regs;
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index baa3d905fee2..d1b5c913ae72 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -16,6 +16,7 @@
#include <linux/reboot.h>
#include <linux/smp.h>
#include <linux/pm.h>
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
/* No interesting distinction to be made here. */
void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6a649a4462d3..d1e15f7b59c6 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -31,6 +31,7 @@
#include <linux/timex.h>
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
+#include <linux/screen_info.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
/* Chip information */
char chip_model[64] __write_once;
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index b2f44c28dda6..ed258b8ae320 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
p->pc, p->sp, p->ex1);
p = NULL;
}
- if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+ if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
return p;
return NULL;
}
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(NULL, trace);
}
+EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index 669fcdba31ea..2298cb1daff7 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -18,7 +18,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && MODULES && EXPERIMENTAL
+ depends on HAVE_KVM && MODULES
select PREEMPT_NOTIFIERS
select ANON_INODES
---help---
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index db4fb89e12d8..8f8ad814b139 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -12,6 +12,7 @@
* more details.
*/
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
#endif
}
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c
index fdc403614d12..75947edccb26 100644
--- a/arch/tile/lib/cpumask.c
+++ b/arch/tile/lib/cpumask.c
@@ -16,6 +16,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/smp.h>
+#include <linux/export.h>
/*
* Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
} while (*bp != '\0' && *bp != '\n');
return 0;
}
+EXPORT_SYMBOL(bitmap_parselist_crop);
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index dd5f0a33fdaf..4385cb6fa00a 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
EXPORT_SYMBOL(hv_dev_close);
EXPORT_SYMBOL(hv_sysconf);
EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
/* libgcc.a */
uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
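Several of the tile hunks in this series (pm_power_off, save_stack_trace, finv_buffer_remote, bitmap_parselist_crop, and the hv RTC calls above, plus homecache_change_page_home below) only add missing symbol exports so modules can link against functions that already existed. A minimal sketch of the pattern, assuming only <linux/export.h>; the function itself is hypothetical:

        #include <linux/export.h>

        /* Hypothetical helper; the export line is the whole point. */
        int demo_helper(int x)
        {
                return x * 2;
        }
        EXPORT_SYMBOL_GPL(demo_helper); /* visible to GPL-compatible modules */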
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 5f7868dcd6d4..1ae911939a18 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
__set_pte(ptep, pte_set_home(pteval, home));
}
}
+EXPORT_SYMBOL(homecache_change_page_home);
struct page *homecache_alloc_pages(gfp_t gfp_mask,
unsigned int order, int home)
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 648121b037d5..bceee6623b00 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -12,6 +12,7 @@ config UML
select GENERIC_CPU_DEVICES
select GENERIC_IO
select GENERIC_CLOCKEVENTS
+ select TTY # Needed for line.c
config MMU
bool
diff --git a/arch/um/Kconfig.net b/arch/um/Kconfig.net
index 3160b1a5adb7..820a56f00332 100644
--- a/arch/um/Kconfig.net
+++ b/arch/um/Kconfig.net
@@ -157,7 +157,7 @@ config UML_NET_MCAST
config UML_NET_PCAP
bool "pcap transport"
- depends on UML_NET && EXPERIMENTAL
+ depends on UML_NET
help
The pcap transport makes a pcap packet stream on the host look
like an ethernet device inside UML. This is useful for making
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index bf87f25eb2de..a7520c90f62d 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -45,8 +45,8 @@ config HOSTFS
say Y or M here; otherwise say N.
config HPPFS
- tristate "HoneyPot ProcFS (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PROC_FS
+ tristate "HoneyPot ProcFS"
+ depends on PROC_FS
help
hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc
entries to be overridden, removed, or fabricated from the host.
@@ -96,7 +96,7 @@ config MAGIC_SYSRQ
unless you really know what this hack does.
config SMP
- bool "Symmetric multi-processing support (EXPERIMENTAL)"
+ bool "Symmetric multi-processing support"
default n
depends on BROKEN
help
@@ -126,7 +126,7 @@ config NR_CPUS
default "32"
config HIGHMEM
- bool "Highmem support (EXPERIMENTAL)"
+ bool "Highmem support"
depends on !64BIT && BROKEN
default n
help
diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h
index 02b5a76e98d9..78f1b8999964 100644
--- a/arch/um/drivers/chan.h
+++ b/arch/um/drivers/chan.h
@@ -27,8 +27,7 @@ struct chan {
void *data;
};
-extern void chan_interrupt(struct line *line,
- struct tty_struct *tty, int irq);
+extern void chan_interrupt(struct line *line, int irq);
extern int parse_chan_pair(char *str, struct line *line, int device,
const struct chan_opts *opts, char **error_out);
extern int write_chan(struct chan *chan, const char *buf, int len,
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index e9a0abc6a32f..15c553c239a1 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -81,12 +81,6 @@ static const struct chan_ops not_configged_ops = {
};
#endif /* CONFIG_NOCONFIG_CHAN */
-static void tty_receive_char(struct tty_struct *tty, char ch)
-{
- if (tty)
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
-}
-
static int open_one_chan(struct chan *chan)
{
int fd, err;
@@ -137,11 +131,9 @@ void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
static void line_timer_cb(struct work_struct *work)
{
struct line *line = container_of(work, struct line, task.work);
- struct tty_struct *tty = tty_port_tty_get(&line->port);
if (!line->throttled)
- chan_interrupt(line, tty, line->driver->read_irq);
- tty_kref_put(tty);
+ chan_interrupt(line, line->driver->read_irq);
}
int enable_chan(struct line *line)
@@ -552,8 +544,9 @@ int parse_chan_pair(char *str, struct line *line, int device,
return 0;
}
-void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
+void chan_interrupt(struct line *line, int irq)
{
+ struct tty_port *port = &line->port;
struct chan *chan = line->chan_in;
int err;
char c;
@@ -562,21 +555,24 @@ void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
goto out;
do {
- if (tty && !tty_buffer_request_room(tty, 1)) {
+ if (!tty_buffer_request_room(port, 1)) {
schedule_delayed_work(&line->task, 1);
goto out;
}
err = chan->ops->read(chan->fd, &c, chan->data);
if (err > 0)
- tty_receive_char(tty, c);
+ tty_insert_flip_char(port, c, TTY_NORMAL);
} while (err > 0);
if (err == 0)
reactivate_fd(chan->fd, irq);
if (err == -EIO) {
if (chan->primary) {
- if (tty != NULL)
+ struct tty_struct *tty = tty_port_tty_get(&line->port);
+ if (tty != NULL) {
tty_hangup(tty);
+ tty_kref_put(tty);
+ }
if (line->chan_out != chan)
close_one_chan(line->chan_out, 1);
}
@@ -585,6 +581,5 @@ void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
return;
}
out:
- if (tty)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
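The chan_interrupt() rework above is a straight conversion to the tty_port-based flip-buffer API: received bytes go into line->port directly, and a struct tty_struct reference is taken only for the hangup path. A minimal sketch of that receive shape, assuming the 3.9-era calls used in the hunk (tty_buffer_request_room/tty_insert_flip_char/tty_flip_buffer_push operating on a tty_port); the function name is hypothetical:

        #include <linux/tty.h>
        #include <linux/tty_flip.h>

        /* Hypothetical receive path: push bytes through a tty_port
         * without ever holding a tty_struct reference. */
        static void demo_receive(struct tty_port *port,
                                 const unsigned char *buf, int count)
        {
                int i;

                for (i = 0; i < count; i++) {
                        if (!tty_buffer_request_room(port, 1))
                                break;          /* buffer full; retry later */
                        tty_insert_flip_char(port, buf[i], TTY_NORMAL);
                }
                tty_flip_buffer_push(port);     /* hand the data to the ldisc */
        }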
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 9ffc28bd4b7a..f1b38571f94e 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -19,11 +19,10 @@ static irqreturn_t line_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
- struct tty_struct *tty = tty_port_tty_get(&line->port);
if (line)
- chan_interrupt(line, tty, irq);
- tty_kref_put(tty);
+ chan_interrupt(line, irq);
+
return IRQ_HANDLED;
}
@@ -234,7 +233,7 @@ void line_unthrottle(struct tty_struct *tty)
struct line *line = tty->driver_data;
line->throttled = 0;
- chan_interrupt(line, tty, line->driver->read_irq);
+ chan_interrupt(line, line->driver->read_irq);
/*
* Maybe there is enough stuff pending that calling the interrupt
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index b1314ebf1f72..d8926c303629 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -274,8 +274,8 @@ static void uml_net_poll_controller(struct net_device *dev)
static void uml_net_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRIVER_NAME);
- strcpy(info->version, "42");
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, "42", sizeof(info->version));
}
static const struct ethtool_ops uml_net_ethtool_ops = {
@@ -293,8 +293,9 @@ static void uml_net_user_timer_expire(unsigned long _conn)
#endif
}
-static int setup_etheraddr(char *str, unsigned char *addr, char *name)
+static void setup_etheraddr(struct net_device *dev, char *str)
{
+ unsigned char *addr = dev->dev_addr;
char *end;
int i;
@@ -334,13 +335,12 @@ static int setup_etheraddr(char *str, unsigned char *addr, char *name)
addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
addr[5]);
}
- return 0;
+ return;
random:
printk(KERN_INFO
- "Choosing a random ethernet address for device %s\n", name);
- eth_random_addr(addr);
- return 1;
+ "Choosing a random ethernet address for device %s\n", dev->name);
+ eth_hw_addr_random(dev);
}
static DEFINE_SPINLOCK(devices_lock);
@@ -392,7 +392,6 @@ static void eth_configure(int n, void *init, char *mac,
struct net_device *dev;
struct uml_net_private *lp;
int err, size;
- int random_mac;
size = transport->private_size + sizeof(struct uml_net_private);
@@ -419,9 +418,9 @@ static void eth_configure(int n, void *init, char *mac,
*/
snprintf(dev->name, sizeof(dev->name), "eth%d", n);
- random_mac = setup_etheraddr(mac, device->mac, dev->name);
+ setup_etheraddr(dev, mac);
- printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac);
+ printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);
lp = netdev_priv(dev);
/* This points to the transport private data. It's still clear, but we
@@ -468,17 +467,12 @@ static void eth_configure(int n, void *init, char *mac,
init_timer(&lp->tl);
spin_lock_init(&lp->lock);
lp->tl.function = uml_net_user_timer_expire;
- memcpy(lp->mac, device->mac, sizeof(lp->mac));
+ memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));
if ((transport->user->init != NULL) &&
((*transport->user->init)(&lp->user, dev) != 0))
goto out_unregister;
- /* don't use eth_mac_addr, it will not work here */
- memcpy(dev->dev_addr, device->mac, ETH_ALEN);
- if (random_mac)
- dev->addr_assign_type |= NET_ADDR_RANDOM;
-
dev->mtu = transport->user->mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
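The setup_etheraddr() change above drops the random_mac bookkeeping because eth_hw_addr_random() both fills dev->dev_addr and marks it NET_ADDR_RANDOM in one call, where the old code paired eth_random_addr() with a manual addr_assign_type update. A minimal sketch of the two forms, assuming this era's netdev API; the helper name is hypothetical:

        #include <linux/etherdevice.h>

        /* Hypothetical helper showing old vs. new random-MAC assignment. */
        static void demo_assign_random_mac(struct net_device *dev)
        {
                /* Old, open-coded form:
                 *      u8 addr[ETH_ALEN];
                 *      eth_random_addr(addr);
                 *      memcpy(dev->dev_addr, addr, ETH_ALEN);
                 *      dev->addr_assign_type |= NET_ADDR_RANDOM;
                 */
                eth_hw_addr_random(dev);        /* new: one call does both */
        }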
diff --git a/arch/um/include/shared/net_kern.h b/arch/um/include/shared/net_kern.h
index 5c367f22595b..012ac87d4900 100644
--- a/arch/um/include/shared/net_kern.h
+++ b/arch/um/include/shared/net_kern.h
@@ -18,7 +18,6 @@ struct uml_net {
struct net_device *dev;
struct platform_device pdev;
int index;
- unsigned char mac[ETH_ALEN];
};
struct uml_net_private {
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 62bad9fed03e..872d7e22d847 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,11 +45,6 @@ static const char * const processor_modes[] = {
"UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
};
-/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way.
- */
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 225543bf45a5..63c0431daa3a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1,7 +1,7 @@
# Select 32 or 64 bit
config 64BIT
bool "64-bit kernel" if ARCH = "x86"
- default ARCH = "x86_64"
+ default ARCH != "i386"
---help---
Say yes to build a 64-bit kernel - formerly known as x86_64
Say no to build a 32-bit kernel - formerly known as i386
@@ -28,7 +28,6 @@ config X86
select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
- select HAVE_IRQ_WORK
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select HAVE_MEMBLOCK
@@ -40,10 +39,12 @@ config X86
select HAVE_DMA_CONTIGUOUS if !SWIOTLB
select HAVE_KRETPROBES
select HAVE_OPTPROBES
+ select HAVE_KPROBES_ON_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FENTRY if X86_64
select HAVE_C_RECORDMCOUNT
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_FP_TEST
@@ -106,6 +107,7 @@ config X86
select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
select GENERIC_TIME_VSYSCALL if X86_64
select KTIME_SCALAR if X86_32
+ select ALWAYS_USE_PERSISTENT_CLOCK
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HAVE_CONTEXT_TRACKING if X86_64
@@ -114,6 +116,7 @@ config X86
select MODULES_USE_ELF_RELA if X86_64
select CLONE_BACKWARDS if X86_32
select GENERIC_SIGALTSTACK
+ select ARCH_USE_BUILTIN_BSWAP
config INSTRUCTION_DECODER
def_bool y
@@ -222,7 +225,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
config HAVE_INTEL_TXT
def_bool y
- depends on EXPERIMENTAL && INTEL_IOMMU && ACPI
+ depends on INTEL_IOMMU && ACPI
config X86_32_SMP
def_bool y
@@ -320,6 +323,10 @@ config X86_BIGSMP
---help---
This option is needed for the systems that have more than 8 CPUs
+config GOLDFISH
+ def_bool y
+ depends on X86_GOLDFISH
+
if X86_32
config X86_EXTENDED_PLATFORM
bool "Support for extended (non-PC) x86 platforms"
@@ -402,6 +409,14 @@ config X86_UV
# Following is an alphabetically sorted list of 32 bit extended platforms
# Please maintain the alphabetic order if and when there are additions
+config X86_GOLDFISH
+ bool "Goldfish (Virtual Platform)"
+ depends on X86_32
+ ---help---
+ Enable support for the Goldfish virtual platform used primarily
+ for Android development. Unless you are building for the Android
+ Goldfish emulator say N here.
+
config X86_INTEL_CE
bool "CE4100 TV platform"
depends on PCI
@@ -454,6 +469,16 @@ config X86_MDFLD
endif
+config X86_INTEL_LPSS
+ bool "Intel Low Power Subsystem Support"
+ depends on ACPI
+ select COMMON_CLK
+ ---help---
+ Select to build support for the Intel Low Power Subsystem, as
+ found on the Intel Lynxpoint PCH. Selecting this option enables
+ infrastructure such as the clock tree (common clock framework)
+ needed by the LPSS peripheral drivers.
+
config X86_RDC321X
bool "RDC R-321x SoC"
depends on X86_32
@@ -617,7 +642,7 @@ config PARAVIRT
config PARAVIRT_SPINLOCKS
bool "Paravirtualization layer for spinlocks"
- depends on PARAVIRT && SMP && EXPERIMENTAL
+ depends on PARAVIRT && SMP
---help---
Paravirtualized spinlocks allow a pvops backend to replace the
spinlock implementation with something virtualization-friendly
@@ -729,7 +754,7 @@ config GART_IOMMU
config CALGARY_IOMMU
bool "IBM Calgary IOMMU support"
select SWIOTLB
- depends on X86_64 && PCI && EXPERIMENTAL
+ depends on X86_64 && PCI
---help---
Support for hardware IOMMUs in IBM's xSeries x366 and x460
systems. Needed to run systems with more than 3GB of memory
@@ -771,7 +796,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
- depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+ depends on X86_64 && SMP && DEBUG_KERNEL
select CPUMASK_OFFSTACK
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
@@ -1107,7 +1132,6 @@ config HIGHMEM64G
endchoice
choice
- depends on EXPERIMENTAL
prompt "Memory split" if EXPERT
default VMSPLIT_3G
depends on X86_32
@@ -1184,7 +1208,7 @@ config DIRECT_GBPAGES
config NUMA
bool "Numa Memory Allocation and Scheduler Support"
depends on SMP
- depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
+ depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI))
default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
---help---
Enable NUMA (Non Uniform Memory Access) support.
@@ -1279,7 +1303,7 @@ config ARCH_DISCONTIGMEM_DEFAULT
config ARCH_SPARSEMEM_ENABLE
def_bool y
- depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
+ depends on X86_64 || NUMA || X86_32 || X86_32_NON_STANDARD
select SPARSEMEM_STATIC if X86_32
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
@@ -1593,8 +1617,7 @@ config CRASH_DUMP
For more details see Documentation/kdump/kdump.txt
config KEXEC_JUMP
- bool "kexec jump (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "kexec jump"
depends on KEXEC && HIBERNATION
---help---
Jump between original kernel and kexeced kernel and invoke
@@ -1912,6 +1935,7 @@ config APM_DO_ENABLE
this feature.
config APM_CPU_IDLE
+ depends on CPU_IDLE
bool "Make CPU Idle calls when idle"
---help---
Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
@@ -2037,7 +2061,7 @@ config PCI_MMCONFIG
config PCI_CNB20LE_QUIRK
bool "Read CNB20LE Host Bridge Windows" if EXPERT
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
Read the PCI windows out of the CNB20LE host bridge. This allows
PCI hotplug to work on systems with the CNB20LE chipset which do
@@ -2188,6 +2212,15 @@ config GEOS
---help---
This option enables system support for the Traverse Technologies GEOS.
+config TS5500
+ bool "Technologic Systems TS-5500 platform support"
+ depends on MELAN
+ select CHECK_SIGNATURE
+ select NEW_LEDS
+ select LEDS_CLASS
+ ---help---
+ This option enables system support for the Technologic Systems TS-5500.
+
endif # X86_32
config AMD_NB
@@ -2232,8 +2265,8 @@ config IA32_AOUT
Support old a.out binaries in the 32bit emulation.
config X86_X32
- bool "x32 ABI for 64-bit mode (EXPERIMENTAL)"
- depends on X86_64 && IA32_EMULATION && EXPERIMENTAL
+ bool "x32 ABI for 64-bit mode"
+ depends on X86_64 && IA32_EMULATION
---help---
Include code to run binaries for the x32 native 32-bit ABI
for 64-bit processors. An x32 process gets access to the
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index e71fc4279aab..5c477260294f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -2,7 +2,11 @@
# select defconfig based on actual architecture
ifeq ($(ARCH),x86)
+ ifeq ($(shell uname -m),x86_64)
+ KBUILD_DEFCONFIG := x86_64_defconfig
+ else
KBUILD_DEFCONFIG := i386_defconfig
+ endif
else
KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 88f7ff6da404..7cb56c6ca351 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -325,6 +325,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
{
real_mode = rmode;
+ sanitize_boot_params(real_mode);
+
if (real_mode->screen_info.orig_video_mode == 7) {
vidmem = (char *) 0xb0000;
vidport = 0x3b4;
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 0e6dc0ee0eea..674019d8e235 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -18,6 +18,7 @@
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
+#include <asm/bootparam_utils.h>
#define BOOT_BOOT_H
#include "../ctype.h"
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5598547281a7..94447086e551 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,3 +1,4 @@
+# CONFIG_64BIT is not set
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1b9c22bea8a7..a0795da22c02 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -40,10 +40,6 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
-#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
-#define HAS_CTR
-#endif
-
#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif
@@ -395,12 +391,6 @@ static int ablk_ctr_init(struct crypto_tfm *tfm)
return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
-#ifdef HAS_CTR
-static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
-{
- return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
-}
-#endif
#endif
#ifdef HAS_PCBC
@@ -1158,33 +1148,6 @@ static struct crypto_alg aesni_algs[] = { {
.maxauthsize = 16,
},
},
-#ifdef HAS_CTR
-}, {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_rfc3686_ctr_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- .geniv = "seqiv",
- },
- },
-#endif
#endif
#ifdef HAS_PCBC
}, {
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 102ff7cb3e41..142c4ceff112 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -207,7 +207,7 @@ sysexit_from_sys_call:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
- sti
+ ENABLE_INTERRUPTS(CLBR_NONE)
movl %eax,%esi /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */
jbe 1f
@@ -217,7 +217,7 @@ sysexit_from_sys_call:
call __audit_syscall_exit
movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
- cli
+ DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz \exit
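The sti/cli replacements above matter because ENABLE_INTERRUPTS()/DISABLE_INTERRUPTS() expand to the paravirt-patchable interrupt-flag operations, which a hypervisor backend such as Xen can override; raw sti/cli would bypass that hook in the audit path. A conceptual C sketch of the indirection, with purely illustrative names rather than the real pv_ops layout:

        /* Illustrative names only; not the real pv_ops structures. */
        static void native_irq_enable(void)  { asm volatile("sti"); }
        static void native_irq_disable(void) { asm volatile("cli"); }

        struct demo_pv_irq_ops {
                void (*irq_enable)(void);       /* native backend: sti */
                void (*irq_disable)(void);      /* native backend: cli */
        };

        static struct demo_pv_irq_ops demo_pv_irq = {
                .irq_enable  = native_irq_enable,
                .irq_disable = native_irq_disable,
        };

        static void demo_enable_interrupts(void)
        {
                demo_pv_irq.irq_enable();       /* a paravirt guest patches this */
        }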
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0c44630d1789..b31bf97775fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,10 +49,6 @@
/* Asm macros */
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
int __acpi_acquire_global_lock(unsigned int *lock);
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index b3341e9cd8fd..a54ee1d054d9 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -81,6 +81,23 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
+static inline u16 amd_get_node_id(struct pci_dev *pdev)
+{
+ struct pci_dev *misc;
+ int i;
+
+ for (i = 0; i != amd_nb_num(); i++) {
+ misc = node_to_amd_nb(i)->misc;
+
+ if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
+ PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
+ return i;
+ }
+
+ WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
+ return 0;
+}
+
#else
#define amd_nb_num(x) 0
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
new file mode 100644
index 000000000000..5b5e9cb774b5
--- /dev/null
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_BOOTPARAM_UTILS_H
+#define _ASM_X86_BOOTPARAM_UTILS_H
+
+#include <asm/bootparam.h>
+
+/*
+ * This file is included from multiple environments. Do not
+ * add completing #includes to make it standalone.
+ */
+
+/*
+ * Deal with bootloaders which fail to initialize unknown fields in
+ * boot_params to zero. The fields in this list are taken from
+ * analysis of kexec-tools; if other broken bootloaders initialize a
+ * different set of fields we will need to figure out how to disambiguate.
+ *
+ */
+static void sanitize_boot_params(struct boot_params *boot_params)
+{
+ if (boot_params->sentinel) {
+ /* fields in boot_params are not valid, clear them */
+ memset(&boot_params->olpc_ofw_header, 0,
+ (char *)&boot_params->alt_mem_k -
+ (char *)&boot_params->olpc_ofw_header);
+ memset(&boot_params->kbd_status, 0,
+ (char *)&boot_params->hdr -
+ (char *)&boot_params->kbd_status);
+ memset(&boot_params->_pad7[0], 0,
+ (char *)&boot_params->edd_mbr_sig_buffer[0] -
+ (char *)&boot_params->_pad7[0]);
+ memset(&boot_params->_pad8[0], 0,
+ (char *)&boot_params->eddbuf[0] -
+ (char *)&boot_params->_pad8[0]);
+ memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+ }
+}
+
+#endif /* _ASM_X86_BOOTPARAM_UTILS_H */
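sanitize_boot_params() above zeroes only the suspect spans of boot_params, computing each span's length as the byte distance between two member addresses. A standalone sketch of that pointer-difference memset pattern; the struct here is illustrative:

        #include <stdio.h>
        #include <string.h>

        /* Illustrative struct: 'junk' models a field range a broken
         * loader may leave uninitialized between fields we must keep. */
        struct demo_params {
                int keep_a;
                char junk[16];
                int keep_b;
        };

        static void demo_sanitize(struct demo_params *p)
        {
                /* Zero from 'junk' up to, but not including, 'keep_b'. */
                memset(&p->junk, 0, (char *)&p->keep_b - (char *)&p->junk);
        }

        int main(void)
        {
                struct demo_params p = { 1, "garbage", 2 };

                demo_sanitize(&p);
                printf("%d %d %d\n", p.keep_a, p.junk[0], p.keep_b); /* 1 0 2 */
                return 0;
        }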
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 2d9075e863a0..93fe929d1cee 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -167,6 +167,7 @@
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9a25b522d377..86cb51e1ca96 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -44,7 +44,6 @@
#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
-#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 434e2106cc87..b18df579c0e9 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -80,9 +80,9 @@ extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
#ifdef CONFIG_PCI_MSI
-extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
+extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
#else
-static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
+static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
{
return -EINVAL;
}
@@ -111,6 +111,7 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
static inline int hpet_enable(void) { return 0; }
static inline int is_hpet_enabled(void) { return 0; }
#define hpet_readl(a) 0
+#define default_setup_hpet_msi NULL
#endif
#endif /* _ASM_X86_HPET_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index eb92a6ed2be7..10a78c3d3d5a 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -101,6 +101,7 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
irq_attr->polarity = polarity;
}
+/* Intel specific interrupt remapping information */
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
@@ -108,6 +109,12 @@ struct irq_2_iommu {
u8 irte_mask;
};
+/* AMD specific interrupt remapping information */
+struct irq_2_irte {
+ u16 devid; /* Device ID for IRTE table */
+ u16 index; /* Index into IRTE table */
+};
+
/*
* This is performance-critical, we want to do it O(1)
*
@@ -120,7 +127,11 @@ struct irq_cfg {
u8 vector;
u8 move_in_progress : 1;
#ifdef CONFIG_IRQ_REMAP
- struct irq_2_iommu irq_2_iommu;
+ u8 remapped : 1;
+ union {
+ struct irq_2_iommu irq_2_iommu;
+ struct irq_2_irte irq_2_irte;
+ };
#endif
};
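The irq_cfg change above is the classic discriminated-union trick: the new remapped bit records that remapping data is valid, and the anonymous union lets the Intel (irq_2_iommu) and AMD (irq_2_irte) layouts share storage, since only one backend is active on a given system. A generic sketch of the shape, with illustrative field names:

        #include <stdint.h>

        /* Illustrative field names; not the kernel structures. */
        struct demo_cfg {
                uint8_t remapped : 1;   /* discriminator: remap data valid */
                union {                 /* one variant live at a time;
                                         * which one depends on the active
                                         * remapping backend */
                        struct { uint16_t irte_index; }   intel;
                        struct { uint16_t devid, index; } amd;
                };
        };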
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b518c7509933..86095ed14135 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -25,6 +25,7 @@
extern void init_hypervisor(struct cpuinfo_x86 *c);
extern void init_hypervisor_platform(void);
+extern bool hypervisor_x2apic_available(void);
/*
* x86 hypervisor information
@@ -41,6 +42,9 @@ struct hypervisor_x86 {
/* Platform setup (run once per boot) */
void (*init_platform)(void);
+
+ /* X2APIC detection (run once per boot) */
+ bool (*x2apic_available)(void);
};
extern const struct hypervisor_x86 *x86_hyper;
@@ -51,13 +55,4 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
extern const struct hypervisor_x86 x86_hyper_kvm;
-static inline bool hypervisor_x2apic_available(void)
-{
- if (kvm_para_available())
- return true;
- if (xen_x2apic_para_available())
- return true;
- return false;
-}
-
#endif
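Above, the hard-coded hypervisor_x2apic_available() (which knew about KVM and Xen explicitly) becomes a per-hypervisor ->x2apic_available() callback in struct hypervisor_x86, so each detected guest type answers for itself. A minimal sketch of that dispatch, with hypothetical names standing in for x86_hyper:

        #include <stdbool.h>
        #include <stddef.h>

        /* Hypothetical stand-ins for struct hypervisor_x86 / x86_hyper. */
        struct demo_hyper {
                const char *name;
                bool (*x2apic_available)(void); /* optional per-type hook */
        };

        static const struct demo_hyper *demo_active;    /* set at detection */

        bool demo_x2apic_available(void)
        {
                return demo_active && demo_active->x2apic_available &&
                       demo_active->x2apic_available();
        }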
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 73d8c5398ea9..459e50a424d1 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -144,11 +144,24 @@ extern int timer_through_8259;
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
struct io_apic_irq_attr;
+struct irq_cfg;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
void setup_IO_APIC_irq_extra(u32 gsi);
extern void ioapic_insert_resources(void);
+extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
+ unsigned int, int,
+ struct io_apic_irq_attr *);
+extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
+
+extern void native_compose_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id);
+extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
extern int save_ioapic_entries(void);
@@ -179,6 +192,12 @@ extern void __init native_io_apic_init_mappings(void);
extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+extern void native_disable_io_apic(void);
+extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern int native_ioapic_set_affinity(struct irq_data *,
+ const struct cpumask *,
+ bool);
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
@@ -193,6 +212,9 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
{
x86_io_apic_ops.modify(apic, reg, value);
}
+
+extern void io_apic_eoi(unsigned int apic, unsigned int vector);
+
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
@@ -223,6 +245,12 @@ static inline void disable_ioapic_support(void) { }
#define native_io_apic_read NULL
#define native_io_apic_write NULL
#define native_io_apic_modify NULL
+#define native_disable_io_apic NULL
+#define native_io_apic_print_entries NULL
+#define native_ioapic_set_affinity NULL
+#define native_setup_ioapic_entry NULL
+#define native_compose_msi_msg NULL
+#define native_eoi_ioapic_pin NULL
#endif
#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 5fb9bbbd2f14..95fd3527f632 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -26,8 +26,6 @@
#ifdef CONFIG_IRQ_REMAP
-extern int irq_remapping_enabled;
-
extern void setup_irq_remapping_ops(void);
extern int irq_remapping_supported(void);
extern int irq_remapping_prepare(void);
@@ -40,21 +38,19 @@ extern int setup_ioapic_remapped_entry(int irq,
unsigned int destination,
int vector,
struct io_apic_irq_attr *attr);
-extern int set_remapped_irq_affinity(struct irq_data *data,
- const struct cpumask *mask,
- bool force);
extern void free_remapped_irq(int irq);
extern void compose_remapped_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id);
-extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
-extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
- int index, int sub_handle);
extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
+extern void panic_if_irq_remap(const char *msg);
+extern bool setup_remapped_irq(int irq,
+ struct irq_cfg *cfg,
+ struct irq_chip *chip);
-#else /* CONFIG_IRQ_REMAP */
+void irq_remap_modify_chip_defaults(struct irq_chip *chip);
-#define irq_remapping_enabled 0
+#else /* CONFIG_IRQ_REMAP */
static inline void setup_irq_remapping_ops(void) { }
static inline int irq_remapping_supported(void) { return 0; }
@@ -71,30 +67,30 @@ static inline int setup_ioapic_remapped_entry(int irq,
{
return -ENODEV;
}
-static inline int set_remapped_irq_affinity(struct irq_data *data,
- const struct cpumask *mask,
- bool force)
-{
- return 0;
-}
static inline void free_remapped_irq(int irq) { }
static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
}
-static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
{
return -ENODEV;
}
-static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
- int index, int sub_handle)
+
+static inline void panic_if_irq_remap(const char *msg)
+{
+}
+
+static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
- return -ENODEV;
}
-static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
+
+static inline bool setup_remapped_irq(int irq,
+ struct irq_cfg *cfg,
+ struct irq_chip *chip)
{
- return -ENODEV;
+ return false;
}
#endif /* CONFIG_IRQ_REMAP */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 1508e518c7e3..aac5fa62a86c 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -109,8 +109,8 @@
#define UV_BAU_MESSAGE 0xf5
-/* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK 0xf3
+/* Vector on which hypervisor callbacks will be delivered */
+#define HYPERVISOR_CALLBACK_VECTOR 0xf3
/*
* Local APIC timer IRQ vector is on a different priority level,
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 5ed1f16187be..65231e173baf 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,13 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
return ret;
}
-static inline int kvm_para_available(void)
+static inline bool kvm_para_available(void)
{
unsigned int eax, ebx, ecx, edx;
char signature[13];
if (boot_cpu_data.cpuid_level < 0)
- return 0; /* So we don't blow up on old processors */
+ return false; /* So we don't blow up on old processors */
if (cpu_has_hypervisor) {
cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
@@ -101,10 +101,10 @@ static inline int kvm_para_available(void)
signature[12] = 0;
if (strcmp(signature, "KVMKVMKVM") == 0)
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static inline unsigned int kvm_arch_para_features(void)
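kvm_para_available() above only changes its return type from int to bool; the detection itself reads the hypervisor CPUID leaf and compares the 12-byte vendor signature assembled from EBX/ECX/EDX. A hedged userspace sketch of the same probe for x86 builds with GCC/Clang's <cpuid.h>; the leaf numbers are the standard ones, everything else is illustrative:

        #include <cpuid.h>
        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static bool demo_kvm_present(void)
        {
                unsigned int eax, ebx, ecx, edx;
                char sig[13];

                __cpuid(1, eax, ebx, ecx, edx);
                if (!(ecx & (1u << 31)))  /* hypervisor bit clear: bare metal */
                        return false;

                __cpuid(0x40000000, eax, ebx, ecx, edx); /* hypervisor leaf */
                memcpy(sig + 0, &ebx, 4);
                memcpy(sig + 4, &ecx, 4);
                memcpy(sig + 8, &edx, 4);
                sig[12] = '\0';
                return strcmp(sig, "KVMKVMKVM") == 0;
        }

        int main(void)
        {
                printf("KVM signature %s\n",
                       demo_kvm_present() ? "found" : "absent");
                return 0;
        }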
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 48142971b25d..79327e9483a3 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -27,20 +27,20 @@
#define __asmlinkage_protect0(ret) \
__asmlinkage_protect_n(ret)
#define __asmlinkage_protect1(ret, arg1) \
- __asmlinkage_protect_n(ret, "g" (arg1))
+ __asmlinkage_protect_n(ret, "m" (arg1))
#define __asmlinkage_protect2(ret, arg1, arg2) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
- "g" (arg4))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
- "g" (arg4), "g" (arg5))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
- "g" (arg4), "g" (arg5), "g" (arg6))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
#endif /* CONFIG_X86_32 */
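The constraint change above is subtle: with "g" the compiler may satisfy the dummy dependency by copying the argument into a register, which does not force the on-stack argument slot to stay live across the asm; "m" pins the memory operand itself, so a callee that reuses the caller's stack slots cannot clobber arguments that are still needed. A simplified one-argument sketch of the idiom (the real macro is the variadic __asmlinkage_protect_n above):

        /* Simplified one-argument version of the protection idiom. */
        #define demo_protect1(ret, arg1) \
                __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), "m" (arg1))
                /* "m", not "g": the compiler must keep arg1 in its memory
                 * slot, so the slot stays valid for the asmlinkage callee. */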
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ecdfee60ee4a..f4076af1f4ed 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -3,6 +3,90 @@
#include <uapi/asm/mce.h>
+/*
+ * Machine Check support for x86
+ */
+
+/* MCG_CAP register defines */
+#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
+#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
+#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
+#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
+#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
+#define MCG_EXT_CNT_SHIFT 16
+#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
+#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+
+/* MCG_STATUS register defines */
+#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+
+/* MCi_STATUS register defines */
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
+#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
+#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
+#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCACOD 0xffff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
+
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
+#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
+#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
+#define MCI_MISC_ADDR_PHYS 2 /* physical address */
+#define MCI_MISC_ADDR_MEM 3 /* memory address */
+#define MCI_MISC_ADDR_GENERIC 7 /* generic */
+
+/* CTL2 register defines */
+#define MCI_CTL2_CMCI_EN (1ULL << 30)
+#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
+
+#define MCJ_CTX_MASK 3
+#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM 0 /* inject context: random */
+#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
+#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
+#define MCJ_EXCEPTION 0x8 /* raise as exception */
+#define MCJ_IRQ_BROADCAST 0x10 /* do IRQ broadcasting */
+
+#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK 128
+#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
+#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
+
+#define MCE_LOG_LEN 32
+#define MCE_LOG_SIGNATURE "MACHINECHECK"
+
+/*
+ * This structure contains all data related to the MCE log. It also
+ * carries a signature to make it easier to find with external
+ * debugging tools. Each entry is valid only when its finished flag
+ * is set.
+ */
+struct mce_log {
+ char signature[12]; /* "MACHINECHECK" */
+ unsigned len; /* = MCE_LOG_LEN */
+ unsigned next;
+ unsigned flags;
+ unsigned recordlen; /* length of struct mce */
+ struct mce entry[MCE_LOG_LEN];
+};
struct mca_config {
bool dont_log_ce;
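
For context on the MCI_MISC helpers moved in above: the ADDR_LSB field reports the granularity of the address in MCi_ADDR, so consumers clear the low, invalid bits before using a recoverable address. A small sketch under that reading (the helper name is made up):

static inline u64 mce_usable_addr_mask(u64 misc)
{
        unsigned int lsb = misc & 0x3f;         /* MCI_MISC_ADDR_LSB(misc) */

        return ~((1ULL << lsb) - 1);            /* drop sub-granule bits */
}

MCI_MISC_ADDR_MODE() then says how to interpret the result (MCI_MISC_ADDR_PHYS, MCI_MISC_ADDR_LINEAR, and so on).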
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 79ce5685ab64..c2934be2446a 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -11,4 +11,8 @@ struct ms_hyperv_info {
extern struct ms_hyperv_info ms_hyperv;
+void hyperv_callback_vector(void);
+void hyperv_vector_handler(struct pt_regs *regs);
+void hv_register_vmbus_handler(int irq, irq_handler_t handler);
+
#endif
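
The three declarations above are the new Hyper-V entry points: an IDT-level callback stub, its C handler, and a registration hook for the VMBus driver. A hypothetical caller-side sketch (the irq number and handler body are placeholders; only the hv_register_vmbus_handler() signature comes from the hunk):

#include <linux/interrupt.h>

static irqreturn_t vmbus_isr_stub(int irq, void *dev_id)
{
        /* Poll/acknowledge the VMBus message and event pages here. */
        return IRQ_HANDLED;
}

/* e.g. from the vmbus driver's probe path: */
/* hv_register_vmbus_handler(irq, vmbus_isr_stub); */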
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index bcdff997668c..2f366d0ac6b4 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,7 +4,8 @@
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
-#define MWAIT_MAX_NUM_CSTATES 8
+#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
+#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)
#define CPUID_MWAIT_LEAF 5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
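
The two decode helpers added above assume the architectural MWAIT hint layout in EAX: C-state in bits 7..4, sub-state in bits 3..0. A sketch of the matching encoder (illustrative; not part of the hunk):

#define MWAIT_HINT(cstate, substate) \
        ((((cstate) & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) | \
         ((substate) & MWAIT_SUBSTATE_MASK))

/*
 * Round trip: MWAIT_HINT2CSTATE(MWAIT_HINT(3, 2)) == 3 and
 * MWAIT_HINT2SUBSTATE(MWAIT_HINT(3, 2)) == 2.
 */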
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index dba7805176bf..c28fd02f4bf7 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -121,9 +121,12 @@ static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq)
#define arch_teardown_msi_irq x86_teardown_msi_irq
#define arch_restore_msi_irqs x86_restore_msi_irqs
/* implemented in arch/x86/kernel/apic/io_apic. */
+struct msi_desc;
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void native_teardown_msi_irq(unsigned int irq);
void native_restore_msi_irqs(struct pci_dev *dev, int irq);
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ unsigned int irq_base, unsigned int irq_offset);
/* default to the implementation in drivers/lib/msi.c */
#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
#define HAVE_DEFAULT_MSI_RESTORE_IRQS
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 4fabcdf1cfa7..57cb63402213 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,8 +29,13 @@
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
-#define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40)
-#define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41)
+#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
+#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
+#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)
+
+#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
+#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
+ (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
#define AMD64_EVENTSEL_EVENT \
(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
@@ -46,8 +51,12 @@
#define AMD64_RAW_EVENT_MASK \
(X86_RAW_EVENT_MASK | \
AMD64_EVENTSEL_EVENT)
+#define AMD64_RAW_EVENT_MASK_NB \
+ (AMD64_EVENTSEL_EVENT | \
+ ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_CORE 6
+#define AMD64_NUM_COUNTERS_NB 4
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
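
With the northbridge additions above, a raw NB event only has meaningful bits in the (extended) event select and the unit mask, which is precisely what AMD64_RAW_EVENT_MASK_NB preserves. A sketch of composing such a config value (event code and umask are placeholders):

static inline u64 amd_nb_raw_config(u64 event_sel, u64 umask)
{
        u64 config;

        config = (event_sel & 0xffULL) |          /* event select [7:0]   */
                 ((event_sel & 0xf00ULL) << 24) | /* ext. select [35:32]  */
                 ((umask & 0xffULL) << 8);        /* unit mask   [15:8]   */
        return config & AMD64_RAW_EVENT_MASK_NB;
}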
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5199db2923d3..fc304279b559 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
@@ -781,6 +786,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
memcpy(dst, src, count * sizeof(pgd_t));
}
+/*
+ * The x86 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd)
+{
+}
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8faa215a503e..9ee322103c6d 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -66,13 +66,6 @@ do { \
__flush_tlb_one((vaddr)); \
} while (0)
-/*
- * The i386 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- */
-#define update_mmu_cache(vma, address, ptep) do { } while (0)
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 47356f9df82e..615b0c78449f 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -142,9 +142,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
-#define update_mmu_cache(vma, address, ptep) do { } while (0)
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 888184b2fc85..d172588efae5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
char wp_works_ok; /* It doesn't on 386's */
/* Problems on some 486Dx4's and old 386's: */
- char hlt_works_ok;
char hard_math;
char rfu;
char fdiv_bug;
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
extern const struct seq_operations cpuinfo_op;
-static inline int hlt_works(int cpu)
-{
-#ifdef CONFIG_X86_32
- return cpu_data(cpu).hlt_works_ok;
-#else
- return 1;
-#endif
-}
-
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override;
extern bool amd_e400_c1e_detected;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
- IDLE_POLL, IDLE_FORCE_MWAIT};
+ IDLE_POLL};
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
@@ -943,7 +933,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
-extern int amd_get_nb_id(int cpu);
+extern u16 amd_get_nb_id(int cpu);
struct aperfmperf {
u64 aperf, mperf;
@@ -998,7 +988,11 @@ extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
-bool set_pm_idle_to_default(void);
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void);
+#else
+#define xen_set_default_idle 0
+#endif
void stop_this_cpu(void *dummy);
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 6c7fc25f2c34..5c6e4fb370f5 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -47,6 +47,12 @@
# define NEED_NOPL 0
#endif
+#ifdef CONFIG_MATOM
+# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
+#else
+# define NEED_MOVBE 0
+#endif
+
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
@@ -80,7 +86,7 @@
#define REQUIRED_MASK2 0
#define REQUIRED_MASK3 (NEED_NOPL)
-#define REQUIRED_MASK4 0
+#define REQUIRED_MASK4 (NEED_MOVBE)
#define REQUIRED_MASK5 0
#define REQUIRED_MASK6 0
#define REQUIRED_MASK7 0
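
A worked example of why REQUIRED_MASK4 is the mask that changes: assuming X86_FEATURE_MOVBE is defined as 4*32 + 22 (cpufeature word 4, ECX bit 22 of CPUID leaf 1), the "& 31" keeps only the bit index within its 32-bit word, so NEED_MOVBE evaluates to (1 << 22) and belongs in word 4's required mask. Illustrative helpers, not kernel macros:

#define FEATURE_WORD(f)         ((f) / 32)          /* -> 4 for MOVBE   */
#define FEATURE_BIT_IN_WORD(f)  (1U << ((f) & 31))  /* -> 1U << 22      */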
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 21f7385badb8..2c32df95bb78 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,7 +5,7 @@
*
* SGI UV architectural definitions
*
- * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_HUB_H
@@ -175,6 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
*/
#define UV1_HUB_REVISION_BASE 1
#define UV2_HUB_REVISION_BASE 3
+#define UV3_HUB_REVISION_BASE 5
static inline int is_uv1_hub(void)
{
@@ -183,6 +184,23 @@ static inline int is_uv1_hub(void)
static inline int is_uv2_hub(void)
{
+ return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
+ (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
+}
+
+static inline int is_uv3_hub(void)
+{
+ return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
+}
+
+static inline int is_uv_hub(void)
+{
+ return uv_hub_info->hub_revision;
+}
+
+/* code common to uv2 and uv3 only */
+static inline int is_uvx_hub(void)
+{
return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
}
@@ -230,14 +248,23 @@ union uvh_apicid {
#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
-#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \
- : UV2_LOCAL_MMR_BASE)
-#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \
- : UV2_GLOBAL_MMR32_BASE)
-#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
- UV2_LOCAL_MMR_SIZE)
+#define UV3_LOCAL_MMR_BASE 0xfa000000UL
+#define UV3_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV3_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
+ (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
+ UV3_LOCAL_MMR_BASE))
+#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
+ (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
+ UV3_GLOBAL_MMR32_BASE))
+#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+ (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
+ UV3_LOCAL_MMR_SIZE))
#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
- UV2_GLOBAL_MMR32_SIZE)
+ (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
+ UV3_GLOBAL_MMR32_SIZE))
#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
@@ -599,6 +626,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
* 1 - UV1 rev 1.0 initial silicon
* 2 - UV1 rev 2.0 production silicon
* 3 - UV2 rev 1.0 initial silicon
+ * 5 - UV3 rev 1.0 initial silicon
*/
static inline int uv_get_min_hub_revision_id(void)
{
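
The predicates above carve hub_revision into non-overlapping ranges (UV1 < UV2 < UV3), with is_uvx_hub() covering everything from UV2 up. A dispatch sketch equivalent to the reworked UV_LOCAL_MMR_SIZE macro:

static inline unsigned long uv_local_mmr_size_example(void)
{
        if (is_uv1_hub())
                return UV1_LOCAL_MMR_SIZE;
        if (is_uv2_hub())
                return UV2_LOCAL_MMR_SIZE;
        return UV3_LOCAL_MMR_SIZE;              /* is_uv3_hub() */
}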
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index cf1d73643f60..bd5f80e58a23 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,16 +5,25 @@
*
* SGI UV MMR definitions
*
- * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_MMRS_H
#define _ASM_X86_UV_UV_MMRS_H
/*
- * This file contains MMR definitions for both UV1 & UV2 hubs.
 * This file contains MMR definitions for all UV hub types.
*
- * In general, MMR addresses and structures are identical on both hubs.
+ * To minimize coding differences between hub types, the symbols are
+ * grouped by architecture types.
+ *
+ * UVH - definitions common to all UV hub types.
+ * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3).
+ * UV1H - definitions specific to UV type 1 hub.
+ * UV2H - definitions specific to UV type 2 hub.
+ * UV3H - definitions specific to UV type 3 hub.
+ *
 * So in general, MMR addresses and structures are identical on all hub types.
* These MMRs are identified as:
* #define UVH_xxx <address>
* union uvh_xxx {
@@ -23,24 +32,36 @@
* } s;
* };
*
- * If the MMR exists on both hub type but has different addresses or
- * contents, the MMR definition is similar to:
- * #define UV1H_xxx <uv1 address>
- * #define UV2H_xxx <uv2address>
- * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx)
 * If the MMR exists on all hub types but has different addresses:
+ * #define UV1Hxxx a
+ * #define UV2Hxxx b
+ * #define UV3Hxxx c
+ * #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
+ * (is_uv2_hub() ? UV2Hxxx :
+ * UV3Hxxx))
+ *
 * If the MMR exists on all hub types > 1 but has different addresses:
+ * #define UV2Hxxx b
+ * #define UV3Hxxx c
+ * #define UVXHxxx (is_uv2_hub() ? UV2Hxxx :
 * UV3Hxxx)
+ *
* union uvh_xxx {
* unsigned long v;
- * struct uv1h_int_cmpd_s { (Common fields only)
+ * struct uvh_xxx_s { # Common fields only
* } s;
- * struct uv1h_int_cmpd_s { (Full UV1 definition)
+ * struct uv1h_xxx_s { # Full UV1 definition (*)
* } s1;
- * struct uv2h_int_cmpd_s { (Full UV2 definition)
+ * struct uv2h_xxx_s { # Full UV2 definition (*)
* } s2;
+ * struct uv3h_xxx_s { # Full UV3 definition (*)
+ * } s3;
* };
 * (* - if present and different from the common struct)
*
- * Only essential difference are enumerated. For example, if the address is
- * the same for both UV1 & UV2, only a single #define is generated. Likewise,
- * if the contents is the same for both hubs, only the "s" structure is
+ * Only essential differences are enumerated. For example, if the address is
 * the same for all UVs, only a single #define is generated. Likewise,
 * if the contents are the same for all hubs, only the "s" structure is
* generated.
*
* If the MMR exists on ONLY 1 type of hub, no generic definition is
@@ -51,6 +72,8 @@
* struct uvh_int_cmpd_s {
* } sn;
* };
+ *
+ * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH)
*/
#define UV_MMR_ENABLE (1UL << 63)
@@ -58,15 +81,18 @@
#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
#define UV2_HUB_PART_NUMBER_X 0x1111
+#define UV3_HUB_PART_NUMBER 0x9578
+#define UV3_HUB_PART_NUMBER_X 0x4321
-/* Compat: if this #define is present, UV headers support UV2 */
+/* Compat: Indicate which UV Hubs are supported. */
#define UV2_HUB_IS_SUPPORTED 1
+#define UV3_HUB_IS_SUPPORTED 1
/* ========================================================================= */
/* UVH_BAU_DATA_BROADCAST */
/* ========================================================================= */
-#define UVH_BAU_DATA_BROADCAST 0x61688UL
-#define UVH_BAU_DATA_BROADCAST_32 0x440
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x440
#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
@@ -82,8 +108,8 @@ union uvh_bau_data_broadcast_u {
/* ========================================================================= */
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
-#define UVH_BAU_DATA_CONFIG 0x61680UL
-#define UVH_BAU_DATA_CONFIG_32 0x438
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x438
#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
@@ -121,10 +147,14 @@ union uvh_bau_data_config_u {
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0 */
/* ========================================================================= */
-#define UVH_EVENT_OCCURRED0 0x70000UL
-#define UVH_EVENT_OCCURRED0_32 0x5e8
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x5e8
+
+#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
@@ -135,7 +165,6 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
@@ -181,7 +210,6 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
@@ -192,7 +220,6 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
@@ -239,188 +266,130 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
-#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
-#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UVXH_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
union uvh_event_occurred0_u {
unsigned long v;
- struct uv1h_event_occurred0_s {
+ struct uvh_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW, W1C */
- unsigned long gr0_hcerr:1; /* RW, W1C */
- unsigned long gr1_hcerr:1; /* RW, W1C */
- unsigned long lh_hcerr:1; /* RW, W1C */
- unsigned long rh_hcerr:1; /* RW, W1C */
- unsigned long xn_hcerr:1; /* RW, W1C */
- unsigned long si_hcerr:1; /* RW, W1C */
- unsigned long lb_aoerr0:1; /* RW, W1C */
- unsigned long gr0_aoerr0:1; /* RW, W1C */
- unsigned long gr1_aoerr0:1; /* RW, W1C */
- unsigned long lh_aoerr0:1; /* RW, W1C */
+ unsigned long rsvd_1_10:10;
unsigned long rh_aoerr0:1; /* RW, W1C */
- unsigned long xn_aoerr0:1; /* RW, W1C */
- unsigned long si_aoerr0:1; /* RW, W1C */
- unsigned long lb_aoerr1:1; /* RW, W1C */
- unsigned long gr0_aoerr1:1; /* RW, W1C */
- unsigned long gr1_aoerr1:1; /* RW, W1C */
- unsigned long lh_aoerr1:1; /* RW, W1C */
- unsigned long rh_aoerr1:1; /* RW, W1C */
- unsigned long xn_aoerr1:1; /* RW, W1C */
- unsigned long si_aoerr1:1; /* RW, W1C */
- unsigned long rh_vpi_int:1; /* RW, W1C */
- unsigned long system_shutdown_int:1; /* RW, W1C */
- unsigned long lb_irq_int_0:1; /* RW, W1C */
- unsigned long lb_irq_int_1:1; /* RW, W1C */
- unsigned long lb_irq_int_2:1; /* RW, W1C */
- unsigned long lb_irq_int_3:1; /* RW, W1C */
- unsigned long lb_irq_int_4:1; /* RW, W1C */
- unsigned long lb_irq_int_5:1; /* RW, W1C */
- unsigned long lb_irq_int_6:1; /* RW, W1C */
- unsigned long lb_irq_int_7:1; /* RW, W1C */
- unsigned long lb_irq_int_8:1; /* RW, W1C */
- unsigned long lb_irq_int_9:1; /* RW, W1C */
- unsigned long lb_irq_int_10:1; /* RW, W1C */
- unsigned long lb_irq_int_11:1; /* RW, W1C */
- unsigned long lb_irq_int_12:1; /* RW, W1C */
- unsigned long lb_irq_int_13:1; /* RW, W1C */
- unsigned long lb_irq_int_14:1; /* RW, W1C */
- unsigned long lb_irq_int_15:1; /* RW, W1C */
- unsigned long l1_nmi_int:1; /* RW, W1C */
- unsigned long stop_clock:1; /* RW, W1C */
- unsigned long asic_to_l1:1; /* RW, W1C */
- unsigned long l1_to_asic:1; /* RW, W1C */
- unsigned long ltc_int:1; /* RW, W1C */
- unsigned long la_seq_trigger:1; /* RW, W1C */
- unsigned long ipi_int:1; /* RW, W1C */
- unsigned long extio_int0:1; /* RW, W1C */
- unsigned long extio_int1:1; /* RW, W1C */
- unsigned long extio_int2:1; /* RW, W1C */
- unsigned long extio_int3:1; /* RW, W1C */
- unsigned long profile_int:1; /* RW, W1C */
- unsigned long rtc0:1; /* RW, W1C */
- unsigned long rtc1:1; /* RW, W1C */
- unsigned long rtc2:1; /* RW, W1C */
- unsigned long rtc3:1; /* RW, W1C */
- unsigned long bau_data:1; /* RW, W1C */
- unsigned long power_management_req:1; /* RW, W1C */
- unsigned long rsvd_57_63:7;
- } s1;
- struct uv2h_event_occurred0_s {
+ unsigned long rsvd_12_63:52;
+ } s;
+ struct uvxh_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW */
unsigned long qp_hcerr:1; /* RW */
unsigned long rh_hcerr:1; /* RW */
@@ -481,19 +450,20 @@ union uvh_event_occurred0_u {
unsigned long extio_int3:1; /* RW */
unsigned long profile_int:1; /* RW */
unsigned long rsvd_59_63:5;
- } s2;
+ } sx;
};
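
Usage sketch for the restructured union (the helper is illustrative; uv_read_local_mmr() is the existing UV accessor): fields common to every hub type are read through .s, fields shared by UV2/UV3 through the new .sx view, and s1/s2/s3 remain for hub-specific layouts:

static inline int uv_lb_hcerr_pending(void)
{
        union uvh_event_occurred0_u ev;

        ev.v = uv_read_local_mmr(UVH_EVENT_OCCURRED0);
        return ev.s.lb_hcerr;                   /* common to all hubs */
}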
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0_ALIAS */
/* ========================================================================= */
-#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+
/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
@@ -531,7 +501,7 @@ union uvh_gr0_tlb_int0_config_u {
/* ========================================================================= */
/* UVH_GR0_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
@@ -571,9 +541,11 @@ union uvh_gr0_tlb_int1_config_u {
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ? \
- UV1H_GR0_TLB_MMR_CONTROL : \
- UV2H_GR0_TLB_MMR_CONTROL)
+#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
+#define UVH_GR0_TLB_MMR_CONTROL \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
+ UV3H_GR0_TLB_MMR_CONTROL))
#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -611,6 +583,21 @@ union uvh_gr0_tlb_int1_config_u {
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -630,6 +617,23 @@ union uvh_gr0_tlb_int1_config_u {
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
union uvh_gr0_tlb_mmr_control_u {
unsigned long v;
struct uvh_gr0_tlb_mmr_control_s {
@@ -642,7 +646,9 @@ union uvh_gr0_tlb_mmr_control_u {
unsigned long rsvd_21_29:9;
unsigned long mmr_write:1; /* WP */
unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_63:32;
+ unsigned long rsvd_32_48:17;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52_63:12;
} s;
struct uv1h_gr0_tlb_mmr_control_s {
unsigned long index:12; /* RW */
@@ -666,6 +672,23 @@ union uvh_gr0_tlb_mmr_control_u {
unsigned long mmr_inj_tlblruv:1; /* RW */
unsigned long rsvd_61_63:3;
} s1;
+ struct uvxh_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long rsvd_48:1;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52:1;
+ unsigned long rsvd_53_63:11;
+ } sx;
struct uv2h_gr0_tlb_mmr_control_s {
unsigned long index:12; /* RW */
unsigned long mem_sel:2; /* RW */
@@ -683,6 +706,24 @@ union uvh_gr0_tlb_mmr_control_u {
unsigned long mmr_inj_tlbram:1; /* RW */
unsigned long rsvd_53_63:11;
} s2;
+ struct uv3h_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long ecc_sel:1; /* RW */
+ unsigned long rsvd_22_29:8;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long undef_48:1; /* Undefined */
+ unsigned long rsvd_49_51:3;
+ unsigned long undef_52:1; /* Undefined */
+ unsigned long rsvd_53_63:11;
+ } s3;
};
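
The control-register fields above imply the MMR-mediated TLB read sequence: write the entry index with mmr_read set, then (on UV2/UV3) poll mmr_op_done. A sketch under that reading -- uv_read_local_mmr()/uv_write_local_mmr() are the existing accessors, the rest is illustrative:

static inline void uvx_tlb_start_read(int index)
{
        union uvh_gr0_tlb_mmr_control_u ctl = { .v = 0 };

        ctl.s.index = index;
        ctl.s.mmr_read = 1;
        uv_write_local_mmr(UVH_GR0_TLB_MMR_CONTROL, ctl.v);

        do {
                ctl.v = uv_read_local_mmr(UVH_GR0_TLB_MMR_CONTROL);
        } while (!ctl.sx.mmr_op_done);          /* UVX-only done flag */
}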
/* ========================================================================= */
@@ -690,9 +731,11 @@ union uvh_gr0_tlb_mmr_control_u {
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \
- UV1H_GR0_TLB_MMR_READ_DATA_HI : \
- UV2H_GR0_TLB_MMR_READ_DATA_HI)
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
+ UV3H_GR0_TLB_MMR_READ_DATA_HI))
#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -703,6 +746,46 @@ union uvh_gr0_tlb_mmr_control_u {
#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
+
union uvh_gr0_tlb_mmr_read_data_hi_u {
unsigned long v;
struct uvh_gr0_tlb_mmr_read_data_hi_s {
@@ -712,6 +795,36 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
unsigned long larger:1; /* RO */
unsigned long rsvd_45_63:19;
} s;
+ struct uv1h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s1;
+ struct uvxh_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } sx;
+ struct uv2h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s2;
+ struct uv3h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long aa_ext:1; /* RO */
+ unsigned long undef_46_54:9; /* Undefined */
+ unsigned long way_ecc:9; /* RO */
+ } s3;
};
/* ========================================================================= */
@@ -719,9 +832,11 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \
- UV1H_GR0_TLB_MMR_READ_DATA_LO : \
- UV2H_GR0_TLB_MMR_READ_DATA_LO)
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
+ UV3H_GR0_TLB_MMR_READ_DATA_LO))
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -730,6 +845,34 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
union uvh_gr0_tlb_mmr_read_data_lo_u {
unsigned long v;
struct uvh_gr0_tlb_mmr_read_data_lo_s {
@@ -737,12 +880,32 @@ union uvh_gr0_tlb_mmr_read_data_lo_u {
unsigned long asid:24; /* RO */
unsigned long valid:1; /* RO */
} s;
+ struct uv1h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s1;
+ struct uvxh_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } sx;
+ struct uv2h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s2;
+ struct uv3h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s3;
};
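
Companion sketch: once the read completes, the TLB entry comes back split across the HI/LO data MMRs, and decoding is just the bitfield views declared above (the pr_info() is illustrative):

static inline void uvx_tlb_dump_entry(void)
{
        union uvh_gr0_tlb_mmr_read_data_hi_u hi;
        union uvh_gr0_tlb_mmr_read_data_lo_u lo;

        hi.v = uv_read_local_mmr(UVH_GR0_TLB_MMR_READ_DATA_HI);
        lo.v = uv_read_local_mmr(UVH_GR0_TLB_MMR_READ_DATA_LO);
        if (lo.s.valid)
                pr_info("tlb: vpn 0x%lx -> pfn 0x%lx\n",
                        (unsigned long)lo.s.vpn,
                        (unsigned long)hi.s.pfn);
}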
/* ========================================================================= */
/* UVH_GR1_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
@@ -780,7 +943,7 @@ union uvh_gr1_tlb_int0_config_u {
/* ========================================================================= */
/* UVH_GR1_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
@@ -820,9 +983,11 @@ union uvh_gr1_tlb_int1_config_u {
/* ========================================================================= */
#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ? \
- UV1H_GR1_TLB_MMR_CONTROL : \
- UV2H_GR1_TLB_MMR_CONTROL)
+#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
+#define UVH_GR1_TLB_MMR_CONTROL \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
+ UV3H_GR1_TLB_MMR_CONTROL))
#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -860,6 +1025,21 @@ union uvh_gr1_tlb_int1_config_u {
#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -879,6 +1059,23 @@ union uvh_gr1_tlb_int1_config_u {
#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
union uvh_gr1_tlb_mmr_control_u {
unsigned long v;
struct uvh_gr1_tlb_mmr_control_s {
@@ -891,7 +1088,9 @@ union uvh_gr1_tlb_mmr_control_u {
unsigned long rsvd_21_29:9;
unsigned long mmr_write:1; /* WP */
unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_63:32;
+ unsigned long rsvd_32_48:17;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52_63:12;
} s;
struct uv1h_gr1_tlb_mmr_control_s {
unsigned long index:12; /* RW */
@@ -915,6 +1114,23 @@ union uvh_gr1_tlb_mmr_control_u {
unsigned long mmr_inj_tlblruv:1; /* RW */
unsigned long rsvd_61_63:3;
} s1;
+ struct uvxh_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long rsvd_48:1;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52:1;
+ unsigned long rsvd_53_63:11;
+ } sx;
struct uv2h_gr1_tlb_mmr_control_s {
unsigned long index:12; /* RW */
unsigned long mem_sel:2; /* RW */
@@ -932,6 +1148,24 @@ union uvh_gr1_tlb_mmr_control_u {
unsigned long mmr_inj_tlbram:1; /* RW */
unsigned long rsvd_53_63:11;
} s2;
+ struct uv3h_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long ecc_sel:1; /* RW */
+ unsigned long rsvd_22_29:8;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long undef_48:1; /* Undefined */
+ unsigned long rsvd_49_51:3;
+ unsigned long undef_52:1; /* Undefined */
+ unsigned long rsvd_53_63:11;
+ } s3;
};
/* ========================================================================= */
@@ -939,9 +1173,11 @@ union uvh_gr1_tlb_mmr_control_u {
/* ========================================================================= */
#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \
- UV1H_GR1_TLB_MMR_READ_DATA_HI : \
- UV2H_GR1_TLB_MMR_READ_DATA_HI)
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
+ UV3H_GR1_TLB_MMR_READ_DATA_HI))
#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -952,6 +1188,46 @@ union uvh_gr1_tlb_mmr_control_u {
#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
+
union uvh_gr1_tlb_mmr_read_data_hi_u {
unsigned long v;
struct uvh_gr1_tlb_mmr_read_data_hi_s {
@@ -961,6 +1237,36 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
unsigned long larger:1; /* RO */
unsigned long rsvd_45_63:19;
} s;
+ struct uv1h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s1;
+ struct uvxh_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } sx;
+ struct uv2h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s2;
+ struct uv3h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long aa_ext:1; /* RO */
+ unsigned long undef_46_54:9; /* Undefined */
+ unsigned long way_ecc:9; /* RO */
+ } s3;
};
/* ========================================================================= */
@@ -968,9 +1274,11 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
/* ========================================================================= */
#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \
- UV1H_GR1_TLB_MMR_READ_DATA_LO : \
- UV2H_GR1_TLB_MMR_READ_DATA_LO)
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
+ UV3H_GR1_TLB_MMR_READ_DATA_LO))
#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -979,6 +1287,34 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
union uvh_gr1_tlb_mmr_read_data_lo_u {
unsigned long v;
struct uvh_gr1_tlb_mmr_read_data_lo_s {
@@ -986,12 +1322,32 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
unsigned long asid:24; /* RO */
unsigned long valid:1; /* RO */
} s;
+ struct uv1h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s1;
+ struct uvxh_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } sx;
+ struct uv2h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s2;
+ struct uv3h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s3;
};
/* ========================================================================= */
/* UVH_INT_CMPB */
/* ========================================================================= */
-#define UVH_INT_CMPB 0x22080UL
+#define UVH_INT_CMPB 0x22080UL
#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
@@ -1007,10 +1363,13 @@ union uvh_int_cmpb_u {
/* ========================================================================= */
/* UVH_INT_CMPC */
/* ========================================================================= */
-#define UVH_INT_CMPC 0x22100UL
+#define UVH_INT_CMPC 0x22100UL
+
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
-#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
union uvh_int_cmpc_u {
unsigned long v;
@@ -1023,10 +1382,13 @@ union uvh_int_cmpc_u {
/* ========================================================================= */
/* UVH_INT_CMPD */
/* ========================================================================= */
-#define UVH_INT_CMPD 0x22180UL
+#define UVH_INT_CMPD 0x22180UL
-#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
union uvh_int_cmpd_u {
unsigned long v;
@@ -1039,8 +1401,8 @@ union uvh_int_cmpd_u {
/* ========================================================================= */
/* UVH_IPI_INT */
/* ========================================================================= */
-#define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x348
+#define UVH_IPI_INT 0x60500UL
+#define UVH_IPI_INT_32 0x348
#define UVH_IPI_INT_VECTOR_SHFT 0
#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
@@ -1069,8 +1431,8 @@ union uvh_ipi_int_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
@@ -1091,8 +1453,8 @@ union uvh_lb_bau_intd_payload_queue_first_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1109,8 +1471,8 @@ union uvh_lb_bau_intd_payload_queue_last_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1127,8 +1489,8 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
@@ -1189,14 +1551,21 @@ union uvh_lb_bau_intd_software_acknowledge_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
+
/* ========================================================================= */
/* UVH_LB_BAU_MISC_CONTROL */
/* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
+#define UV1H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL_32 0x320170UL
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1213,6 +1582,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
@@ -1228,6 +1598,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1262,6 +1633,53 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
@@ -1309,6 +1727,59 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
union uvh_lb_bau_misc_control_u {
unsigned long v;
struct uvh_lb_bau_misc_control_s {
@@ -1327,7 +1798,8 @@ union uvh_lb_bau_misc_control_u {
unsigned long programmed_initial_priority:3; /* RW */
unsigned long use_incoming_priority:1; /* RW */
unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long rsvd_29_63:35;
+ unsigned long rsvd_29_47:19;
+ unsigned long fun:16; /* RW */
} s;
struct uv1h_lb_bau_misc_control_s {
unsigned long rejection_delay:8; /* RW */
@@ -1348,6 +1820,32 @@ union uvh_lb_bau_misc_control_u {
unsigned long rsvd_29_47:19;
unsigned long fun:16; /* RW */
} s1;
+ struct uvxh_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long rsvd_36_47:12;
+ unsigned long fun:16; /* RW */
+ } sx;
struct uv2h_lb_bau_misc_control_s {
unsigned long rejection_delay:8; /* RW */
unsigned long apic_mode:1; /* RW */
@@ -1374,13 +1872,42 @@ union uvh_lb_bau_misc_control_u {
unsigned long rsvd_36_47:12;
unsigned long fun:16; /* RW */
} s2;
+ struct uv3h_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
+ unsigned long enable_intd_prefetch_hint:1; /* RW */
+ unsigned long thread_kill_timebase:8; /* RW */
+ unsigned long rsvd_46_47:2;
+ unsigned long fun:16; /* RW */
+ } s3;
};
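/*
 * Usage sketch (illustrative, not part of the patch): every register in
 * this file pairs a _SHFT with a _MASK, so a field is extracted the same
 * way throughout, e.g.
 *
 *	unsigned long v = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
 *	int period = (v & UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK)
 *		>> UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT;
 *
 * which works unchanged on UV1, UV2 and UV3 for the fields the hubs share.
 */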
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
@@ -1402,8 +1929,8 @@ union uvh_lb_bau_sb_activation_control_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -1418,8 +1945,8 @@ union uvh_lb_bau_sb_activation_status_0_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -1434,8 +1961,8 @@ union uvh_lb_bau_sb_activation_status_1_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
@@ -1456,7 +1983,10 @@ union uvh_lb_bau_sb_descriptor_base_u {
/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
-#define UVH_NODE_ID 0x0UL
+#define UVH_NODE_ID 0x0UL
+#define UV1H_NODE_ID 0x0UL
+#define UV2H_NODE_ID 0x0UL
+#define UV3H_NODE_ID 0x0UL
#define UVH_NODE_ID_FORCE1_SHFT 0
#define UVH_NODE_ID_MANUFACTURER_SHFT 1
@@ -1484,6 +2014,21 @@ union uvh_lb_bau_sb_descriptor_base_u {
#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+#define UVXH_NODE_ID_FORCE1_SHFT 0
+#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVXH_NODE_ID_REVISION_SHFT 28
+#define UVXH_NODE_ID_NODE_ID_SHFT 32
+#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UVXH_NODE_ID_NI_PORT_SHFT 57
+#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
#define UV2H_NODE_ID_FORCE1_SHFT 0
#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
@@ -1499,6 +2044,25 @@ union uvh_lb_bau_sb_descriptor_base_u {
#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+#define UV3H_NODE_ID_FORCE1_SHFT 0
+#define UV3H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV3H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV3H_NODE_ID_REVISION_SHFT 28
+#define UV3H_NODE_ID_NODE_ID_SHFT 32
+#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
+#define UV3H_NODE_ID_RESERVED_2_SHFT 49
+#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UV3H_NODE_ID_NI_PORT_SHFT 57
+#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
+#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
+#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
union uvh_node_id_u {
unsigned long v;
struct uvh_node_id_s {
@@ -1521,6 +2085,17 @@ union uvh_node_id_u {
unsigned long ni_port:4; /* RO */
unsigned long rsvd_60_63:4;
} s1;
+ struct uvxh_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47_49:3;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } sx;
struct uv2h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
@@ -1532,13 +2107,26 @@ union uvh_node_id_u {
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
} s2;
+ struct uv3h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47:1;
+ unsigned long router_select:1; /* RO */
+ unsigned long rsvd_49:1;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } s3;
};
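/*
 * Usage sketch (illustrative, not part of the patch): the union lets a
 * caller read the 64-bit MMR once and then pick the view matching the
 * running hub; common fields go through ->s, hub-specific ones through
 * ->s1, ->sx, ->s2 or ->s3. This assumes an is_uv3_hub() predicate in
 * <asm/uv/uv_hub.h> alongside the existing is_uv1_hub()/is_uv2_hub():
 *
 *	union uvh_node_id_u node_id;
 *
 *	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 *	pr_info("node id %d\n", (int)node_id.s.node_id);
 *	if (is_uv3_hub())
 *		... node_id.s3.router_select ...;	(UV3-only field)
 */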
/* ========================================================================= */
/* UVH_NODE_PRESENT_TABLE */
/* ========================================================================= */
-#define UVH_NODE_PRESENT_TABLE 0x1400UL
-#define UVH_NODE_PRESENT_TABLE_DEPTH 16
+#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE_DEPTH 16
#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
@@ -1553,7 +2141,7 @@ union uvh_node_present_table_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
@@ -1577,7 +2165,7 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
@@ -1601,7 +2189,7 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
@@ -1625,7 +2213,7 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1642,7 +2230,7 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1659,7 +2247,7 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1676,7 +2264,10 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
@@ -1690,11 +2281,21 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
union uvh_rh_gam_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_config_mmr_s {
@@ -1709,20 +2310,37 @@ union uvh_rh_gam_config_mmr_u {
unsigned long mmiol_cfg:1; /* RW */
unsigned long rsvd_13_63:51;
} s1;
+ struct uvxh_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } sx;
struct uv2h_rh_gam_config_mmr_s {
unsigned long m_skt:6; /* RW */
unsigned long n_skt:4; /* RW */
unsigned long rsvd_10_63:54;
} s2;
+ struct uv3h_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s3;
};
/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
@@ -1733,6 +2351,13 @@ union uvh_rh_gam_config_mmr_u {
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -1740,12 +2365,23 @@ union uvh_rh_gam_config_mmr_u {
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_gru_overlay_config_mmr_s {
unsigned long rsvd_0_27:28;
unsigned long base:18; /* RW */
- unsigned long rsvd_46_62:17;
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s;
struct uv1h_rh_gam_gru_overlay_config_mmr_s {
@@ -1758,6 +2394,14 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s1;
+ struct uvxh_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } sx;
struct uv2h_rh_gam_gru_overlay_config_mmr_s {
unsigned long rsvd_0_27:28;
unsigned long base:18; /* RW */
@@ -1766,12 +2410,22 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s2;
+ struct uv3h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_61:6;
+ unsigned long mode:1; /* RW */
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
@@ -1814,10 +2468,15 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
@@ -1826,11 +2485,21 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_mmr_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_mmr_overlay_config_mmr_s {
@@ -1846,18 +2515,30 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
unsigned long rsvd_47_62:16;
unsigned long enable:1; /* RW */
} s1;
+ struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } sx;
struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} s2;
+ struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
/* UVH_RTC */
/* ========================================================================= */
-#define UVH_RTC 0x340000UL
+#define UVH_RTC 0x340000UL
#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -1873,7 +2554,7 @@ union uvh_rtc_u {
/* ========================================================================= */
/* UVH_RTC1_INT_CONFIG */
/* ========================================================================= */
-#define UVH_RTC1_INT_CONFIG 0x615c0UL
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
@@ -1911,8 +2592,8 @@ union uvh_rtc1_int_config_u {
/* ========================================================================= */
/* UVH_SCRATCH5 */
/* ========================================================================= */
-#define UVH_SCRATCH5 0x2d0200UL
-#define UVH_SCRATCH5_32 0x778
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x778
#define UVH_SCRATCH5_SCRATCH5_SHFT 0
#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
@@ -1925,79 +2606,79 @@ union uvh_scratch5_u {
};
/* ========================================================================= */
-/* UV2H_EVENT_OCCURRED2 */
-/* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2 0x70100UL
-#define UV2H_EVENT_OCCURRED2_32 0xb68
-
-#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
-#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
-#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
-#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
-#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
-#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
-#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
-#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
-#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
-#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
-#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
-#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
-#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
-#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
-#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
-#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
-#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
-#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
-#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
-#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
-#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
-#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
-#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
-#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
-#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
-#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
-#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
-#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
-#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
-#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
-#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
-#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
-#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
-
-union uv2h_event_occurred2_u {
+/* UVXH_EVENT_OCCURRED2 */
+/* ========================================================================= */
+#define UVXH_EVENT_OCCURRED2 0x70100UL
+#define UVXH_EVENT_OCCURRED2_32 0xb68
+
+#define UVXH_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UVXH_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UVXH_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UVXH_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UVXH_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UVXH_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UVXH_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UVXH_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UVXH_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UVXH_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UVXH_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UVXH_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UVXH_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UVXH_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UVXH_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UVXH_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UVXH_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UVXH_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UVXH_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UVXH_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UVXH_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UVXH_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UVXH_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UVXH_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UVXH_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UVXH_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UVXH_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UVXH_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UVXH_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UVXH_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UVXH_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UVXH_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UVXH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UVXH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UVXH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+union uvxh_event_occurred2_u {
unsigned long v;
- struct uv2h_event_occurred2_s {
+ struct uvxh_event_occurred2_s {
unsigned long rtc_0:1; /* RW */
unsigned long rtc_1:1; /* RW */
unsigned long rtc_2:1; /* RW */
@@ -2031,29 +2712,46 @@ union uv2h_event_occurred2_u {
unsigned long rtc_30:1; /* RW */
unsigned long rtc_31:1; /* RW */
unsigned long rsvd_32_63:32;
- } s1;
+ } sx;
};
/* ========================================================================= */
-/* UV2H_EVENT_OCCURRED2_ALIAS */
+/* UVXH_EVENT_OCCURRED2_ALIAS */
/* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
-#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
+#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
+#define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70
+
/* ========================================================================= */
-/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */
+/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */
/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
-union uv2h_lb_bau_sb_activation_status_2_u {
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+union uvxh_lb_bau_sb_activation_status_2_u {
unsigned long v;
+ struct uvxh_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } sx;
struct uv2h_lb_bau_sb_activation_status_2_s {
unsigned long aux_error:64; /* RW */
- } s1;
+ } s2;
+ struct uv3h_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } s3;
};
/* ========================================================================= */
@@ -2073,5 +2771,87 @@ union uv1h_lb_target_physical_apic_id_mask_u {
} s1;
};
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+};
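
Each of the 128 redirect entries maps one slice of the MMIOH window to a NASID; a hedged sketch of walking the table (the 8-byte MMR stride mirrors the map_mmioh_high_uv3() loop later in this patch, but treat it as an assumption):

	int i, nasid;
	union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;

	for (i = 0; i < UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH; i++) {
		redirect.v = uv_read_local_mmr(
			UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR + i * 8);
		nasid = redirect.s3.nasid;	/* owner of this slice */
	}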
+
#endif /* _ASM_X86_UV_UV_MMRS_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 57693498519c..7669941cc9d2 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -181,19 +181,38 @@ struct x86_platform_ops {
};
struct pci_dev;
+struct msi_msg;
struct x86_msi_ops {
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
+ unsigned int dest, struct msi_msg *msg,
+ u8 hpet_id);
void (*teardown_msi_irq)(unsigned int irq);
void (*teardown_msi_irqs)(struct pci_dev *dev);
void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
+ int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
};
+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_data;
+struct cpumask;
+
struct x86_io_apic_ops {
- void (*init) (void);
- unsigned int (*read) (unsigned int apic, unsigned int reg);
- void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
- void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
+ void (*init) (void);
+ unsigned int (*read) (unsigned int apic, unsigned int reg);
+ void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
+ void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
+ void (*disable)(void);
+ void (*print_entries)(unsigned int apic, unsigned int nr_entries);
+ int (*set_affinity)(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force);
+ int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
+ unsigned int destination, int vector,
+ struct io_apic_irq_attr *attr);
+ void (*eoi_ioapic_pin)(int apic, int pin, int vector);
};
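
These hooks let the interrupt-remapping code interpose once at init time instead of sprinkling irq_remapping_enabled checks through io_apic.c. A companion change (not shown in this diff) installs the remapped variants roughly as below; the function name and exact set of assignments are an assumption here, though the individual handlers all appear elsewhere in this series:

	void __init irq_remap_modify_x86_ops(void)
	{
		x86_io_apic_ops.print_entries	= intel_ir_io_apic_print_entries;
		x86_io_apic_ops.set_affinity	= set_remapped_irq_affinity;
		x86_io_apic_ops.setup_entry	= setup_ioapic_remapped_entry;
		x86_msi.compose_msi_msg		= compose_remapped_msi_msg;
		x86_msi.setup_hpet_msi		= setup_hpet_msi_remapped;
	}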
extern struct x86_init_ops x86_init;
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index f8fde90bc45e..d8829751b3f8 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,10 +1,499 @@
#ifdef CONFIG_KMEMCHECK
/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
# include <asm-generic/xor.h>
+#elif !defined(_ASM_X86_XOR_H)
+#define _ASM_X86_XOR_H
+
+/*
+ * Optimized RAID-5 checksumming functions for SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+/*
+ * Based on
+ * High-speed RAID5 checksumming functions utilizing SSE instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+/*
+ * x86-64 changes / gcc fixes from Andi Kleen.
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ *
+ * This hasn't been optimized for the hammer yet, but there are likely
+ * no advantages to be gotten from x86-64 here anyways.
+ */
+
+#include <asm/i387.h>
+
+#ifdef CONFIG_X86_32
+/* reduce register pressure */
+# define XOR_CONSTANT_CONSTRAINT "i"
#else
+# define XOR_CONSTANT_CONSTRAINT "re"
+#endif
+
+#define OFFS(x) "16*("#x")"
+#define PF_OFFS(x) "256+16*("#x")"
+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
+#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
+#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
+#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
+#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
+#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
+#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
+#define NOP(x)
+
+#define BLK64(pf, op, i) \
+ pf(i) \
+ op(i, 0) \
+ op(i + 1, 1) \
+ op(i + 2, 2) \
+ op(i + 3, 3)
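
To make BLK64 concrete, one expansion (a single prefetch of the line 256 bytes ahead, then four 16-byte operations) looks like this:

	/*
	 * BLK64(PF0, LD, 0) expands to:
	 *	prefetchnta 256+16*(0)(%[p1])
	 *	movaps 16*(0)(%[p1]), %xmm0
	 *	movaps 16*(1)(%[p1]), %xmm1
	 *	movaps 16*(2)(%[p1]), %xmm2
	 *	movaps 16*(3)(%[p1]), %xmm3
	 * i.e. one prefetch per 64-byte cache line of work, which is
	 * where the "prefetch64-sse" template below gets its name.
	 */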
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
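
The shift in "lines = bytes >> 8" falls out of the unrolling:

	/*
	 *   bytes per BLOCK()    = 4 xmm regs * 16 B  =  64
	 *   bytes per iteration  = 4 BLOCK()s  * 64 B = 256
	 *   lines                = bytes / 256        = bytes >> 8
	 */

so callers are expected to pass a multiple of 256 bytes, with 16-byte-aligned buffers (movaps faults on misalignment). The same accounting holds for every xor_sse_N variant below.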
+
+static void
+xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ PF3(i) \
+ PF3(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1),
+ [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(PF3, XO3, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1),
+ [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ PF3(i) \
+ PF3(i + 2) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ PF4(i) \
+ PF4(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ XO4(i, 0) \
+ XO4(i + 1, 1) \
+ XO4(i + 2, 2) \
+ XO4(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " add %[inc], %[p5] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+ [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(PF3, XO3, i) \
+ BLK64(PF4, XO4, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " add %[inc], %[p5] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+ [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_sse_pf64 = {
+ .name = "prefetch64-sse",
+ .do_2 = xor_sse_2_pf64,
+ .do_3 = xor_sse_3_pf64,
+ .do_4 = xor_sse_4_pf64,
+ .do_5 = xor_sse_5_pf64,
+};
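
For context: the RAID xor code benchmarks every registered template at boot via xor_speed() and then invokes the winner's do_N hooks. A hedged direct-call sketch (buffer names are hypothetical; buffers must be 16-byte aligned and a multiple of 256 bytes long, and the result lands in the first argument):

	/* dst ^= src over one page, using the prefetch64 variant */
	xor_block_sse_pf64.do_2(PAGE_SIZE,
				(unsigned long *)dst_page,
				(unsigned long *)src_page);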
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef NOP
+#undef BLK64
+#undef BLOCK
+
+#undef XOR_CONSTANT_CONSTRAINT
+
#ifdef CONFIG_X86_32
# include <asm/xor_32.h>
#else
# include <asm/xor_64.h>
#endif
-#endif
+
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+ AVX_SELECT(FASTEST)
+
+#endif /* _ASM_X86_XOR_H */
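
XOR_SELECT_TEMPLATE now defers entirely to AVX_SELECT() from <asm/xor_avx.h>, which, roughly, keeps the boot-time benchmark winner unless AVX (with OS-managed xsave) is usable; the exact condition below is an approximation, not quoted from this patch:

	#define AVX_SELECT(FASTEST) \
		(cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)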
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index f79cb7ec0e06..ce05722e3c68 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -2,7 +2,7 @@
#define _ASM_X86_XOR_32_H
/*
- * Optimized RAID-5 checksumming functions for MMX and SSE.
+ * Optimized RAID-5 checksumming functions for MMX.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -529,290 +529,6 @@ static struct xor_block_template xor_block_p5_mmx = {
.do_5 = xor_p5_mmx_5,
};
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
-#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
-#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
-#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
-#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
-#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
-#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
-#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 8;
-
- kernel_fpu_begin();
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- LD(i, 0) \
- LD(i + 1, 1) \
- PF1(i) \
- PF1(i + 2) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 8;
-
- kernel_fpu_begin();
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i,0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i,0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- XO2(i,0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- ST(i,0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r"(p2), "+r"(p3)
- :
- : "memory" );
-
- kernel_fpu_end();
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 8;
-
- kernel_fpu_begin();
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i,0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i,0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO2(i,0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- XO3(i,0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- ST(i,0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory" );
-
- kernel_fpu_end();
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 8;
-
- kernel_fpu_begin();
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- asm("" : "+r" (p4), "+r" (p5));
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i,0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i,0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- XO2(i,0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- PF4(i) \
- PF4(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO3(i,0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- XO4(i,0) \
- XO4(i + 1, 1) \
- XO4(i + 2, 2) \
- XO4(i + 3, 3) \
- ST(i,0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " addl $256, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- asm("" : "=r" (p4), "=r" (p5));
-
- kernel_fpu_end();
-}
-
static struct xor_block_template xor_block_pIII_sse = {
.name = "pIII_sse",
.do_2 = xor_sse_2,
@@ -827,26 +543,25 @@ static struct xor_block_template xor_block_pIII_sse = {
/* Also try the generic routines. */
#include <asm-generic/xor.h>
+/* We force the use of the SSE xor block because it can write around L2.
+ We may also be able to load into the L1 only depending on how the cpu
+ deals with a load to a line that is being prefetched. */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_8regs_p); \
- xor_speed(&xor_block_32regs); \
- xor_speed(&xor_block_32regs_p); \
AVX_XOR_SPEED; \
- if (cpu_has_xmm) \
+ if (cpu_has_xmm) { \
xor_speed(&xor_block_pIII_sse); \
- if (cpu_has_mmx) { \
+ xor_speed(&xor_block_sse_pf64); \
+ } else if (cpu_has_mmx) { \
xor_speed(&xor_block_pII_mmx); \
xor_speed(&xor_block_p5_mmx); \
+ } else { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
} \
} while (0)
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into the L1 only depending on how the cpu
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
- AVX_SELECT(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
-
#endif /* _ASM_X86_XOR_32_H */
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
index 87ac522c4af5..546f1e3b87cc 100644
--- a/arch/x86/include/asm/xor_64.h
+++ b/arch/x86/include/asm/xor_64.h
@@ -1,301 +1,6 @@
#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H
-/*
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-/*
- * Based on
- * High-speed RAID5 checksumming functions utilizing SSE instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-/*
- * x86-64 changes / gcc fixes from Andi Kleen.
- * Copyright 2002 Andi Kleen, SuSE Labs.
- *
- * This hasn't been optimized for the hammer yet, but there are likely
- * no advantages to be gotten from x86-64 here anyways.
- */
-
-#include <asm/i387.h>
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
-#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
-#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
-#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
-#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
-#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
-#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
-#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned int lines = bytes >> 8;
-
- kernel_fpu_begin();
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- LD(i, 0) \
- LD(i + 1, 1) \
- PF1(i) \
- PF1(i + 2) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
- : [inc] "r" (256UL)
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned int lines = bytes >> 8;
-
- kernel_fpu_begin();
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i, 0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- XO2(i, 0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+r" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
- : [inc] "r" (256UL)
- : "memory");
- kernel_fpu_end();
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned int lines = bytes >> 8;
-
- kernel_fpu_begin();
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i, 0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO2(i, 0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- XO3(i, 0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+c" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
- : [inc] "r" (256UL)
- : "memory" );
-
- kernel_fpu_end();
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned int lines = bytes >> 8;
-
- kernel_fpu_begin();
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i, 0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- XO2(i, 0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- PF4(i) \
- PF4(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO3(i, 0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- XO4(i, 0) \
- XO4(i + 1, 1) \
- XO4(i + 2, 2) \
- XO4(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
- " addq %[inc], %[p5] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+c" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
- [p5] "+r" (p5)
- : [inc] "r" (256UL)
- : "memory");
-
- kernel_fpu_end();
-}
-
static struct xor_block_template xor_block_sse = {
.name = "generic_sse",
.do_2 = xor_sse_2,
@@ -308,17 +13,15 @@ static struct xor_block_template xor_block_sse = {
/* Also try the AVX routines */
#include <asm/xor_avx.h>
+/* We force the use of the SSE xor block because it can write around L2.
+ We may also be able to load into the L1 only depending on how the cpu
+ deals with a load to a line that is being prefetched. */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
do { \
AVX_XOR_SPEED; \
+ xor_speed(&xor_block_sse_pf64); \
xor_speed(&xor_block_sse); \
} while (0)
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into the L1 only depending on how the cpu
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
- AVX_SELECT(&xor_block_sse)
-
#endif /* _ASM_X86_XOR_64_H */
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 58c829871c31..a0eab85ce7b8 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -4,66 +4,6 @@
#include <linux/types.h>
#include <asm/ioctls.h>
-/*
- * Machine Check support for x86
- */
-
-/* MCG_CAP register defines */
-#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
-#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
-#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
-#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
-#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
-#define MCG_EXT_CNT_SHIFT 16
-#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
-#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
-
-/* MCG_STATUS register defines */
-#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
-#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
-#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
-
-/* MCi_STATUS register defines */
-#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
-#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
-#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
-#define MCI_STATUS_AR (1ULL<<55) /* Action required */
-#define MCACOD 0xffff /* MCA Error Code */
-
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
-
-/* MCi_MISC register defines */
-#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
-#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
-#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
-#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
-#define MCI_MISC_ADDR_PHYS 2 /* physical address */
-#define MCI_MISC_ADDR_MEM 3 /* memory address */
-#define MCI_MISC_ADDR_GENERIC 7 /* generic */
-
-/* CTL2 register defines */
-#define MCI_CTL2_CMCI_EN (1ULL << 30)
-#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
-
-#define MCJ_CTX_MASK 3
-#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
-#define MCJ_CTX_RANDOM 0 /* inject context: random */
-#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
-#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
-#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
-#define MCJ_EXCEPTION 0x8 /* raise as exception */
-#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
-
/* Fields are zero when not available */
struct mce {
__u64 status;
@@ -87,35 +27,8 @@ struct mce {
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
};
-/*
- * This structure contains all data related to the MCE log. Also
- * carries a signature to make it easier to find from external
- * debugging tools. Each entry is only valid when its finished flag
- * is set.
- */
-
-#define MCE_LOG_LEN 32
-
-struct mce_log {
- char signature[12]; /* "MACHINECHECK" */
- unsigned len; /* = MCE_LOG_LEN */
- unsigned next;
- unsigned flags;
- unsigned recordlen; /* length of struct mce */
- struct mce entry[MCE_LOG_LEN];
-};
-
-#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
-
-#define MCE_LOG_SIGNATURE "MACHINECHECK"
-
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
#define MCE_GET_LOG_LEN _IOR('M', 2, int)
#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
-/* Software defined banks */
-#define MCE_EXTENDED_BANK 128
-#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
-#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
-
#endif /* _UAPI_ASM_X86_MCE_H */
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 433a59fb1a74..f26d2771846f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -103,6 +103,8 @@
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+#define MSR_IA32_POWER_CTL 0x000001fc
+
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
#define MSR_IA32_MC0_ADDR 0x00000402
@@ -194,6 +196,8 @@
/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL 0xc0010200
#define MSR_F15H_PERF_CTR 0xc0010201
+#define MSR_F15H_NB_PERF_CTL 0xc0010240
+#define MSR_F15H_NB_PERF_CTR 0xc0010241
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
@@ -272,6 +276,7 @@
#define MSR_IA32_PLATFORM_ID 0x00000017
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_EBC_FREQUENCY_ID 0x0000002c
+#define MSR_SMI_COUNT 0x00000034
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_IA32_TSC_ADJUST 0x0000003b
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 34e923a53762..ac3b3d002833 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -65,8 +65,7 @@ obj-$(CONFIG_X86_TSC) += trace_clock.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
-obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_OPTPROBES) += kprobes-opt.o
+obj-y += kprobes/
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index afdc3f756dea..c9876efecafb 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -240,7 +240,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
dw_apb_clockevent_pause(adev->timer);
if (system_state == SYSTEM_RUNNING) {
pr_debug("skipping APBT CPU %lu offline\n", cpu);
- } else if (adev) {
+ } else {
pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
dw_apb_clockevent_stop(adev->timer);
}
@@ -311,7 +311,6 @@ void __init apbt_time_init(void)
#ifdef CONFIG_SMP
int i;
struct sfi_timer_table_entry *p_mtmr;
- unsigned int percpu_timer;
struct apbt_dev *adev;
#endif
@@ -346,13 +345,10 @@ void __init apbt_time_init(void)
return;
}
pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
- if (num_possible_cpus() <= sfi_mtimer_num) {
- percpu_timer = 1;
+ if (num_possible_cpus() <= sfi_mtimer_num)
apbt_num_timers_used = num_possible_cpus();
- } else {
- percpu_timer = 0;
+ else
apbt_num_timers_used = 1;
- }
pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
/* here we set up per CPU timer data structure */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b994cc84aa7e..a5b4dce1b7ac 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1477,8 +1477,7 @@ void __init bsp_end_local_APIC_setup(void)
* Now that local APIC setup is completed for BP, configure the fault
* handling for interrupt remapping.
*/
- if (irq_remapping_enabled)
- irq_remap_enable_fault_handling();
+ irq_remap_enable_fault_handling();
}
@@ -2251,8 +2250,7 @@ static int lapic_suspend(void)
local_irq_save(flags);
disable_local_APIC();
- if (irq_remapping_enabled)
- irq_remapping_disable();
+ irq_remapping_disable();
local_irq_restore(flags);
return 0;
@@ -2268,16 +2266,15 @@ static void lapic_resume(void)
return;
local_irq_save(flags);
- if (irq_remapping_enabled) {
- /*
- * IO-APIC and PIC have their own resume routines.
- * We just mask them here to make sure the interrupt
- * subsystem is completely quiet while we enable x2apic
- * and interrupt-remapping.
- */
- mask_ioapic_entries();
- legacy_pic->mask_all();
- }
+
+ /*
+ * IO-APIC and PIC have their own resume routines.
+ * We just mask them here to make sure the interrupt
+ * subsystem is completely quiet while we enable x2apic
+ * and interrupt-remapping.
+ */
+ mask_ioapic_entries();
+ legacy_pic->mask_all();
if (x2apic_mode)
enable_x2apic();
@@ -2320,8 +2317,7 @@ static void lapic_resume(void)
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
- if (irq_remapping_enabled)
- irq_remapping_reenable(x2apic_mode);
+ irq_remapping_reenable(x2apic_mode);
local_irq_restore(flags);
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b739d398bb29..9ed796ccc32c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -68,22 +68,6 @@
#define for_each_irq_pin(entry, head) \
for (entry = head; entry; entry = entry->next)
-#ifdef CONFIG_IRQ_REMAP
-static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
-static inline bool irq_remapped(struct irq_cfg *cfg)
-{
- return cfg->irq_2_iommu.iommu != NULL;
-}
-#else
-static inline bool irq_remapped(struct irq_cfg *cfg)
-{
- return false;
-}
-static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
-{
-}
-#endif
-
/*
* Is the SiS APIC rmw bug present ?
* -1 = don't know, 0 = no, 1 = yes
@@ -300,9 +284,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
return cfg;
}
-static int alloc_irq_from(unsigned int from, int node)
+static int alloc_irqs_from(unsigned int from, unsigned int count, int node)
{
- return irq_alloc_desc_from(from, node);
+ return irq_alloc_descs_from(from, count, node);
}
static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
@@ -326,7 +310,7 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}
-static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
+void io_apic_eoi(unsigned int apic, unsigned int vector)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
writel(vector, &io_apic->eoi);
@@ -573,19 +557,10 @@ static void unmask_ioapic_irq(struct irq_data *data)
* Otherwise, we simulate the EOI message manually by changing the trigger
* mode to edge and then back to level, with RTE being masked during this.
*/
-static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
+void native_eoi_ioapic_pin(int apic, int pin, int vector)
{
if (mpc_ioapic_ver(apic) >= 0x20) {
- /*
- * Intr-remapping uses pin number as the virtual vector
- * in the RTE. Actual vector is programmed in
- * intr-remapping table entry. Hence for the io-apic
- * EOI we use the pin number.
- */
- if (cfg && irq_remapped(cfg))
- io_apic_eoi(apic, pin);
- else
- io_apic_eoi(apic, vector);
+ io_apic_eoi(apic, vector);
} else {
struct IO_APIC_route_entry entry, entry1;
@@ -606,14 +581,15 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
}
}
-static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
+void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
struct irq_pin_list *entry;
unsigned long flags;
raw_spin_lock_irqsave(&ioapic_lock, flags);
for_each_irq_pin(entry, cfg->irq_2_pin)
- __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg);
+ x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin,
+ cfg->vector);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -650,7 +626,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
}
raw_spin_lock_irqsave(&ioapic_lock, flags);
- __eoi_ioapic_pin(apic, pin, entry.vector, NULL);
+ x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -1304,25 +1280,18 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
fasteoi = false;
}
- if (irq_remapped(cfg)) {
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- irq_remap_modify_chip_defaults(chip);
+ if (setup_remapped_irq(irq, cfg, chip))
fasteoi = trigger != 0;
- }
hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
irq_set_chip_and_handler_name(irq, chip, hdl,
fasteoi ? "fasteoi" : "edge");
}
-static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
- unsigned int destination, int vector,
- struct io_apic_irq_attr *attr)
+int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
+ unsigned int destination, int vector,
+ struct io_apic_irq_attr *attr)
{
- if (irq_remapping_enabled)
- return setup_ioapic_remapped_entry(irq, entry, destination,
- vector, attr);
-
memset(entry, 0, sizeof(*entry));
entry->delivery_mode = apic->irq_delivery_mode;
@@ -1370,8 +1339,8 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
cfg->vector, irq, attr->trigger, attr->polarity, dest);
- if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
- pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
+ if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
+ pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
__clear_irq_vector(irq, cfg);
@@ -1479,9 +1448,6 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
struct IO_APIC_route_entry entry;
unsigned int dest;
- if (irq_remapping_enabled)
- return;
-
memset(&entry, 0, sizeof(entry));
/*
@@ -1513,9 +1479,63 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
ioapic_write_entry(ioapic_idx, pin, entry);
}
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
{
int i;
+
+ pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n");
+
+ for (i = 0; i <= nr_entries; i++) {
+ struct IO_APIC_route_entry entry;
+
+ entry = ioapic_read_entry(apic, i);
+
+ pr_debug(" %02x %02X ", i, entry.dest);
+ pr_cont("%1d %1d %1d %1d %1d "
+ "%1d %1d %02X\n",
+ entry.mask,
+ entry.trigger,
+ entry.irr,
+ entry.polarity,
+ entry.delivery_status,
+ entry.dest_mode,
+ entry.delivery_mode,
+ entry.vector);
+ }
+}
+
+void intel_ir_io_apic_print_entries(unsigned int apic,
+ unsigned int nr_entries)
+{
+ int i;
+
+ pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n");
+
+ for (i = 0; i <= nr_entries; i++) {
+ struct IR_IO_APIC_route_entry *ir_entry;
+ struct IO_APIC_route_entry entry;
+
+ entry = ioapic_read_entry(apic, i);
+
+ ir_entry = (struct IR_IO_APIC_route_entry *)&entry;
+
+ pr_debug(" %02x %04X ", i, ir_entry->index);
+ pr_cont("%1d %1d %1d %1d %1d "
+ "%1d %1d %X %02X\n",
+ ir_entry->format,
+ ir_entry->mask,
+ ir_entry->trigger,
+ ir_entry->irr,
+ ir_entry->polarity,
+ ir_entry->delivery_status,
+ ir_entry->index2,
+ ir_entry->zero,
+ ir_entry->vector);
+ }
+}
+
+__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+{
union IO_APIC_reg_00 reg_00;
union IO_APIC_reg_01 reg_01;
union IO_APIC_reg_02 reg_02;
@@ -1568,58 +1588,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
printk(KERN_DEBUG ".... IRQ redirection table:\n");
- if (irq_remapping_enabled) {
- printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
- " Pol Stat Indx2 Zero Vect:\n");
- } else {
- printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
- " Stat Dmod Deli Vect:\n");
- }
-
- for (i = 0; i <= reg_01.bits.entries; i++) {
- if (irq_remapping_enabled) {
- struct IO_APIC_route_entry entry;
- struct IR_IO_APIC_route_entry *ir_entry;
-
- entry = ioapic_read_entry(ioapic_idx, i);
- ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
- printk(KERN_DEBUG " %02x %04X ",
- i,
- ir_entry->index
- );
- pr_cont("%1d %1d %1d %1d %1d "
- "%1d %1d %X %02X\n",
- ir_entry->format,
- ir_entry->mask,
- ir_entry->trigger,
- ir_entry->irr,
- ir_entry->polarity,
- ir_entry->delivery_status,
- ir_entry->index2,
- ir_entry->zero,
- ir_entry->vector
- );
- } else {
- struct IO_APIC_route_entry entry;
-
- entry = ioapic_read_entry(ioapic_idx, i);
- printk(KERN_DEBUG " %02x %02X ",
- i,
- entry.dest
- );
- pr_cont("%1d %1d %1d %1d %1d "
- "%1d %1d %02X\n",
- entry.mask,
- entry.trigger,
- entry.irr,
- entry.polarity,
- entry.delivery_status,
- entry.dest_mode,
- entry.delivery_mode,
- entry.vector
- );
- }
- }
+ x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
}
__apicdebuginit(void) print_IO_APICs(void)
@@ -1921,30 +1890,14 @@ void __init enable_IO_APIC(void)
clear_IO_APIC();
}
-/*
- * Not an __init, needed by the reboot code
- */
-void disable_IO_APIC(void)
+void native_disable_io_apic(void)
{
/*
- * Clear the IO-APIC before rebooting:
- */
- clear_IO_APIC();
-
- if (!legacy_pic->nr_legacy_irqs)
- return;
-
- /*
* If the i8259 is routed through an IOAPIC
* Put that IOAPIC in virtual wire mode
* so legacy interrupts can be delivered.
- *
- * With interrupt-remapping, for now we will use virtual wire A mode,
- * as virtual wire B is little complex (need to configure both
- * IOAPIC RTE as well as interrupt-remapping table entry).
- * As this gets called during crash dump, keep this simple for now.
*/
- if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) {
+ if (ioapic_i8259.pin != -1) {
struct IO_APIC_route_entry entry;
memset(&entry, 0, sizeof(entry));
@@ -1964,12 +1917,25 @@ void disable_IO_APIC(void)
ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
}
+ if (cpu_has_apic || apic_from_smp_config())
+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
+
+}
+
+/*
+ * Not an __init, needed by the reboot code
+ */
+void disable_IO_APIC(void)
+{
/*
- * Use virtual wire A mode when interrupt remapping is enabled.
+ * Clear the IO-APIC before rebooting:
*/
- if (cpu_has_apic || apic_from_smp_config())
- disconnect_bsp_APIC(!irq_remapping_enabled &&
- ioapic_i8259.pin != -1);
+ clear_IO_APIC();
+
+ if (!legacy_pic->nr_legacy_irqs)
+ return;
+
+ x86_io_apic_ops.disable();
}
#ifdef CONFIG_X86_32
@@ -2322,12 +2288,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
apic = entry->apic;
pin = entry->pin;
- /*
- * With interrupt-remapping, destination information comes
- * from interrupt-remapping table entry.
- */
- if (!irq_remapped(cfg))
- io_apic_write(apic, 0x11 + pin*2, dest);
+
+ io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
@@ -2369,9 +2331,10 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
return 0;
}
-static int
-ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
+
+int native_ioapic_set_affinity(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force)
{
unsigned int dest, irq = data->irq;
unsigned long flags;
@@ -2548,33 +2511,6 @@ static void ack_apic_level(struct irq_data *data)
ioapic_irqd_unmask(data, cfg, masked);
}
-#ifdef CONFIG_IRQ_REMAP
-static void ir_ack_apic_edge(struct irq_data *data)
-{
- ack_APIC_irq();
-}
-
-static void ir_ack_apic_level(struct irq_data *data)
-{
- ack_APIC_irq();
- eoi_ioapic_irq(data->irq, data->chip_data);
-}
-
-static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
-{
- seq_printf(p, " IR-%s", data->chip->name);
-}
-
-static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
-{
- chip->irq_print_chip = ir_print_prefix;
- chip->irq_ack = ir_ack_apic_edge;
- chip->irq_eoi = ir_ack_apic_level;
-
- chip->irq_set_affinity = set_remapped_irq_affinity;
-}
-#endif /* CONFIG_IRQ_REMAP */
-
static struct irq_chip ioapic_chip __read_mostly = {
.name = "IO-APIC",
.irq_startup = startup_ioapic_irq,
@@ -2582,7 +2518,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_unmask = unmask_ioapic_irq,
.irq_ack = ack_apic_edge,
.irq_eoi = ack_apic_level,
- .irq_set_affinity = ioapic_set_affinity,
+ .irq_set_affinity = native_ioapic_set_affinity,
.irq_retrigger = ioapic_retrigger_irq,
};
@@ -2781,8 +2717,7 @@ static inline void __init check_timer(void)
* 8259A.
*/
if (pin1 == -1) {
- if (irq_remapping_enabled)
- panic("BIOS bug: timer not connected to IO-APIC");
+ panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
pin1 = pin2;
apic1 = apic2;
no_pin1 = 1;
@@ -2814,8 +2749,7 @@ static inline void __init check_timer(void)
clear_IO_APIC_pin(0, pin1);
goto out;
}
- if (irq_remapping_enabled)
- panic("timer doesn't work through Interrupt-remapped IO-APIC");
+ panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
local_irq_disable();
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
@@ -2982,37 +2916,58 @@ device_initcall(ioapic_init_ops);
/*
* Dynamic irq allocate and deallocation
*/
-unsigned int create_irq_nr(unsigned int from, int node)
+unsigned int __create_irqs(unsigned int from, unsigned int count, int node)
{
- struct irq_cfg *cfg;
+ struct irq_cfg **cfg;
unsigned long flags;
- unsigned int ret = 0;
- int irq;
+ int irq, i;
if (from < nr_irqs_gsi)
from = nr_irqs_gsi;
- irq = alloc_irq_from(from, node);
- if (irq < 0)
- return 0;
- cfg = alloc_irq_cfg(irq, node);
- if (!cfg) {
- free_irq_at(irq, NULL);
+ cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node);
+ if (!cfg)
return 0;
+
+ irq = alloc_irqs_from(from, count, node);
+ if (irq < 0)
+ goto out_cfgs;
+
+ for (i = 0; i < count; i++) {
+ cfg[i] = alloc_irq_cfg(irq + i, node);
+ if (!cfg[i])
+ goto out_irqs;
}
raw_spin_lock_irqsave(&vector_lock, flags);
- if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
- ret = irq;
+ for (i = 0; i < count; i++)
+ if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus()))
+ goto out_vecs;
raw_spin_unlock_irqrestore(&vector_lock, flags);
- if (ret) {
- irq_set_chip_data(irq, cfg);
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
- } else {
- free_irq_at(irq, cfg);
+ for (i = 0; i < count; i++) {
+ irq_set_chip_data(irq + i, cfg[i]);
+ irq_clear_status_flags(irq + i, IRQ_NOREQUEST);
}
- return ret;
+
+ kfree(cfg);
+ return irq;
+
+out_vecs:
+ for (i--; i >= 0; i--)
+ __clear_irq_vector(irq + i, cfg[i]);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+out_irqs:
+ for (i = 0; i < count; i++)
+ free_irq_at(irq + i, cfg[i]);
+out_cfgs:
+ kfree(cfg);
+ return 0;
+}
+
+unsigned int create_irq_nr(unsigned int from, int node)
+{
+ return __create_irqs(from, 1, node);
}
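
A hedged sketch of the intended multi-vector use (error handling elided; nvec and node as in native_setup_msi_irqs(), with destroy_irqs() below as the paired teardown):

	unsigned int irq;

	irq = __create_irqs(nr_irqs_gsi, nvec, node);	/* returns 0 on failure */
	if (!irq)
		return -ENOSPC;
	/* ... program the nvec consecutive vectors ... */
	destroy_irqs(irq, nvec);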
int create_irq(void)
@@ -3037,48 +2992,35 @@ void destroy_irq(unsigned int irq)
irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
- if (irq_remapped(cfg))
- free_remapped_irq(irq);
+ free_remapped_irq(irq);
+
raw_spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq, cfg);
raw_spin_unlock_irqrestore(&vector_lock, flags);
free_irq_at(irq, cfg);
}
+void destroy_irqs(unsigned int irq, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ destroy_irq(irq + i);
+}
+
/*
* MSI message composition
*/
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
- struct msi_msg *msg, u8 hpet_id)
+void native_compose_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id)
{
- struct irq_cfg *cfg;
- int err;
- unsigned dest;
-
- if (disable_apic)
- return -ENXIO;
-
- cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, apic->target_cpus());
- if (err)
- return err;
+ struct irq_cfg *cfg = irq_cfg(irq);
- err = apic->cpu_mask_to_apicid_and(cfg->domain,
- apic->target_cpus(), &dest);
- if (err)
- return err;
-
- if (irq_remapped(cfg)) {
- compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
- return err;
- }
+ msg->address_hi = MSI_ADDR_BASE_HI;
if (x2apic_enabled())
- msg->address_hi = MSI_ADDR_BASE_HI |
- MSI_ADDR_EXT_DEST_ID(dest);
- else
- msg->address_hi = MSI_ADDR_BASE_HI;
+ msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
msg->address_lo =
MSI_ADDR_BASE_LO |
@@ -3097,8 +3039,32 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
MSI_DATA_DELIVERY_FIXED:
MSI_DATA_DELIVERY_LOWPRI) |
MSI_DATA_VECTOR(cfg->vector);
+}
- return err;
+#ifdef CONFIG_PCI_MSI
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+ struct msi_msg *msg, u8 hpet_id)
+{
+ struct irq_cfg *cfg;
+ int err;
+ unsigned dest;
+
+ if (disable_apic)
+ return -ENXIO;
+
+ cfg = irq_cfg(irq);
+ err = assign_irq_vector(irq, cfg, apic->target_cpus());
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus(), &dest);
+ if (err)
+ return err;
+
+ x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+
+ return 0;
}
static int
@@ -3136,23 +3102,28 @@ static struct irq_chip msi_chip = {
.irq_retrigger = ioapic_retrigger_irq,
};
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ unsigned int irq_base, unsigned int irq_offset)
{
struct irq_chip *chip = &msi_chip;
struct msi_msg msg;
+ unsigned int irq = irq_base + irq_offset;
int ret;
ret = msi_compose_msg(dev, irq, &msg, -1);
if (ret < 0)
return ret;
- irq_set_msi_desc(irq, msidesc);
- write_msi_msg(irq, &msg);
+ irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
- if (irq_remapped(irq_get_chip_data(irq))) {
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- irq_remap_modify_chip_defaults(chip);
- }
+ /*
+ * MSI-X message is written per-IRQ, the offset is always 0.
+ * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
+ */
+ if (!irq_offset)
+ write_msi_msg(irq, &msg);
+
+ setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
@@ -3163,46 +3134,26 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- int node, ret, sub_handle, index = 0;
unsigned int irq, irq_want;
struct msi_desc *msidesc;
+ int node, ret;
- /* x86 doesn't support multiple MSI yet */
+ /* Multiple MSI vectors only supported with interrupt remapping */
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
node = dev_to_node(&dev->dev);
irq_want = nr_irqs_gsi;
- sub_handle = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
irq = create_irq_nr(irq_want, node);
if (irq == 0)
- return -1;
+ return -ENOSPC;
+
irq_want = irq + 1;
- if (!irq_remapping_enabled)
- goto no_ir;
- if (!sub_handle) {
- /*
- * allocate the consecutive block of IRTE's
- * for 'nvec'
- */
- index = msi_alloc_remapped_irq(dev, irq, nvec);
- if (index < 0) {
- ret = index;
- goto error;
- }
- } else {
- ret = msi_setup_remapped_irq(dev, irq, index,
- sub_handle);
- if (ret < 0)
- goto error;
- }
-no_ir:
- ret = setup_msi_irq(dev, msidesc, irq);
+ ret = setup_msi_irq(dev, msidesc, irq, 0);
if (ret < 0)
goto error;
- sub_handle++;
}
return 0;
@@ -3298,26 +3249,19 @@ static struct irq_chip hpet_msi_type = {
.irq_retrigger = ioapic_retrigger_irq,
};
-int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
+int default_setup_hpet_msi(unsigned int irq, unsigned int id)
{
struct irq_chip *chip = &hpet_msi_type;
struct msi_msg msg;
int ret;
- if (irq_remapping_enabled) {
- ret = setup_hpet_msi_remapped(irq, id);
- if (ret)
- return ret;
- }
-
ret = msi_compose_msg(NULL, irq, &msg, id);
if (ret < 0)
return ret;
hpet_msi_write(irq_get_handler_data(irq), &msg);
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- if (irq_remapped(irq_get_chip_data(irq)))
- irq_remap_modify_chip_defaults(chip);
+ setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
return 0;
@@ -3683,10 +3627,7 @@ void __init setup_ioapic_dest(void)
else
mask = apic->target_cpus();
- if (irq_remapping_enabled)
- set_remapped_irq_affinity(idata, mask, false);
- else
- ioapic_set_affinity(idata, mask, false);
+ x86_io_apic_ops.set_affinity(idata, mask, false);
}
}
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index cce91bf26676..7434d8556d09 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -106,7 +106,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
unsigned long mask = cpumask_bits(cpumask)[0];
unsigned long flags;
- if (WARN_ONCE(!mask, "empty IPI mask"))
+ if (!mask)
return;
local_irq_save(flags);
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index e03a1e180e81..562a76d433c8 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
}
early_param("x2apic_phys", set_x2apic_phys_mode);
-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static bool x2apic_fadt_phys(void)
{
- if (x2apic_phys)
- return x2apic_enabled();
- else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
- (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
- x2apic_enabled()) {
+ if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
printk(KERN_DEBUG "System requires x2apic physical mode\n");
- return 1;
+ return true;
}
- else
- return 0;
+ return false;
+}
+
+static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
}
static void
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)
static int x2apic_phys_probe(void)
{
- if (x2apic_mode && x2apic_phys)
+ if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
return 1;
return apic == &apic_x2apic_phys;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 8cfade9510a4..794f6eb54cd3 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,7 +5,7 @@
*
* SGI UV APIC functions (note: not an Intel compatible APIC)
*
- * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/cpumask.h>
#include <linux/hardirq.h>
@@ -91,10 +91,16 @@ static int __init early_get_pnodeid(void)
m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
uv_min_hub_revision_id = node_id.s.revision;
- if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
- uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
- if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+ switch (node_id.s.part_number) {
+ case UV2_HUB_PART_NUMBER:
+ case UV2_HUB_PART_NUMBER_X:
uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+ break;
+ case UV3_HUB_PART_NUMBER:
+ case UV3_HUB_PART_NUMBER_X:
+ uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+ break;
+ }
uv_hub_info->hub_revision = uv_min_hub_revision_id;
pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
@@ -130,13 +136,16 @@ static void __init uv_set_apicid_hibit(void)
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
- int pnodeid, is_uv1, is_uv2;
+ int pnodeid, is_uv1, is_uv2, is_uv3;
is_uv1 = !strcmp(oem_id, "SGI");
is_uv2 = !strcmp(oem_id, "SGI2");
- if (is_uv1 || is_uv2) {
+ is_uv3 = !strncmp(oem_id, "SGI3", 4); /* there are varieties of UV3 */
+ if (is_uv1 || is_uv2 || is_uv3) {
uv_hub_info->hub_revision =
- is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
+ (is_uv1 ? UV1_HUB_REVISION_BASE :
+ (is_uv2 ? UV2_HUB_REVISION_BASE :
+ UV3_HUB_REVISION_BASE));
pnodeid = early_get_pnodeid();
early_get_apic_pnode_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -450,14 +459,17 @@ static __init void map_high(char *id, unsigned long base, int pshift,
paddr = base << pshift;
bytes = (1UL << bshift) * (max_pnode + 1);
- printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
- paddr + bytes);
+ if (!paddr) {
+ pr_info("UV: Map %s_HI base address NULL\n", id);
+ return;
+ }
+ pr_info("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
if (map_type == map_uc)
init_extra_mapping_uc(paddr, bytes);
else
init_extra_mapping_wb(paddr, bytes);
-
}
+
static __init void map_gru_high(int max_pnode)
{
union uvh_rh_gam_gru_overlay_config_mmr_u gru;
@@ -468,7 +480,8 @@ static __init void map_gru_high(int max_pnode)
map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
gru_start_paddr = ((u64)gru.s.base << shift);
gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
-
+ } else {
+ pr_info("UV: GRU disabled\n");
}
}
@@ -480,23 +493,146 @@ static __init void map_mmr_high(int max_pnode)
mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
if (mmr.s.enable)
map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
+ else
+ pr_info("UV: MMR disabled\n");
+}
+
+/*
+ * Sharing one accessor for index 0 and 1 works because the two versions of
+ * the MMIOH OVERLAY and REDIRECT MMRs have identical layouts on UV3.
+ */
+struct mmioh_config {
+ unsigned long overlay;
+ unsigned long redirect;
+ char *id;
+};
+
+static __initdata struct mmioh_config mmiohs[] = {
+ {
+ UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
+ UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
+ "MMIOH0"
+ },
+ {
+ UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
+ UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
+ "MMIOH1"
+ },
+};
+
+static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
+{
+ union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
+ unsigned long mmr;
+ unsigned long base;
+ int i, n, shift, m_io, max_io;
+ int nasid, lnasid, fi, li;
+ char *id;
+
+ id = mmiohs[index].id;
+ overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
+ pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
+ id, overlay.v, overlay.s3.base, overlay.s3.m_io);
+ if (!overlay.s3.enable) {
+ pr_info("UV: %s disabled\n", id);
+ return;
+ }
+
+ shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
+ base = (unsigned long)overlay.s3.base;
+ m_io = overlay.s3.m_io;
+ mmr = mmiohs[index].redirect;
+ n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
+ min_pnode *= 2; /* convert to NASID */
+ max_pnode *= 2;
+ max_io = lnasid = fi = li = -1;
+
+ for (i = 0; i < n; i++) {
+ union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;
+
+ redirect.v = uv_read_local_mmr(mmr + i * 8);
+ nasid = redirect.s3.nasid;
+ if (nasid < min_pnode || max_pnode < nasid)
+ nasid = -1; /* invalid NASID */
+
+ if (nasid == lnasid) {
+ li = i;
+ if (i != n-1) /* last entry check */
+ continue;
+ }
+
+ /* check if we have a cached (or last) redirect to print */
+ if (lnasid != -1 || (i == n-1 && nasid != -1)) {
+ unsigned long addr1, addr2;
+ int f, l;
+
+ if (lnasid == -1) {
+ f = l = i;
+ lnasid = nasid;
+ } else {
+ f = fi;
+ l = li;
+ }
+ addr1 = (base << shift) +
+ f * (unsigned long)(1 << m_io);
+ addr2 = (base << shift) +
+ (l + 1) * (unsigned long)(1 << m_io);
+ pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
+ id, f, l, lnasid, addr1, addr2);
+ if (max_io < l)
+ max_io = l;
+ }
+ fi = li = i;
+ lnasid = nasid;
+ }
+
+ pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
+ id, base, shift, m_io, max_io);
+
+ if (max_io >= 0)
+ map_high(id, base, shift, m_io, max_io, map_uc);
}
-static __init void map_mmioh_high(int max_pnode)
+static __init void map_mmioh_high(int min_pnode, int max_pnode)
{
union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
- int shift;
+ unsigned long mmr, base;
+ int shift, enable, m_io, n_io;
- mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
- if (is_uv1_hub() && mmioh.s1.enable) {
- shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
- map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
- max_pnode, map_uc);
+ if (is_uv3_hub()) {
+ /* Map both MMIOH Regions */
+ map_mmioh_high_uv3(0, min_pnode, max_pnode);
+ map_mmioh_high_uv3(1, min_pnode, max_pnode);
+ return;
}
- if (is_uv2_hub() && mmioh.s2.enable) {
+
+ if (is_uv1_hub()) {
+ mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
+ shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ mmioh.v = uv_read_local_mmr(mmr);
+ enable = !!mmioh.s1.enable;
+ base = mmioh.s1.base;
+ m_io = mmioh.s1.m_io;
+ n_io = mmioh.s1.n_io;
+ } else if (is_uv2_hub()) {
+ mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
- map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
- max_pnode, map_uc);
+ mmioh.v = uv_read_local_mmr(mmr);
+ enable = !!mmioh.s2.enable;
+ base = mmioh.s2.base;
+ m_io = mmioh.s2.m_io;
+ n_io = mmioh.s2.n_io;
+ } else
+ return;
+
+ if (enable) {
+ max_pnode &= (1 << n_io) - 1;
+ pr_info(
+ "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
+ base, shift, n_io, m_io, max_pnode);
+ map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
+ } else {
+ pr_info("UV: MMIOH disabled\n");
}
}
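
map_mmioh_high_uv3() above coalesces consecutive redirect entries with the same NASID and prints each run's address window, derived from the overlay base, the base shift, and the per-entry window size 1 << m_io. A worked example of the addr1/addr2 arithmetic follows; base, shift and m_io are invented sample values, not real MMR contents.

/* Worked example of the addr1/addr2 computation in map_mmioh_high_uv3(). */
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x900UL;	/* overlay.s3.base (sample)            */
	int shift = 26;			/* ..._CONFIG0_MMR_BASE_SHFT (sample)  */
	int m_io  = 26;			/* overlay.s3.m_io (sample)            */
	int f = 0, l = 3;		/* first/last redirect entries of a run */

	unsigned long addr1 = (base << shift) + f * (unsigned long)(1 << m_io);
	unsigned long addr2 = (base << shift) + (l + 1) * (unsigned long)(1 << m_io);

	/* Mirrors the pr_info() format in the function above. */
	printf("ADDR 0x%016lx - 0x%016lx\n", addr1, addr2);
	return 0;
}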
@@ -724,42 +860,41 @@ void uv_nmi_init(void)
void __init uv_system_init(void)
{
union uvh_rh_gam_config_mmr_u m_n_config;
- union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
union uvh_node_id_u node_id;
unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
- int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
- int gnode_extra, max_pnode = 0;
+ int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+ int gnode_extra, min_pnode = 999999, max_pnode = -1;
unsigned long mmr_base, present, paddr;
- unsigned short pnode_mask, pnode_io_mask;
+ unsigned short pnode_mask;
+ char *hub = (is_uv1_hub() ? "UV1" :
+ (is_uv2_hub() ? "UV2" :
+ "UV3"));
- printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
+ pr_info("UV: Found %s hub\n", hub);
map_low_mmrs();
m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
- mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
- n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
+ pnode_mask = (1 << n_val) - 1;
mmr_base =
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
~UV_MMR_ENABLE;
- pnode_mask = (1 << n_val) - 1;
- pnode_io_mask = (1 << n_io) - 1;
node_id.v = uv_read_local_mmr(UVH_NODE_ID);
gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
gnode_upper = ((unsigned long)gnode_extra << m_val);
- printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
- n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);
+ pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x\n",
+ n_val, m_val, pnode_mask, gnode_upper, gnode_extra);
- printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
+ pr_info("UV: global MMR base 0x%lx\n", mmr_base);
for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
uv_possible_blades +=
hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
/* uv_num_possible_blades() is really the hub count */
- printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
+ pr_info("UV: Found %d blades, %d hubs\n",
is_uv1_hub() ? uv_num_possible_blades() :
(uv_num_possible_blades() + 1) / 2,
uv_num_possible_blades());
@@ -794,6 +929,7 @@ void __init uv_system_init(void)
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
spin_lock_init(&uv_blade_info[blade].nmi_lock);
+ min_pnode = min(pnode, min_pnode);
max_pnode = max(pnode, max_pnode);
blade++;
}
@@ -856,7 +992,7 @@ void __init uv_system_init(void)
map_gru_high(max_pnode);
map_mmr_high(max_pnode);
- map_mmioh_high(max_pnode & pnode_io_mask);
+ map_mmioh_high(min_pnode, max_pnode);
uv_cpu_init();
uv_scir_register_cpu_notifier();
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index d65464e43503..66b5faffe14a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -232,6 +232,7 @@
#include <linux/acpi.h>
#include <linux/syscore_ops.h>
#include <linux/i8253.h>
+#include <linux/cpuidle.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
@@ -360,13 +361,35 @@ struct apm_user {
* idle percentage above which bios idle calls are done
*/
#ifdef CONFIG_APM_CPU_IDLE
-#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
#define DEFAULT_IDLE_THRESHOLD 95
#else
#define DEFAULT_IDLE_THRESHOLD 100
#endif
#define DEFAULT_IDLE_PERIOD (100 / 3)
+static int apm_cpu_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
+
+static struct cpuidle_driver apm_idle_driver = {
+ .name = "apm_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states = {
+ { /* entry 0 is for polling */ },
+ { /* entry 1 is for APM idle */
+ .name = "APM",
+ .desc = "APM idle",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 250, /* WAG */
+ .target_residency = 500, /* WAG */
+ .enter = &apm_cpu_idle
+ },
+ },
+ .state_count = 2,
+};
+
+static struct cpuidle_device apm_cpuidle_device;
+
/*
* Local variables
*/
@@ -377,7 +400,6 @@ static struct {
static int clock_slowed;
static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
-static int set_pm_idle;
static int suspends_pending;
static int standbys_pending;
static int ignore_sys_suspend;
@@ -884,8 +906,6 @@ static void apm_do_busy(void)
#define IDLE_CALC_LIMIT (HZ * 100)
#define IDLE_LEAKY_MAX 16
-static void (*original_pm_idle)(void) __read_mostly;
-
/**
* apm_cpu_idle - cpu idling for APM capable Linux
*
@@ -894,35 +914,36 @@ static void (*original_pm_idle)(void) __read_mostly;
* Furthermore it calls the system default idle routine.
*/
-static void apm_cpu_idle(void)
+static int apm_cpu_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
static int use_apm_idle; /* = 0 */
static unsigned int last_jiffies; /* = 0 */
static unsigned int last_stime; /* = 0 */
+ cputime_t stime;
int apm_idle_done = 0;
unsigned int jiffies_since_last_check = jiffies - last_jiffies;
unsigned int bucket;
- WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
recalc:
+ task_cputime(current, NULL, &stime);
if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
use_apm_idle = 0;
- last_jiffies = jiffies;
- last_stime = current->stime;
} else if (jiffies_since_last_check > idle_period) {
unsigned int idle_percentage;
- idle_percentage = current->stime - last_stime;
+ idle_percentage = stime - last_stime;
idle_percentage *= 100;
idle_percentage /= jiffies_since_last_check;
use_apm_idle = (idle_percentage > idle_threshold);
if (apm_info.forbid_idle)
use_apm_idle = 0;
- last_jiffies = jiffies;
- last_stime = current->stime;
}
+ last_jiffies = jiffies;
+ last_stime = stime;
+
bucket = IDLE_LEAKY_MAX;
while (!need_resched()) {
@@ -950,10 +971,7 @@ recalc:
break;
}
}
- if (original_pm_idle)
- original_pm_idle();
- else
- default_idle();
+ default_idle();
local_irq_disable();
jiffies_since_last_check = jiffies - last_jiffies;
if (jiffies_since_last_check > idle_period)
@@ -963,7 +981,7 @@ recalc:
if (apm_idle_done)
apm_do_busy();
- local_irq_enable();
+ return index;
}
/**
@@ -2381,9 +2399,9 @@ static int __init apm_init(void)
if (HZ != 100)
idle_period = (idle_period * HZ) / 100;
if (idle_threshold < 100) {
- original_pm_idle = pm_idle;
- pm_idle = apm_cpu_idle;
- set_pm_idle = 1;
+ if (!cpuidle_register_driver(&apm_idle_driver))
+ if (cpuidle_register_device(&apm_cpuidle_device))
+ cpuidle_unregister_driver(&apm_idle_driver);
}
return 0;
@@ -2393,15 +2411,9 @@ static void __exit apm_exit(void)
{
int error;
- if (set_pm_idle) {
- pm_idle = original_pm_idle;
- /*
- * We are about to unload the current idle thread pm callback
- * (pm_idle), Wait for all processors to update cached/local
- * copies of pm_idle before proceeding.
- */
- kick_all_cpus_sync();
- }
+ cpuidle_unregister_device(&apm_cpuidle_device);
+ cpuidle_unregister_driver(&apm_idle_driver);
+
if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
&& (apm_info.connection_version > 0x0100)) {
error = apm_engage_power_management(APM_DEVICE_ALL, 0);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15239fffd6fe..782c456eaa01 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -364,9 +364,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
#endif
}
-int amd_get_nb_id(int cpu)
+u16 amd_get_nb_id(int cpu)
{
- int id = 0;
+ u16 id = 0;
#ifdef CONFIG_SMP
id = per_cpu(cpu_llc_id, cpu);
#endif
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a48..af6455e3fcc9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
#include <asm/paravirt.h>
#include <asm/alternative.h>
-static int __init no_halt(char *s)
-{
- WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
- boot_cpu_data.hlt_works_ok = 0;
- return 1;
-}
-
-__setup("no-hlt", no_halt);
-
static int __init no_387(char *s)
{
boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
pr_warn("Hmm, FPU with FDIV bug\n");
}
-static void __init check_hlt(void)
-{
- if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
- return;
-
- pr_info("Checking 'hlt' instruction... ");
- if (!boot_cpu_data.hlt_works_ok) {
- pr_cont("disabled\n");
- return;
- }
- halt();
- halt();
- halt();
- halt();
- pr_cont("OK\n");
-}
-
/*
* Check whether we are able to run this kernel safely on SMP.
*
@@ -129,7 +103,6 @@ void __init check_bugs(void)
print_cpu_info(&boot_cpu_data);
#endif
check_config();
- check_hlt();
init_utsname()->machine[1] =
'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
alternative_instructions();
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index a8f8fa9769d6..1e7e84a02eba 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -79,3 +79,10 @@ void __init init_hypervisor_platform(void)
if (x86_hyper->init_platform)
x86_hyper->init_platform();
}
+
+bool __init hypervisor_x2apic_available(void)
+{
+ return x86_hyper &&
+ x86_hyper->x2apic_available &&
+ x86_hyper->x2apic_available();
+}
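
hypervisor_x2apic_available() above deliberately guards each step: a hypervisor must be detected, it must provide the hook, and the hook must return true. A minimal model of that optional-callback chain; the struct and callback here are invented stand-ins:

/* Sketch of the guarded optional-callback chain. */
#include <stdbool.h>
#include <stdio.h>

struct hyper_sketch {
	const char *name;
	bool (*x2apic_available)(void);	/* may be NULL */
};

static bool vmware_like_cb(void) { return true; }

int main(void)
{
	struct hyper_sketch detected = { "VMware", vmware_like_cb };
	struct hyper_sketch *x86_hyper = &detected;	/* NULL on bare metal */

	bool avail = x86_hyper &&
		     x86_hyper->x2apic_available &&
		     x86_hyper->x2apic_available();
	printf("hypervisor x2apic available: %s\n", avail ? "yes" : "no");
	return 0;
}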
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index fe9edec6698a..7c6f7d548c0f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -298,8 +298,7 @@ struct _cache_attr {
unsigned int);
};
-#ifdef CONFIG_AMD_NB
-
+#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
* L3 cache descriptors
*/
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
static struct _cache_attr subcaches =
__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
-#else /* CONFIG_AMD_NB */
+#else
#define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB */
+#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1227,7 +1226,7 @@ static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
.notifier_call = cacheinfo_cpu_callback,
};
-static int __cpuinit cache_sysfs_init(void)
+static int __init cache_sysfs_init(void)
{
int i;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 80dbda84f1c3..fc7608a89d93 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -512,11 +512,8 @@ int mce_available(struct cpuinfo_x86 *c)
static void mce_schedule_work(void)
{
- if (!mce_ring_empty()) {
- struct work_struct *work = &__get_cpu_var(mce_work);
- if (!work_pending(work))
- schedule_work(work);
- }
+ if (!mce_ring_empty())
+ schedule_work(&__get_cpu_var(mce_work));
}
DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -1351,12 +1348,7 @@ int mce_notify_irq(void)
/* wake processes polling /dev/mcelog */
wake_up_interruptible(&mce_chrdev_wait);
- /*
- * There is no risk of missing notifications because
- * work_pending is always cleared before the function is
- * executed.
- */
- if (mce_helper[0] && !work_pending(&mce_trigger_work))
+ if (mce_helper[0])
schedule_work(&mce_trigger_work);
if (__ratelimit(&ratelimit))
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 0a630dd4b620..a7d26d83fb70 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -14,10 +14,15 @@
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
+#include <asm/desc.h>
+#include <asm/idle.h>
+#include <asm/irq_regs.h>
struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -30,6 +35,13 @@ static bool __init ms_hyperv_platform(void)
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
+ /*
+ * Xen emulates Hyper-V to support enlightened Windows.
+ * Check first whether we are running on a Xen hypervisor.
+ */
+ if (xen_cpuid_base())
+ return false;
+
cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
&eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
@@ -68,7 +80,14 @@ static void __init ms_hyperv_init_platform(void)
printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
ms_hyperv.features, ms_hyperv.hints);
- clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+ clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+#if IS_ENABLED(CONFIG_HYPERV)
+ /*
+ * Set up the IDT for the hypervisor callback.
+ */
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+#endif
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -77,3 +96,36 @@ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
.init_platform = ms_hyperv_init_platform,
};
EXPORT_SYMBOL(x86_hyper_ms_hyperv);
+
+#if IS_ENABLED(CONFIG_HYPERV)
+static int vmbus_irq = -1;
+static irq_handler_t vmbus_isr;
+
+void hv_register_vmbus_handler(int irq, irq_handler_t handler)
+{
+ vmbus_irq = irq;
+ vmbus_isr = handler;
+}
+
+void hyperv_vector_handler(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct irq_desc *desc;
+
+ irq_enter();
+ exit_idle();
+
+ desc = irq_to_desc(vmbus_irq);
+
+ if (desc)
+ generic_handle_irq_desc(vmbus_irq, desc);
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+#else
+void hv_register_vmbus_handler(int irq, irq_handler_t handler)
+{
+}
+#endif
+EXPORT_SYMBOL_GPL(hv_register_vmbus_handler);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 6774c17a5576..bf0f01aea994 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -829,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
- hwc->event_base_rdpmc = hwc->idx;
+ hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
}
}
@@ -1310,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = {
.attrs = NULL,
};
-struct perf_pmu_events_attr {
- struct device_attribute attr;
- u64 id;
-};
-
/*
* Remove all undefined events (x86_pmu.event_map(id) == 0)
* out of events_attr attributes.
@@ -1348,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
-#define EVENT_ATTR(_name, _id) \
-static struct perf_pmu_events_attr EVENT_VAR(_id) = { \
- .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
- .id = PERF_COUNT_HW_##_id, \
-};
+#define EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \
+ events_sysfs_show)
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
EVENT_ATTR(instructions, INSTRUCTIONS );
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 115c1ea97746..7f5c75c2afdd 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -325,6 +325,8 @@ struct x86_pmu {
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
+ int (*addr_offset)(int index, bool eventsel);
+ int (*rdpmc_index)(int index);
u64 (*event_map)(int);
int max_events;
int num_counters;
@@ -446,28 +448,21 @@ extern u64 __read_mostly hw_cache_extra_regs
u64 x86_perf_event_update(struct perf_event *event);
-static inline int x86_pmu_addr_offset(int index)
+static inline unsigned int x86_pmu_config_addr(int index)
{
- int offset;
-
- /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
- alternative_io(ASM_NOP2,
- "shll $1, %%eax",
- X86_FEATURE_PERFCTR_CORE,
- "=a" (offset),
- "a" (index));
-
- return offset;
+ return x86_pmu.eventsel + (x86_pmu.addr_offset ?
+ x86_pmu.addr_offset(index, true) : index);
}
-static inline unsigned int x86_pmu_config_addr(int index)
+static inline unsigned int x86_pmu_event_addr(int index)
{
- return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+ return x86_pmu.perfctr + (x86_pmu.addr_offset ?
+ x86_pmu.addr_offset(index, false) : index);
}
-static inline unsigned int x86_pmu_event_addr(int index)
+static inline int x86_pmu_rdpmc_index(int index)
{
- return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+ return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
int x86_setup_perfctr(struct perf_event *event);
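
The rewritten helpers above fall back to a plain linear index when a PMU leaves addr_offset/rdpmc_index NULL; AMD installs both callbacks in the next file. A sketch of the fallback dispatch, with illustrative MSR bases rather than authoritative values:

/* Without a callback the offset is just the index; with one, the PMU
 * supplies its own spacing. */
#include <stdbool.h>
#include <stdio.h>

static int (*addr_offset)(int index, bool eventsel);	/* may be NULL */

static unsigned int config_addr(unsigned int eventsel_base, int index)
{
	return eventsel_base + (addr_offset ? addr_offset(index, true) : index);
}

static int double_spaced(int index, bool eventsel) { return index << 1; }

int main(void)
{
	printf("legacy:   %#x\n", config_addr(0xc0010000, 2));	/* +2 */
	addr_offset = double_spaced;
	printf("extended: %#x\n", config_addr(0xc0010200, 2));	/* +4 */
	return 0;
}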
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c93bc4e813a0..dfdab42aed27 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event)
return amd_perfmon_event_map[hw_event];
}
-static int amd_pmu_hw_config(struct perf_event *event)
+static struct event_constraint *amd_nb_event_constraint;
+
+/*
+ * Previously calculated offsets
+ */
+static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
+
+/*
+ * Legacy CPUs:
+ * 4 counters starting at 0xc0010000 each offset by 1
+ *
+ * CPUs with core performance counter extensions:
+ * 6 counters starting at 0xc0010200 each offset by 2
+ *
+ * CPUs with north bridge performance counter extensions:
+ * 4 additional counters starting at 0xc0010240 each offset by 2
+ * (indexed right above whichever core counter set is present)
+ */
+static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
- int ret;
+ int offset, first, base;
- /* pass precise event sampling to ibs: */
- if (event->attr.precise_ip && get_ibs_caps())
- return -ENOENT;
+ if (!index)
+ return index;
+
+ if (eventsel)
+ offset = event_offsets[index];
+ else
+ offset = count_offsets[index];
+
+ if (offset)
+ return offset;
+
+ if (amd_nb_event_constraint &&
+ test_bit(index, amd_nb_event_constraint->idxmsk)) {
+ /*
+ * calculate the offset of NB counters with respect to
+ * base eventsel or perfctr
+ */
+
+ first = find_first_bit(amd_nb_event_constraint->idxmsk,
+ X86_PMC_IDX_MAX);
+
+ if (eventsel)
+ base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
+ else
+ base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
+
+ offset = base + ((index - first) << 1);
+ } else if (!cpu_has_perfctr_core)
+ offset = index;
+ else
+ offset = index << 1;
+
+ if (eventsel)
+ event_offsets[index] = offset;
+ else
+ count_offsets[index] = offset;
+
+ return offset;
+}
+
+static inline int amd_pmu_rdpmc_index(int index)
+{
+ int ret, first;
+
+ if (!index)
+ return index;
+
+ ret = rdpmc_indexes[index];
- ret = x86_pmu_hw_config(event);
if (ret)
return ret;
- if (has_branch_stack(event))
- return -EOPNOTSUPP;
+ if (amd_nb_event_constraint &&
+ test_bit(index, amd_nb_event_constraint->idxmsk)) {
+ /*
+ * according to the manual, the ECX value of the NB counters is
+ * the index of the NB counter (0, 1, 2 or 3) plus 6
+ */
+
+ first = find_first_bit(amd_nb_event_constraint->idxmsk,
+ X86_PMC_IDX_MAX);
+ ret = index - first + 6;
+ } else
+ ret = index;
+
+ rdpmc_indexes[index] = ret;
+
+ return ret;
+}
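
Putting sample numbers to the layout comment above amd_pmu_addr_offset(): with the core extension, counter i is spaced by (i << 1) from 0xc0010200; an NB counter is rebased to the NB MSR block, and its RDPMC ECX value is its position within the NB constraint mask plus 6. A worked example, assuming the NB counters sit at indexes 6..9 (the amd_NBPMC96 mask 0x3C0):

/* Worked example for amd_pmu_addr_offset() / amd_pmu_rdpmc_index(). */
#include <stdio.h>

int main(void)
{
	int first_nb = 6;	/* find_first_bit(0x3C0) */
	int index = 7;		/* second NB counter     */

	/* core-extension counters: index << 1 from 0xc0010200 */
	unsigned int core_msr = 0xc0010200 + (3 << 1);

	/* NB counters: rebased to 0xc0010240, then (index - first) << 1 */
	unsigned int nb_msr = 0xc0010240 + ((index - first_nb) << 1);

	int rdpmc_ecx = index - first_nb + 6;	/* NB position + 6 */

	printf("core ctl MSR: %#x\n", core_msr);	/* 0xc0010206 */
	printf("nb   ctl MSR: %#x\n", nb_msr);		/* 0xc0010242 */
	printf("nb rdpmc ecx: %d\n", rdpmc_ecx);	/* 7          */
	return 0;
}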
+static int amd_core_hw_config(struct perf_event *event)
+{
if (event->attr.exclude_host && event->attr.exclude_guest)
/*
* When HO == GO == 1 the hardware treats that as GO == HO == 0
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event)
event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
ARCH_PERFMON_EVENTSEL_OS);
else if (event->attr.exclude_host)
- event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+ event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
else if (event->attr.exclude_guest)
- event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+ event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
+
+ return 0;
+}
+
+/*
+ * NB counters do not support the following event select bits:
+ * Host/Guest only
+ * Counter mask
+ * Invert counter mask
+ * Edge detect
+ * OS/User mode
+ */
+static int amd_nb_hw_config(struct perf_event *event)
+{
+ /* for NB, we only allow system-wide counting mode */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_host || event->attr.exclude_guest)
+ return -EINVAL;
- if (event->attr.type != PERF_TYPE_RAW)
- return 0;
+ event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+ ARCH_PERFMON_EVENTSEL_OS);
- event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+ if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
+ ARCH_PERFMON_EVENTSEL_INT))
+ return -EINVAL;
return 0;
}
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
return (hwc->config & 0xe0) == 0xe0;
}
+static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
+{
+ return amd_nb_event_constraint && amd_is_nb_event(hwc);
+}
+
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
struct amd_nb *nb = cpuc->amd_nb;
@@ -188,20 +297,37 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
return nb && nb->nb_id != -1;
}
-static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
- struct perf_event *event)
+static int amd_pmu_hw_config(struct perf_event *event)
+{
+ int ret;
+
+ /* pass precise event sampling to ibs: */
+ if (event->attr.precise_ip && get_ibs_caps())
+ return -ENOENT;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ ret = x86_pmu_hw_config(event);
+ if (ret)
+ return ret;
+
+ if (event->attr.type == PERF_TYPE_RAW)
+ event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+ if (amd_is_perfctr_nb_event(&event->hw))
+ return amd_nb_hw_config(event);
+
+ return amd_core_hw_config(event);
+}
+
+static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
{
- struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
int i;
/*
- * only care about NB events
- */
- if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
- return;
-
- /*
* need to scan whole list because event may not have
* been assigned during scheduling
*
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
}
}
+static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
+{
+ int core_id = cpu_data(smp_processor_id()).cpu_core_id;
+
+ /* deliver interrupts only to this core */
+ if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
+ hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
+ hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
+ hwc->config |= (u64)(core_id) <<
+ AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
+ }
+}
+
/*
* AMD64 NorthBridge events need special treatment because
* counter access needs to be synchronized across all cores
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
*
* Given that resources are allocated (cmpxchg), they must be
* eventually freed for others to use. This is accomplished by
- * calling amd_put_event_constraints().
+ * calling __amd_put_nb_event_constraints()
*
* Non NB events are not impacted by this restriction.
*/
static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ struct event_constraint *c)
{
struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
- struct perf_event *old = NULL;
- int max = x86_pmu.num_counters;
- int i, j, k = -1;
+ struct perf_event *old;
+ int idx, new = -1;
- /*
- * if not NB event or no NB, then no constraints
- */
- if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
- return &unconstrained;
+ if (!c)
+ c = &unconstrained;
+
+ if (cpuc->is_fake)
+ return c;
/*
* detect if already present, if so reuse
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
* because of successive calls to x86_schedule_events() from
* hw_perf_group_sched_in() without hw_perf_enable()
*/
- for (i = 0; i < max; i++) {
- /*
- * keep track of first free slot
- */
- if (k == -1 && !nb->owners[i])
- k = i;
+ for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+ if (new == -1 || hwc->idx == idx)
+ /* assign free slot, prefer hwc->idx */
+ old = cmpxchg(nb->owners + idx, NULL, event);
+ else if (nb->owners[idx] == event)
+ /* event already present */
+ old = event;
+ else
+ continue;
+
+ if (old && old != event)
+ continue;
+
+ /* reassign to this slot */
+ if (new != -1)
+ cmpxchg(nb->owners + new, event, NULL);
+ new = idx;
/* already present, reuse */
- if (nb->owners[i] == event)
- goto done;
- }
- /*
- * not present, so grab a new slot
- * starting either at:
- */
- if (hwc->idx != -1) {
- /* previous assignment */
- i = hwc->idx;
- } else if (k != -1) {
- /* start from free slot found */
- i = k;
- } else {
- /*
- * event not found, no slot found in
- * first pass, try again from the
- * beginning
- */
- i = 0;
- }
- j = i;
- do {
- old = cmpxchg(nb->owners+i, NULL, event);
- if (!old)
+ if (old == event)
break;
- if (++i == max)
- i = 0;
- } while (i != j);
-done:
- if (!old)
- return &nb->event_constraints[i];
-
- return &emptyconstraint;
+ }
+
+ if (new == -1)
+ return &emptyconstraint;
+
+ if (amd_is_perfctr_nb_event(hwc))
+ amd_nb_interrupt_hw_config(hwc);
+
+ return &nb->event_constraints[new];
}
static struct amd_nb *amd_alloc_nb(int cpu)
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu)
struct amd_nb *nb;
int i, nb_id;
- cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+ cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
if (boot_cpu_data.x86_max_cores < 2)
return;
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu)
}
}
+static struct event_constraint *
+amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ /*
+ * if not NB event or no NB, then no constraints
+ */
+ if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
+ return &unconstrained;
+
+ return __amd_get_nb_event_constraints(cpuc, event,
+ amd_nb_event_constraint);
+}
+
+static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
+ __amd_put_nb_event_constraints(cpuc, event);
+}
+
PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
+static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
+static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
+
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
return &amd_f15_PMC20;
}
case AMD_EVENT_NB:
- /* not yet implemented */
- return &emptyconstraint;
+ return __amd_get_nb_event_constraints(cpuc, event,
+ amd_nb_event_constraint);
default:
return &emptyconstraint;
}
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = {
.schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
+ .addr_offset = amd_pmu_addr_offset,
+ .rdpmc_index = amd_pmu_rdpmc_index,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_counters = AMD64_NUM_COUNTERS,
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = {
static int setup_event_constraints(void)
{
- if (boot_cpu_data.x86 >= 0x15)
+ if (boot_cpu_data.x86 == 0x15)
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
return 0;
}
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void)
return 0;
}
+static int setup_perfctr_nb(void)
+{
+ if (!cpu_has_perfctr_nb)
+ return -ENODEV;
+
+ x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
+
+ if (cpu_has_perfctr_core)
+ amd_nb_event_constraint = &amd_NBPMC96;
+ else
+ amd_nb_event_constraint = &amd_NBPMC74;
+
+ printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
+
+ return 0;
+}
+
__init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void)
setup_event_constraints();
setup_perfctr_core();
+ setup_perfctr_nb();
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void)
* SVM is disabled the Guest-only bits still gets set and the counter
* will not count anything.
*/
- cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+ cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
/* Reload all events */
x86_pmu_disable_all();
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 93b9e1181f83..4914e94ad6e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
break;
case 28: /* Atom */
- case 54: /* Cedariew */
+ case 38: /* Lincroft */
+ case 39: /* Penwell */
+ case 53: /* Cloverview */
+ case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
case 58: /* IvyBridge */
+ case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index f2af39f5dc3d..4820c232a0b9 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
};
-static __initconst u64 p6_hw_cache_event_ids
+static u64 p6_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662a..e280253f6f94 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
{
seq_printf(m,
"fdiv_bug\t: %s\n"
- "hlt_bug\t\t: %s\n"
"f00f_bug\t: %s\n"
"coma_bug\t: %s\n"
"fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
"cpuid level\t: %d\n"
"wp\t\t: %s\n",
c->fdiv_bug ? "yes" : "no",
- c->hlt_works_ok ? "no" : "yes",
c->f00f_bug ? "yes" : "no",
c->coma_bug ? "yes" : "no",
c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d22d0c4edcfd..03a36321ec54 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -33,6 +33,9 @@
#define VMWARE_PORT_CMD_GETVERSION 10
#define VMWARE_PORT_CMD_GETHZ 45
+#define VMWARE_PORT_CMD_GETVCPU_INFO 68
+#define VMWARE_PORT_CMD_LEGACY_X2APIC 3
+#define VMWARE_PORT_CMD_VCPU_RESERVED 31
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
__asm__("inl (%%dx)" : \
@@ -125,10 +128,20 @@ static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
}
+/* Checks if the hypervisor supports x2apic without VT-d interrupt remapping. */
+static bool __init vmware_legacy_x2apic_available(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+ VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
+ return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 &&
+ (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
+}
+
const __refconst struct hypervisor_x86 x86_hyper_vmware = {
.name = "VMware",
.detect = vmware_platform,
.set_cpu_features = vmware_set_cpu_features,
.init_platform = vmware_platform_setup,
+ .x2apic_available = vmware_legacy_x2apic_available,
};
EXPORT_SYMBOL(x86_hyper_vmware);
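
vmware_legacy_x2apic_available() above requires bit 31 (VCPU_RESERVED) clear and bit 3 (LEGACY_X2APIC) set in the GETVCPU_INFO result. A model of that two-bit test, with an invented sample value standing in for the port read:

/* Models the mask check; eax is a made-up sample, not real port output. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LEGACY_X2APIC_BIT 3	/* VMWARE_PORT_CMD_LEGACY_X2APIC */
#define VCPU_RESERVED_BIT 31	/* VMWARE_PORT_CMD_VCPU_RESERVED */

int main(void)
{
	uint32_t eax = 1u << LEGACY_X2APIC_BIT;	/* bit 3 set, bit 31 clear */

	bool ok = (eax & (1u << VCPU_RESERVED_BIT)) == 0 &&
		  (eax & (1u << LEGACY_X2APIC_BIT)) != 0;
	printf("legacy x2apic available: %s\n", ok ? "yes" : "no");
	return 0;
}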
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 6ed91d9980e2..8831176aa5ef 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1091,11 +1091,18 @@ ENTRY(xen_failsafe_callback)
_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)
-BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
+BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
xen_evtchn_do_upcall)
#endif /* CONFIG_XEN */
+#if IS_ENABLED(CONFIG_HYPERV)
+
+BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+ hyperv_vector_handler)
+
+#endif /* CONFIG_HYPERV */
+
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index cb3c591339aa..048f2240f8e6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1454,11 +1454,16 @@ ENTRY(xen_failsafe_callback)
CFI_ENDPROC
END(xen_failsafe_callback)
-apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
+apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
#endif /* CONFIG_XEN */
+#if IS_ENABLED(CONFIG_HYPERV)
+apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
+ hyperv_callback_vector hyperv_vector_handler
+#endif /* CONFIG_HYPERV */
+
/*
* Some functions should be protected against kprobes
*/
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index c18f59d10101..6773c918b8cc 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -18,6 +18,7 @@
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
#include <asm/tlbflush.h>
+#include <asm/bootparam_utils.h>
static void __init i386_default_early_setup(void)
{
@@ -30,6 +31,8 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
+ sanitize_boot_params(&boot_params);
+
memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&__bss_stop) - __pa_symbol(&_text));
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 037df57a99ac..849fc9e63c2f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -25,6 +25,7 @@
#include <asm/kdebug.h>
#include <asm/e820.h>
#include <asm/bios_ebda.h>
+#include <asm/bootparam_utils.h>
static void __init zap_identity_mappings(void)
{
@@ -46,6 +47,7 @@ static void __init copy_bootdata(char *real_mode_data)
char * command_line;
memcpy(&boot_params, real_mode_data, sizeof boot_params);
+ sanitize_boot_params(&boot_params);
if (boot_params.hdr.cmd_line_ptr) {
command_line = __va(boot_params.hdr.cmd_line_ptr);
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c8932c79e78b..3c3f58a0808f 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -307,36 +307,45 @@ default_entry:
movl %eax,%cr0
/*
- * New page tables may be in 4Mbyte page mode and may
- * be using the global pages.
+ * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
+ * bits like NT set. This would confuse the debugger if this code is traced. So
+ * initialize them properly now before switching to protected mode. That means
+ * DF in particular (even though we have cleared it earlier after copying the
+ * command line) because GCC expects it.
+ */
+ pushl $0
+ popfl
+
+/*
+ * New page tables may be in 4Mbyte page mode and may be using the global pages.
*
- * NOTE! If we are on a 486 we may have no cr4 at all!
- * Specifically, cr4 exists if and only if CPUID exists
- * and has flags other than the FPU flag set.
+ * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
+ * if and only if CPUID exists and has flags other than the FPU flag set.
*/
+ movl $-1,pa(X86_CPUID) # preset CPUID level
movl $X86_EFLAGS_ID,%ecx
pushl %ecx
- popfl
+ popfl # set EFLAGS=ID
pushfl
- popl %eax
- pushl $0
- popfl
- pushfl
- popl %edx
- xorl %edx,%eax
- testl %ecx,%eax
- jz 6f # No ID flag = no CPUID = no CR4
+ popl %eax # get EFLAGS
+ testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remain set?
+ jz enable_paging # hw disallowed setting of ID bit
+ # which means no CPUID and no CR4
+
+ xorl %eax,%eax
+ cpuid
+ movl %eax,pa(X86_CPUID) # save largest std CPUID function
movl $1,%eax
cpuid
- andl $~1,%edx # Ignore CPUID.FPU
- jz 6f # No flags or only CPUID.FPU = no CR4
+ andl $~1,%edx # Ignore CPUID.FPU
+ jz enable_paging # No flags or only CPUID.FPU = no CR4
movl pa(mmu_cr4_features),%eax
movl %eax,%cr4
testb $X86_CR4_PAE, %al # check if PAE is enabled
- jz 6f
+ jz enable_paging
/* Check if extended functions are implemented */
movl $0x80000000, %eax
@@ -344,7 +353,7 @@ default_entry:
/* Value must be in the range 0x80000001 to 0x8000ffff */
subl $0x80000001, %eax
cmpl $(0x8000ffff-0x80000001), %eax
- ja 6f
+ ja enable_paging
/* Clear bogus XD_DISABLE bits */
call verify_cpu
@@ -353,7 +362,7 @@ default_entry:
cpuid
/* Execute Disable bit supported? */
btl $(X86_FEATURE_NX & 31), %edx
- jnc 6f
+ jnc enable_paging
/* Setup EFER (Extended Feature Enable Register) */
movl $MSR_EFER, %ecx
@@ -363,7 +372,7 @@ default_entry:
/* Make changes effective */
wrmsr
-6:
+enable_paging:
/*
* Enable paging
@@ -378,14 +387,6 @@ default_entry:
addl $__PAGE_OFFSET, %esp
/*
- * Initialize eflags. Some BIOS's leave bits like NT set. This would
- * confuse the debugger if this code is traced.
- * XXX - best to initialize before switching to protected mode.
- */
- pushl $0
- popfl
-
-/*
* start system 32-bit setup. We need to re-do some of the things done
* in 16-bit mode for the "real" operations.
*/
@@ -394,31 +395,11 @@ default_entry:
jz 1f # Did we do this already?
call *%eax
1:
-
-/* check if it is 486 or 386. */
+
/*
- * XXX - this does a lot of unnecessary setup. Alignment checks don't
- * apply at our cpl of 0 and the stack ought to be aligned already, and
- * we don't need to preserve eflags.
+ * Check if it is 486
*/
- movl $-1,X86_CPUID # -1 for no CPUID initially
- movb $3,X86 # at least 386
- pushfl # push EFLAGS
- popl %eax # get EFLAGS
- movl %eax,%ecx # save original EFLAGS
- xorl $0x240000,%eax # flip AC and ID bits in EFLAGS
- pushl %eax # copy to EFLAGS
- popfl # set EFLAGS
- pushfl # get new EFLAGS
- popl %eax # put it in eax
- xorl %ecx,%eax # change in flags
- pushl %ecx # restore original EFLAGS
- popfl
- testl $0x40000,%eax # check if AC bit changed
- je is386
-
- movb $4,X86 # at least 486
- testl $0x200000,%eax # check if ID bit changed
+ cmpl $-1,X86_CPUID
je is486
/* get vendor info */
@@ -444,11 +425,10 @@ default_entry:
movb %cl,X86_MASK
movl %edx,X86_CAPABILITY
-is486: movl $0x50022,%ecx # set AM, WP, NE and MP
- jmp 2f
-
-is386: movl $2,%ecx # set MP
-2: movl %cr0,%eax
+is486:
+ movb $4,X86
+ movl $0x50022,%ecx # set AM, WP, NE and MP
+ movl %cr0,%eax
andl $0x80000011,%eax # Save PG,PE,ET
orl %ecx,%eax
movl %eax,%cr0
@@ -473,7 +453,6 @@ is386: movl $2,%ecx # set MP
xorl %eax,%eax # Clear LDT
lldt %ax
- cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder
jmp *(initial_code)
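
The default_entry rewrite above detects CPUID by checking whether EFLAGS.ID can be modified, replacing the old AC-bit 386/486 probe. A C rendering of the same idea as a sketch (32-bit only, build with gcc -m32); this models the probe, not the actual boot path:

/* Sketch of the EFLAGS.ID probe: if the ID bit (1 << 21) can be
 * toggled, CPUID (and hence CR4) exists. */
#include <stdio.h>

#define X86_EFLAGS_ID (1u << 21)

static unsigned int flip_id_bit(void)
{
	unsigned int before, after;

	__asm__ volatile("pushfl\n\t"
			 "popl %0\n\t"		/* before = EFLAGS */
			 "movl %0, %1\n\t"
			 "xorl %2, %1\n\t"	/* toggle ID       */
			 "pushl %1\n\t"
			 "popfl\n\t"		/* write back      */
			 "pushfl\n\t"
			 "popl %1"		/* after = EFLAGS  */
			 : "=&r"(before), "=&r"(after)
			 : "i"(X86_EFLAGS_ID));
	return (before ^ after) & X86_EFLAGS_ID;
}

int main(void)
{
	printf("CPUID %s\n", flip_id_bit() ? "present" : "absent");
	return 0;
}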
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index e28670f9a589..da85a8e830a1 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -478,7 +478,7 @@ static int hpet_msi_next_event(unsigned long delta,
static int hpet_setup_msi_irq(unsigned int irq)
{
- if (arch_setup_hpet_msi(irq, hpet_blockid)) {
+ if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) {
destroy_irq(irq);
return -EINVAL;
}
diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile
new file mode 100644
index 000000000000..0d33169cc1a2
--- /dev/null
+++ b/arch/x86/kernel/kprobes/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for kernel probes
+#
+
+obj-$(CONFIG_KPROBES) += core.o
+obj-$(CONFIG_OPTPROBES) += opt.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes/common.h
index 3230b68ef29a..2e9d4b5af036 100644
--- a/arch/x86/kernel/kprobes-common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
return addr;
}
#endif
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb);
+#else
+static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ return 0;
+}
+#endif
#endif
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes/core.c
index 57916c0d3cf6..e124554598ee 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -58,7 +58,7 @@
#include <asm/insn.h>
#include <asm/debugreg.h>
-#include "kprobes-common.h"
+#include "common.h"
void jprobe_return_end(void);
@@ -78,7 +78,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
* Groups, and some special opcodes can not boost.
* This is non-const and volatile to keep gcc from statically
* optimizing it out, as variable_test_bit makes gcc think only
- * *(unsigned long*) is used.
+ * *(unsigned long*) is used.
*/
static volatile u32 twobyte_is_boostable[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
@@ -117,7 +117,7 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
struct __arch_relative_insn {
u8 op;
s32 raddr;
- } __attribute__((packed)) *insn;
+ } __packed *insn;
insn = (struct __arch_relative_insn *)from;
insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
return 1;
}
-#ifdef KPROBES_CAN_USE_FTRACE
-static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- /*
- * Emulate singlestep (and also recover regs->ip)
- * as if there is a 5byte nop
- */
- regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- __this_cpu_write(current_kprobe, NULL);
-}
-#endif
-
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
} else if (kprobe_running()) {
p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
-#ifdef KPROBES_CAN_USE_FTRACE
- if (kprobe_ftrace(p)) {
- skip_singlestep(p, regs, kcb);
- return 1;
- }
-#endif
- setup_singlestep(p, regs, kcb, 0);
+ if (!skip_singlestep(p, regs, kcb))
+ setup_singlestep(p, regs, kcb, 0);
return 1;
}
} /* else: not a kprobe fault; let the kernel handle it */
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
return 0;
}
-#ifdef KPROBES_CAN_USE_FTRACE
-/* Ftrace callback handler for kprobes */
-void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
-{
- struct kprobe *p;
- struct kprobe_ctlblk *kcb;
- unsigned long flags;
-
- /* Disable irq for emulating a breakpoint and avoiding preempt */
- local_irq_save(flags);
-
- p = get_kprobe((kprobe_opcode_t *)ip);
- if (unlikely(!p) || kprobe_disabled(p))
- goto end;
-
- kcb = get_kprobe_ctlblk();
- if (kprobe_running()) {
- kprobes_inc_nmissed_count(p);
- } else {
- /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
- regs->ip = ip + sizeof(kprobe_opcode_t);
-
- __this_cpu_write(current_kprobe, p);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs))
- skip_singlestep(p, regs, kcb);
- /*
- * If pre_handler returns !0, it sets regs->ip and
- * resets current kprobe.
- */
- }
-end:
- local_irq_restore(flags);
-}
-
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
-{
- p->ainsn.insn = NULL;
- p->ainsn.boostable = -1;
- return 0;
-}
-#endif
-
int __init arch_init_kprobes(void)
{
return arch_init_optprobes();
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
new file mode 100644
index 000000000000..23ef5c556f06
--- /dev/null
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -0,0 +1,93 @@
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/ftrace.h>
+
+#include "common.h"
+
+static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ /*
+ * Emulate singlestep (and also recover regs->ip)
+ * as if there were a 5-byte nop
+ */
+ regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ __this_cpu_write(current_kprobe, NULL);
+ return 1;
+}
+
+int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (kprobe_ftrace(p))
+ return __skip_singlestep(p, regs, kcb);
+ else
+ return 0;
+}
+
+/* Ftrace callback handler for kprobes */
+void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+ unsigned long flags;
+
+ /* Disable irq for emulating a breakpoint and avoiding preempt */
+ local_irq_save(flags);
+
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto end;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+ regs->ip = ip + sizeof(kprobe_opcode_t);
+
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ __skip_singlestep(p, regs, kcb);
+ /*
+ * If pre_handler returns !0, it sets regs->ip and
+ * resets current kprobe.
+ */
+ }
+end:
+ local_irq_restore(flags);
+}
+
+int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.insn = NULL;
+ p->ainsn.boostable = -1;
+ return 0;
+}
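
With CONFIG_KPROBES_ON_FTRACE, a probe placed on a function entry (an ftrace site) is dispatched through kprobe_ftrace_handler() above, and __skip_singlestep() advances regs->ip past the 5-byte call instead of single-stepping. A minimal kprobe module sketch that would take this path; the probed symbol is only an example:

/* Minimal kprobe module sketch using the standard kprobes API. */
#include <linux/module.h>
#include <linux/kprobes.h>

static int pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;	/* 0: let skip_singlestep()/single-step proceed */
}

static struct kprobe kp = {
	.symbol_name = "do_sys_open",	/* example symbol only */
	.pre_handler = pre,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");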
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes/opt.c
index c5e410eed403..76dc6f095724 100644
--- a/arch/x86/kernel/kprobes-opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -37,7 +37,7 @@
#include <asm/insn.h>
#include <asm/debugreg.h>
-#include "kprobes-common.h"
+#include "common.h"
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9c2bd8bd4b4c..2b44ea5f269d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -505,6 +505,7 @@ static bool __init kvm_detect(void)
const struct hypervisor_x86 x86_hyper_kvm __refconst = {
.name = "KVM",
.detect = kvm_detect,
+ .x2apic_available = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2ed787f15bf0..14ae10031ff0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(pm_idle);
-#endif
+static void (*x86_idle)(void);
#ifndef CONFIG_SMP
static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
rcu_idle_enter();
if (cpuidle_idle_call())
- pm_idle();
+ x86_idle();
rcu_idle_exit();
start_critical_timings();
@@ -375,7 +369,6 @@ void cpu_idle(void)
*/
void default_idle(void)
{
- trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
trace_cpu_idle_rcuidle(1, smp_processor_id());
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -389,21 +382,22 @@ void default_idle(void)
else
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
- trace_power_end_rcuidle(smp_processor_id());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
-bool set_pm_idle_to_default(void)
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void)
{
- bool ret = !!pm_idle;
+ bool ret = !!x86_idle;
- pm_idle = default_idle;
+ x86_idle = default_idle;
return ret;
}
+#endif
void stop_this_cpu(void *dummy)
{
local_irq_disable();
@@ -413,31 +407,8 @@ void stop_this_cpu(void *dummy)
set_cpu_online(smp_processor_id(), false);
disable_local_APIC();
- for (;;) {
- if (hlt_works(smp_processor_id()))
- halt();
- }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- if (!need_resched()) {
- trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
- trace_cpu_idle_rcuidle(1, smp_processor_id());
- if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
- clflush((void *)&current_thread_info()->flags);
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __sti_mwait(0, 0);
- else
- local_irq_enable();
- trace_power_end_rcuidle(smp_processor_id());
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
- } else
- local_irq_enable();
+ for (;;)
+ halt();
}
/*
@@ -447,62 +418,13 @@ static void mwait_idle(void)
*/
static void poll_idle(void)
{
- trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
while (!need_resched())
cpu_relax();
- trace_power_end_rcuidle(smp_processor_id());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
-/*
- * mwait selection logic:
- *
- * It depends on the CPU. For AMD CPUs that support MWAIT this is
- * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
- * then depend on a clock divisor and current Pstate of the core. If
- * all cores of a processor are in halt state (C1) the processor can
- * enter the C1E (C1 enhanced) state. If mwait is used this will never
- * happen.
- *
- * idle=mwait overrides this decision and forces the usage of mwait.
- */
-
-#define MWAIT_INFO 0x05
-#define MWAIT_ECX_EXTENDED_INFO 0x01
-#define MWAIT_EDX_C1 0xf0
-
-int mwait_usable(const struct cpuinfo_x86 *c)
-{
- u32 eax, ebx, ecx, edx;
-
- /* Use mwait if idle=mwait boot option is given */
- if (boot_option_idle_override == IDLE_FORCE_MWAIT)
- return 1;
-
- /*
- * Any idle= boot option other than idle=mwait means that we must not
- * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
- */
- if (boot_option_idle_override != IDLE_NO_OVERRIDE)
- return 0;
-
- if (c->cpuid_level < MWAIT_INFO)
- return 0;
-
- cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
- /* Check, whether EDX has extended info about MWAIT */
- if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
- return 1;
-
- /*
- * edx enumeratios MONITOR/MWAIT extensions. Check, whether
- * C1 supports MWAIT
- */
- return (edx & MWAIT_EDX_C1);
-}
-
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);
@@ -567,31 +489,24 @@ static void amd_e400_idle(void)
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- if (pm_idle == poll_idle && smp_num_siblings > 1) {
+ if (x86_idle == poll_idle && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
- }
#endif
- if (pm_idle)
+ if (x86_idle)
return;
- if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- /*
- * One CPU supports mwait => All CPUs supports mwait
- */
- pr_info("using mwait in idle threads\n");
- pm_idle = mwait_idle;
- } else if (cpu_has_amd_erratum(amd_erratum_400)) {
+ if (cpu_has_amd_erratum(amd_erratum_400)) {
/* E400: APIC timer interrupt does not wake up CPU from C1e */
pr_info("using AMD E400 aware idle routine\n");
- pm_idle = amd_e400_idle;
+ x86_idle = amd_e400_idle;
} else
- pm_idle = default_idle;
+ x86_idle = default_idle;
}
void __init init_amd_e400_c1e_mask(void)
{
/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
- if (pm_idle == amd_e400_idle)
+ if (x86_idle == amd_e400_idle)
zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
@@ -602,11 +517,8 @@ static int __init idle_setup(char *str)
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads\n");
- pm_idle = poll_idle;
+ x86_idle = poll_idle;
boot_option_idle_override = IDLE_POLL;
- } else if (!strcmp(str, "mwait")) {
- boot_option_idle_override = IDLE_FORCE_MWAIT;
- WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
} else if (!strcmp(str, "halt")) {
/*
* When the boot option of idle=halt is added, halt is
@@ -615,7 +527,7 @@ static int __init idle_setup(char *str)
* To continue to load the CPU idle driver, don't touch
* the boot_option_idle_override.
*/
- pm_idle = default_idle;
+ x86_idle = default_idle;
boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) {
/*
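Editor's note: the process.c hunks above retire the exported pm_idle hook in favour of a file-local x86_idle pointer, drop the mwait idle path and its mwait_usable() helper, and make the Xen override (xen_set_default_idle) explicit. A rough user-space sketch of the selection pattern, with made-up names: an idle= style option may claim the slot before the default selection runs.

#include <stdio.h>
#include <string.h>

/* A file-scope pointer (x86_idle in the kernel) selected once, unless
 * an idle= style option claimed it first.  Illustrative, not kernel API. */
static void (*idle_fn)(void);

static void default_idle_demo(void) { puts("hlt"); }
static void poll_idle_demo(void)    { puts("poll"); }

static void idle_setup(const char *str)        /* cf. idle= parsing */
{
        if (!strcmp(str, "poll"))
                idle_fn = poll_idle_demo;
        else if (!strcmp(str, "halt"))
                idle_fn = default_idle_demo;
}

static void select_idle_routine(void)          /* runs once at boot */
{
        if (idle_fn)                           /* already overridden */
                return;
        idle_fn = default_idle_demo;
}

int main(void)
{
        idle_setup("poll");
        select_idle_routine();
        idle_fn();                             /* prints "poll" */
        return 0;
}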
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b629bbe0d9bd..29a8120e6fe8 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -22,7 +22,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/context_tracking.h>
#include <asm/uaccess.h>
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 801602b5d745..2e8f3d3b5641 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -149,7 +149,6 @@ unsigned long mach_get_cmos_time(void)
if (century) {
century = bcd2bin(century);
year += century * 100;
- printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
} else
year += CMOS_YEARS_OFFS;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289d..a6ceaedc396a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
- if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
+ if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
if (!this_cpu_has(X86_FEATURE_CLFLSH))
return;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 97ef74b88e0f..dbded5aedb81 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -157,7 +157,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
- /* for MAP_32BIT mappings we force the legact mmap base */
+ /* for MAP_32BIT mappings we force the legacy mmap base */
if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
goto bottomup;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 06ccb5073a3f..4b9ea101fe3b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
ns_now = __cycles_2_ns(tsc_now);
if (cpu_khz) {
- *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+ *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
+ cpu_khz / 2) / cpu_khz;
*offset = ns_now - mult_frac(tsc_now, *scale,
(1UL << CYC2NS_SCALE_FACTOR));
}
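Editor's note: the tsc.c hunk converts a truncating division into round-to-nearest by adding half the divisor first, reducing the systematic downward bias in the cycles-to-nanoseconds scale. A small stand-alone illustration with made-up values:

#include <stdio.h>

/* Truncating vs. round-to-nearest integer division, as in the
 * set_cyc2ns_scale() change above. */
int main(void)
{
        unsigned long num = 1000000UL << 10;   /* NSEC_PER_MSEC << scale */
        unsigned long khz = 2667000;           /* a 2.667 GHz cpu_khz    */

        unsigned long trunc   = num / khz;
        unsigned long nearest = (num + khz / 2) / khz;

        printf("truncated=%lu rounded=%lu\n", trunc, nearest);
        return 0;
}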
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index c71025b67462..0ba4cfb4f412 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -680,8 +680,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (auprobe->insn[i] == 0x66)
continue;
- if (auprobe->insn[i] == 0x90)
+ if (auprobe->insn[i] == 0x90) {
+ regs->ip += i + 1;
return true;
+ }
break;
}
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 7a3d075a814a..d065d67c2672 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -19,6 +19,7 @@
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/io_apic.h>
+#include <asm/hpet.h>
#include <asm/pat.h>
#include <asm/tsc.h>
#include <asm/iommu.h>
@@ -111,15 +112,22 @@ struct x86_platform_ops x86_platform = {
EXPORT_SYMBOL_GPL(x86_platform);
struct x86_msi_ops x86_msi = {
- .setup_msi_irqs = native_setup_msi_irqs,
- .teardown_msi_irq = native_teardown_msi_irq,
- .teardown_msi_irqs = default_teardown_msi_irqs,
- .restore_msi_irqs = default_restore_msi_irqs,
+ .setup_msi_irqs = native_setup_msi_irqs,
+ .compose_msi_msg = native_compose_msi_msg,
+ .teardown_msi_irq = native_teardown_msi_irq,
+ .teardown_msi_irqs = default_teardown_msi_irqs,
+ .restore_msi_irqs = default_restore_msi_irqs,
+ .setup_hpet_msi = default_setup_hpet_msi,
};
struct x86_io_apic_ops x86_io_apic_ops = {
- .init = native_io_apic_init_mappings,
- .read = native_io_apic_read,
- .write = native_io_apic_write,
- .modify = native_io_apic_modify,
+ .init = native_io_apic_init_mappings,
+ .read = native_io_apic_read,
+ .write = native_io_apic_write,
+ .modify = native_io_apic_modify,
+ .disable = native_disable_io_apic,
+ .print_entries = native_io_apic_print_entries,
+ .set_affinity = native_ioapic_set_affinity,
+ .setup_entry = native_setup_ioapic_entry,
+ .eoi_ioapic_pin = native_eoi_ioapic_pin,
};
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 7872a3330fb5..29043d2048a0 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@ config LGUEST_GUEST
bool "Lguest guest support"
select PARAVIRT
depends on X86_32
+ select TTY
select VIRTUALIZATION
select VIRTIO
select VIRTIO_CONSOLE
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 027088f2f7dd..fb674fd3fc22 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
return;
}
#endif
+ /* Kernel addresses are always protection faults: */
+ if (address >= TASK_SIZE)
+ error_code |= PF_PROT;
- if (unlikely(show_unhandled_signals))
+ if (likely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
- /* Kernel addresses are always protection faults: */
tsk->thread.cr2 = address;
- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_PF;
force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
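Editor's note: the fault.c hunk stops OR-ing a raw comparison result into error_code and instead sets the named PF_PROT bit up front, so show_signal_msg() also sees the corrected code. A toy version of the idiom (only PF_PROT's bit position mirrors the real x86 error code; the addresses are made up):

#include <stdio.h>

/* Set the named flag once, early, instead of OR-ing a 0/1 comparison
 * result in at the last moment. */
#define PF_PROT (1UL << 0)

int main(void)
{
        unsigned long error_code = 0x2;                 /* a write fault */
        unsigned long long addr = 0xffffffff81000000ULL;
        unsigned long long task_size = 0x7ffffffff000ULL;

        if (addr >= task_size)                          /* kernel address */
                error_code |= PF_PROT;                  /* explicit, early */

        printf("error_code=%#lx\n", error_code);
        return 0;
}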
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2ead3c8a4c84..d6eeead43758 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -605,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
}
if (pgd_changed)
- sync_global_pgds(addr, end);
+ sync_global_pgds(addr, end - 1);
__flush_tlb_all();
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)
if (pud_none(*pud))
return 0;
+ if (pud_large(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return 0;
@@ -981,7 +984,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
}
}
- sync_global_pgds((unsigned long)start_page, end);
+ sync_global_pgds((unsigned long)start_page, end - 1);
return 0;
}
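Editor's note: both sync_global_pgds() call sites above now pass end - 1 because the callee treats its second argument as the last address inside the range, while the callers hold a one-past-the-end value. A toy sketch of the inclusive/exclusive conversion:

#include <stdio.h>

/* The callee takes an inclusive last address; callers hold an exclusive
 * end, so they convert with "end - 1".  Values are illustrative. */
static void walk_inclusive(unsigned long start, unsigned long last)
{
        unsigned long a;

        for (a = start; a <= last; a++)
                printf("sync %lu\n", a);
}

int main(void)
{
        unsigned long start = 0, end = 3;       /* [0, 3): exclusive end */

        walk_inclusive(start, end - 1);         /* visits 0, 1, 2 */
        return 0;
}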
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index c80b9fb95734..8dabbed409ee 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,6 +9,7 @@
#include <linux/memblock.h>
static u64 patterns[] __initdata = {
+ /* The first entry has to be 0 to leave memtest with zeroed memory */
0,
0xffffffffffffffffULL,
0x5555555555555555ULL,
@@ -110,15 +111,8 @@ void __init early_memtest(unsigned long start, unsigned long end)
return;
printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
- for (i = 0; i < memtest_pattern; i++) {
+ for (i = memtest_pattern-1; i < UINT_MAX; --i) {
idx = i % ARRAY_SIZE(patterns);
do_one_pass(patterns[idx], start, end);
}
-
- if (idx > 0) {
- printk(KERN_INFO "early_memtest: wipe out "
- "test pattern from memory\n");
- /* additional test with pattern 0 will do this */
- do_one_pass(0, start, end);
- }
}
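Editor's note: the early_memtest() rework runs the patterns in descending index order so that index 0 (the all-zero pattern, per the new comment) executes last and leaves memory wiped, making the old explicit final zero pass redundant. The loop relies on unsigned wraparound for termination; a stand-alone sketch:

#include <stdio.h>
#include <limits.h>

/* When an unsigned index wraps past zero it becomes UINT_MAX, so
 * "i < UINT_MAX" doubles as the termination test.  Indices run n-1
 * down to 0, so index 0 (all zeroes) is always the final pass. */
int main(void)
{
        unsigned int n = 4;     /* stand-in for memtest_pattern */
        unsigned int i;

        for (i = n - 1; i < UINT_MAX; --i)
                printf("pass with pattern index %u\n", i);
        return 0;
}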
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4ddf497ca65b..cdd0da9dd530 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -149,39 +149,40 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
int node, pxm;
if (srat_disabled())
- return -1;
- if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
- bad_srat();
- return -1;
- }
+ goto out_err;
+ if (ma->header.length != sizeof(struct acpi_srat_mem_affinity))
+ goto out_err_bad_srat;
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- return -1;
-
+ goto out_err;
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
- return -1;
+ goto out_err;
+
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
if (acpi_srat_revision <= 1)
pxm &= 0xff;
+
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
- bad_srat();
- return -1;
+ goto out_err_bad_srat;
}
- if (numa_add_memblk(node, start, end) < 0) {
- bad_srat();
- return -1;
- }
+ if (numa_add_memblk(node, start, end) < 0)
+ goto out_err_bad_srat;
node_set(node, numa_nodes_parsed);
printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1);
+
return 0;
+out_err_bad_srat:
+ bad_srat();
+out_err:
+ return -1;
}
void __init acpi_numa_arch_fixup(void) {}
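Editor's note: the srat.c hunk consolidates the repeated "bad_srat(); return -1;" pairs behind two shared exit labels, the usual kernel idiom for centralized error handling. A generic sketch with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Every failure path funnels through shared labels, so the cleanup is
 * written exactly once. */
static int setup(int fail_early)
{
        char *buf = malloc(64);

        if (!buf)
                goto out_err;
        if (fail_early)
                goto out_err_free;

        free(buf);
        return 0;

out_err_free:
        free(buf);
out_err:
        return -1;
}

int main(void)
{
        printf("ok=%d fail=%d\n", setup(0), setup(1));
        return 0;
}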
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 13a6b29e2e5d..282375f13c7e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -335,7 +335,7 @@ static const struct file_operations fops_tlbflush = {
.llseek = default_llseek,
};
-static int __cpuinit create_tlb_flushall_shift(void)
+static int __init create_tlb_flushall_shift(void)
{
debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
arch_debugfs_dir, NULL, &fops_tlbflush);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d11a47099d33..3cbe45381bbb 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1,6 +1,6 @@
/* bpf_jit_comp.c : BPF JIT compiler
*
- * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com)
+ * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -124,6 +124,26 @@ static inline void bpf_flush_icache(void *start, void *end)
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+/* Helper to find the offset of pkt_type in sk_buff
+ * We want to make sure it's still a 3-bit field starting at a byte boundary.
+ */
+#define PKT_TYPE_MAX 7
+static int pkt_type_offset(void)
+{
+ struct sk_buff skb_probe = {
+ .pkt_type = ~0,
+ };
+ char *ct = (char *)&skb_probe;
+ unsigned int off;
+
+ for (off = 0; off < sizeof(struct sk_buff); off++) {
+ if (ct[off] == PKT_TYPE_MAX)
+ return off;
+ }
+ pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
+ return -1;
+}
+
void bpf_jit_compile(struct sk_filter *fp)
{
u8 temp[64];
@@ -216,6 +236,7 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ANC_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT:
case BPF_S_ANC_QUEUE:
+ case BPF_S_ANC_PKTTYPE:
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
@@ -536,6 +557,23 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
}
break;
+ case BPF_S_ANC_PKTTYPE:
+ {
+ int off = pkt_type_offset();
+
+ if (off < 0)
+ goto out;
+ if (is_imm8(off)) {
+ /* movzbl off8(%rdi),%eax */
+ EMIT4(0x0f, 0xb6, 0x47, off);
+ } else {
+ /* movzbl off32(%rdi),%eax */
+ EMIT3(0x0f, 0xb6, 0x87);
+ EMIT(off, 4);
+ }
+ EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
+ break;
+ }
case BPF_S_LD_W_ABS:
func = CHOOSE_LOAD_FUNC(K, sk_load_word);
common_load: seen |= SEEN_DATAREF;
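Editor's note: pkt_type_offset() above discovers where a bitfield lives by setting it to all ones in an otherwise zeroed sk_buff and scanning the raw bytes, since offsetof() cannot be applied to bitfields. A stand-alone sketch of the same probe against a made-up struct (not sk_buff):

#include <stdio.h>

/* Probe a bitfield's byte offset: zero the struct, set the field to all
 * ones, scan the bytes. */
struct demo {
        unsigned long a;
        unsigned char pad;
        unsigned char type:3;           /* field whose offset we want */
};

static int type_offset(void)
{
        struct demo probe = { .type = ~0 };
        const unsigned char *p = (const unsigned char *)&probe;
        size_t off;

        for (off = 0; off < sizeof(probe); off++)
                if (p[off] == 7)        /* all ones in a 3-bit field */
                        return (int)off;
        return -1;
}

int main(void)
{
        printf("type lives at byte offset %d\n", type_offset());
        return 0;
}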
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index fb29968a7cd5..082e88129712 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -548,8 +548,7 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
if (cfg->address < 0xFFFFFFFF)
return 0;
- if (!strcmp(mcfg->header.oem_id, "SGI") ||
- !strcmp(mcfg->header.oem_id, "SGI2"))
+ if (!strncmp(mcfg->header.oem_id, "SGI", 3))
return 0;
if (mcfg->header.revision >= 1) {
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 8d874396cb29..01e0231a113e 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -2,10 +2,12 @@
obj-y += ce4100/
obj-y += efi/
obj-y += geode/
+obj-y += goldfish/
obj-y += iris/
obj-y += mrst/
obj-y += olpc/
obj-y += scx200/
obj-y += sfi/
+obj-y += ts5500/
obj-y += visws/
obj-y += uv/
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index d9c1b95af17c..7145ec63c520 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -11,20 +11,21 @@
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
struct acpi_table_bgrt *bgrt_tab;
-void *bgrt_image;
-size_t bgrt_image_size;
+void *__initdata bgrt_image;
+size_t __initdata bgrt_image_size;
struct bmp_header {
u16 id;
u32 size;
} __packed;
-void efi_bgrt_init(void)
+void __init efi_bgrt_init(void)
{
acpi_status status;
void __iomem *image;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 77cf0090c0a3..928bf837040a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(efi_enabled);
static int __init setup_noefi(char *arg)
{
- clear_bit(EFI_BOOT, &x86_efi_facility);
+ clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
return 0;
}
early_param("noefi", setup_noefi);
diff --git a/arch/x86/platform/goldfish/Makefile b/arch/x86/platform/goldfish/Makefile
new file mode 100644
index 000000000000..f030b532fdf3
--- /dev/null
+++ b/arch/x86/platform/goldfish/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_GOLDFISH) += goldfish.o
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
new file mode 100644
index 000000000000..1693107a518e
--- /dev/null
+++ b/arch/x86/platform/goldfish/goldfish.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2011 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+/*
+ * Where in virtual device memory the IO devices (timers, system controllers
+ * and so on) are found.
+ */
+
+#define GOLDFISH_PDEV_BUS_BASE (0xff001000)
+#define GOLDFISH_PDEV_BUS_END (0xff7fffff)
+#define GOLDFISH_PDEV_BUS_IRQ (4)
+
+#define GOLDFISH_TTY_BASE (0x2000)
+
+static struct resource goldfish_pdev_bus_resources[] = {
+ {
+ .start = GOLDFISH_PDEV_BUS_BASE,
+ .end = GOLDFISH_PDEV_BUS_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = GOLDFISH_PDEV_BUS_IRQ,
+ .end = GOLDFISH_PDEV_BUS_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static int __init goldfish_init(void)
+{
+ platform_device_register_simple("goldfish_pdev_bus", -1,
+ goldfish_pdev_bus_resources, 2);
+ return 0;
+}
+device_initcall(goldfish_init);
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 2fdca25905ae..fef7d0ba7e3a 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -195,7 +195,7 @@ err_sysfs:
return r;
}
-static int xo15_sci_remove(struct acpi_device *device, int type)
+static int xo15_sci_remove(struct acpi_device *device)
{
acpi_disable_gpe(NULL, xo15_sci_gpe);
acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c
index 7785b72ecc3a..bcd1a703e3e6 100644
--- a/arch/x86/platform/sfi/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
@@ -35,7 +35,7 @@
static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
/* All CPUs enumerated by SFI must be present and enabled */
-static void __cpuinit mp_sfi_register_lapic(u8 id)
+static void __init mp_sfi_register_lapic(u8 id)
{
if (MAX_LOCAL_APIC - id <= 0) {
pr_warning("Processor #%d invalid (max %d)\n",
diff --git a/arch/x86/platform/ts5500/Makefile b/arch/x86/platform/ts5500/Makefile
new file mode 100644
index 000000000000..c54e348c96a7
--- /dev/null
+++ b/arch/x86/platform/ts5500/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TS5500) += ts5500.o
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
new file mode 100644
index 000000000000..39febb214e8c
--- /dev/null
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -0,0 +1,339 @@
+/*
+ * Technologic Systems TS-5500 Single Board Computer support
+ *
+ * Copyright (C) 2013 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ *
+ * This driver registers the Technologic Systems TS-5500 Single Board Computer
+ * (SBC) and its devices, and exposes information to userspace such as jumpers'
+ * state or available options. For further information about sysfs entries, see
+ * Documentation/ABI/testing/sysfs-platform-ts5500.
+ *
+ * This code actually supports the TS-5500 platform, but it may be extended to
+ * support similar Technologic Systems x86-based platforms, such as the TS-5600.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_data/gpio-ts5500.h>
+#include <linux/platform_data/max197.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Product code register */
+#define TS5500_PRODUCT_CODE_ADDR 0x74
+#define TS5500_PRODUCT_CODE 0x60 /* TS-5500 product code */
+
+/* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */
+#define TS5500_SRAM_RS485_ADC_ADDR 0x75
+#define TS5500_SRAM BIT(0) /* SRAM option */
+#define TS5500_RS485 BIT(1) /* RS-485 option */
+#define TS5500_ADC BIT(2) /* A/D converter option */
+#define TS5500_RS485_RTS BIT(6) /* RTS for RS-485 */
+#define TS5500_RS485_AUTO BIT(7) /* Automatic RS-485 */
+
+/* External Reset/Industrial Temperature Range options register */
+#define TS5500_ERESET_ITR_ADDR 0x76
+#define TS5500_ERESET BIT(0) /* External Reset option */
+#define TS5500_ITR BIT(1) /* Indust. Temp. Range option */
+
+/* LED/Jumpers register */
+#define TS5500_LED_JP_ADDR 0x77
+#define TS5500_LED BIT(0) /* LED flag */
+#define TS5500_JP1 BIT(1) /* Automatic CMOS */
+#define TS5500_JP2 BIT(2) /* Enable Serial Console */
+#define TS5500_JP3 BIT(3) /* Write Enable Drive A */
+#define TS5500_JP4 BIT(4) /* Fast Console (115K baud) */
+#define TS5500_JP5 BIT(5) /* User Jumper */
+#define TS5500_JP6 BIT(6) /* Console on COM1 (req. JP2) */
+#define TS5500_JP7 BIT(7) /* Undocumented (Unused) */
+
+/* A/D Converter registers */
+#define TS5500_ADC_CONV_BUSY_ADDR 0x195 /* Conversion state register */
+#define TS5500_ADC_CONV_BUSY BIT(0)
+#define TS5500_ADC_CONV_INIT_LSB_ADDR 0x196 /* Start conv. / LSB register */
+#define TS5500_ADC_CONV_MSB_ADDR 0x197 /* MSB register */
+#define TS5500_ADC_CONV_DELAY 12 /* usec */
+
+/**
+ * struct ts5500_sbc - TS-5500 board description
+ * @id: Board product ID.
+ * @sram: Flag for SRAM option.
+ * @rs485: Flag for RS-485 option.
+ * @adc: Flag for Analog/Digital converter option.
+ * @ereset: Flag for External Reset option.
+ * @itr: Flag for Industrial Temperature Range option.
+ * @jumpers: Bitfield for jumpers' state.
+ */
+struct ts5500_sbc {
+ int id;
+ bool sram;
+ bool rs485;
+ bool adc;
+ bool ereset;
+ bool itr;
+ u8 jumpers;
+};
+
+/* Board signatures in BIOS shadow RAM */
+static const struct {
+ const char * const string;
+ const ssize_t offset;
+} ts5500_signatures[] __initdata = {
+ { "TS-5x00 AMD Elan", 0xb14 },
+};
+
+static int __init ts5500_check_signature(void)
+{
+ void __iomem *bios;
+ int i, ret = -ENODEV;
+
+ bios = ioremap(0xf0000, 0x10000);
+ if (!bios)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ts5500_signatures); i++) {
+ if (check_signature(bios + ts5500_signatures[i].offset,
+ ts5500_signatures[i].string,
+ strlen(ts5500_signatures[i].string))) {
+ ret = 0;
+ break;
+ }
+ }
+
+ iounmap(bios);
+ return ret;
+}
+
+static int __init ts5500_detect_config(struct ts5500_sbc *sbc)
+{
+ u8 tmp;
+ int ret = 0;
+
+ if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500"))
+ return -EBUSY;
+
+ tmp = inb(TS5500_PRODUCT_CODE_ADDR);
+ if (tmp != TS5500_PRODUCT_CODE) {
+ pr_err("This platform is not a TS-5500 (found ID 0x%x)\n", tmp);
+ ret = -ENODEV;
+ goto cleanup;
+ }
+ sbc->id = tmp;
+
+ tmp = inb(TS5500_SRAM_RS485_ADC_ADDR);
+ sbc->sram = tmp & TS5500_SRAM;
+ sbc->rs485 = tmp & TS5500_RS485;
+ sbc->adc = tmp & TS5500_ADC;
+
+ tmp = inb(TS5500_ERESET_ITR_ADDR);
+ sbc->ereset = tmp & TS5500_ERESET;
+ sbc->itr = tmp & TS5500_ITR;
+
+ tmp = inb(TS5500_LED_JP_ADDR);
+ sbc->jumpers = tmp & ~TS5500_LED;
+
+cleanup:
+ release_region(TS5500_PRODUCT_CODE_ADDR, 4);
+ return ret;
+}
+
+static ssize_t ts5500_show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ts5500_sbc *sbc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%.2x\n", sbc->id);
+}
+
+static ssize_t ts5500_show_jumpers(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ts5500_sbc *sbc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1);
+}
+
+#define TS5500_SHOW(field) \
+ static ssize_t ts5500_show_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct ts5500_sbc *sbc = dev_get_drvdata(dev); \
+ return sprintf(buf, "%d\n", sbc->field); \
+ }
+
+TS5500_SHOW(sram)
+TS5500_SHOW(rs485)
+TS5500_SHOW(adc)
+TS5500_SHOW(ereset)
+TS5500_SHOW(itr)
+
+static DEVICE_ATTR(id, S_IRUGO, ts5500_show_id, NULL);
+static DEVICE_ATTR(jumpers, S_IRUGO, ts5500_show_jumpers, NULL);
+static DEVICE_ATTR(sram, S_IRUGO, ts5500_show_sram, NULL);
+static DEVICE_ATTR(rs485, S_IRUGO, ts5500_show_rs485, NULL);
+static DEVICE_ATTR(adc, S_IRUGO, ts5500_show_adc, NULL);
+static DEVICE_ATTR(ereset, S_IRUGO, ts5500_show_ereset, NULL);
+static DEVICE_ATTR(itr, S_IRUGO, ts5500_show_itr, NULL);
+
+static struct attribute *ts5500_attributes[] = {
+ &dev_attr_id.attr,
+ &dev_attr_jumpers.attr,
+ &dev_attr_sram.attr,
+ &dev_attr_rs485.attr,
+ &dev_attr_adc.attr,
+ &dev_attr_ereset.attr,
+ &dev_attr_itr.attr,
+ NULL
+};
+
+static const struct attribute_group ts5500_attr_group = {
+ .attrs = ts5500_attributes,
+};
+
+static struct resource ts5500_dio1_resource[] = {
+ DEFINE_RES_IRQ_NAMED(7, "DIO1 interrupt"),
+};
+
+static struct platform_device ts5500_dio1_pdev = {
+ .name = "ts5500-dio1",
+ .id = -1,
+ .resource = ts5500_dio1_resource,
+ .num_resources = 1,
+};
+
+static struct resource ts5500_dio2_resource[] = {
+ DEFINE_RES_IRQ_NAMED(6, "DIO2 interrupt"),
+};
+
+static struct platform_device ts5500_dio2_pdev = {
+ .name = "ts5500-dio2",
+ .id = -1,
+ .resource = ts5500_dio2_resource,
+ .num_resources = 1,
+};
+
+static void ts5500_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ outb(!!brightness, TS5500_LED_JP_ADDR);
+}
+
+static enum led_brightness ts5500_led_get(struct led_classdev *led_cdev)
+{
+ return (inb(TS5500_LED_JP_ADDR) & TS5500_LED) ? LED_FULL : LED_OFF;
+}
+
+static struct led_classdev ts5500_led_cdev = {
+ .name = "ts5500:green:",
+ .brightness_set = ts5500_led_set,
+ .brightness_get = ts5500_led_get,
+};
+
+static int ts5500_adc_convert(u8 ctrl)
+{
+ u8 lsb, msb;
+
+ /* Start conversion (ensure the 3 MSB are set to 0) */
+ outb(ctrl & 0x1f, TS5500_ADC_CONV_INIT_LSB_ADDR);
+
+ /*
+ * The platform has CPLD logic driving the A/D converter.
+ * The conversion must complete within 11 microseconds,
+ * otherwise we have to re-initiate a conversion.
+ */
+ udelay(TS5500_ADC_CONV_DELAY);
+ if (inb(TS5500_ADC_CONV_BUSY_ADDR) & TS5500_ADC_CONV_BUSY)
+ return -EBUSY;
+
+ /* Read the raw data */
+ lsb = inb(TS5500_ADC_CONV_INIT_LSB_ADDR);
+ msb = inb(TS5500_ADC_CONV_MSB_ADDR);
+
+ return (msb << 8) | lsb;
+}
+
+static struct max197_platform_data ts5500_adc_pdata = {
+ .convert = ts5500_adc_convert,
+};
+
+static struct platform_device ts5500_adc_pdev = {
+ .name = "max197",
+ .id = -1,
+ .dev = {
+ .platform_data = &ts5500_adc_pdata,
+ },
+};
+
+static int __init ts5500_init(void)
+{
+ struct platform_device *pdev;
+ struct ts5500_sbc *sbc;
+ int err;
+
+ /*
+ * There is no DMI available or PCI bridge subvendor info,
+ * only the BIOS provides a 16-bit identification call.
+ * It is safer to find a signature in the BIOS shadow RAM.
+ */
+ err = ts5500_check_signature();
+ if (err)
+ return err;
+
+ pdev = platform_device_register_simple("ts5500", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ sbc = devm_kzalloc(&pdev->dev, sizeof(struct ts5500_sbc), GFP_KERNEL);
+ if (!sbc) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = ts5500_detect_config(sbc);
+ if (err)
+ goto error;
+
+ platform_set_drvdata(pdev, sbc);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &ts5500_attr_group);
+ if (err)
+ goto error;
+
+ ts5500_dio1_pdev.dev.parent = &pdev->dev;
+ if (platform_device_register(&ts5500_dio1_pdev))
+ dev_warn(&pdev->dev, "DIO1 block registration failed\n");
+ ts5500_dio2_pdev.dev.parent = &pdev->dev;
+ if (platform_device_register(&ts5500_dio2_pdev))
+ dev_warn(&pdev->dev, "DIO2 block registration failed\n");
+
+ if (led_classdev_register(&pdev->dev, &ts5500_led_cdev))
+ dev_warn(&pdev->dev, "LED registration failed\n");
+
+ if (sbc->adc) {
+ ts5500_adc_pdev.dev.parent = &pdev->dev;
+ if (platform_device_register(&ts5500_adc_pdev))
+ dev_warn(&pdev->dev, "ADC registration failed\n");
+ }
+
+ return 0;
+error:
+ platform_device_unregister(pdev);
+ return err;
+}
+device_initcall(ts5500_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Technologic Systems TS-5500 platform driver");
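Editor's note: TS5500_SHOW() in the new driver stamps out a family of near-identical sysfs show functions from one macro. A compact user-space sketch of the same function-generating pattern, with an illustrative struct in place of ts5500_sbc:

#include <stdio.h>

/* One macro, several accessors: the preprocessor pastes the field name
 * into both the function name and the member access. */
struct board {
        int sram;
        int adc;
};

#define BOARD_SHOW(field)                                       \
        static int show_##field(const struct board *b)          \
        {                                                       \
                return printf(#field ": %d\n", b->field);       \
        }

BOARD_SHOW(sram)
BOARD_SHOW(adc)

int main(void)
{
        struct board b = { .sram = 1, .adc = 0 };

        show_sram(&b);
        show_adc(&b);
        return 0;
}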
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index dbbdca5f508c..0f92173a12b6 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1467,7 +1467,7 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user,
}
if (input_arg == 0) {
- elements = sizeof(stat_description)/sizeof(*stat_description);
+ elements = ARRAY_SIZE(stat_description);
printk(KERN_DEBUG "# cpu: cpu number\n");
printk(KERN_DEBUG "Sender statistics:\n");
for (i = 0; i < elements; i++)
@@ -1508,7 +1508,7 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
char *q;
int cnt = 0;
int val;
- int e = sizeof(tunables) / sizeof(*tunables);
+ int e = ARRAY_SIZE(tunables);
p = instr + strspn(instr, WHITESPACE);
q = p;
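Editor's note: both tlb_uv.c hunks replace the open-coded sizeof division with ARRAY_SIZE(). Outside the kernel the plain form of the macro is a one-liner, though it lacks the kernel version's compile-time check that the argument really is an array:

#include <stdio.h>

/* Plain form of the idiom; the kernel's ARRAY_SIZE() additionally uses
 * __must_be_array() to reject pointers at compile time. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
        int tunables[] = { 1, 2, 3, 4, 5 };

        printf("%zu elements\n", ARRAY_SIZE(tunables));
        return 0;
}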
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5032e0d19b86..98718f604eb6 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -15,7 +15,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) Dimitri Sivanich
*/
#include <linux/clockchips.h>
@@ -102,9 +102,10 @@ static int uv_intr_pending(int pnode)
if (is_uv1_hub())
return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
UV1H_EVENT_OCCURRED0_RTC1_MASK;
- else
- return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
- UV2H_EVENT_OCCURRED2_RTC_1_MASK;
+ else if (is_uvx_hub())
+ return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
+ UVXH_EVENT_OCCURRED2_RTC_1_MASK;
+ return 0;
}
/* Setup interrupt and return non-zero if early expiration occurred. */
@@ -122,8 +123,8 @@ static int uv_setup_intr(int cpu, u64 expires)
uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
UV1H_EVENT_OCCURRED0_RTC1_MASK);
else
- uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
- UV2H_EVENT_OCCURRED2_RTC_1_MASK);
+ uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
+ UVXH_EVENT_OCCURRED2_RTC_1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index cc2f8c131286..872eb60e7806 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */
static void usage(const char *err)
{
if (err)
- fprintf(stderr, "Error: %s\n\n", err);
+ fprintf(stderr, "%s: Error: %s\n\n", prog, err);
fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
fprintf(stderr, "\t-y 64bit mode\n");
fprintf(stderr, "\t-n 32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
insns++;
}
- fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+ fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+ prog,
+ (errors) ? "Failure" : "Success",
+ insns,
+ (input_file) ? "given" : "random",
+ errors,
+ seed);
return errors ? 1 : 0;
}
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 53c90fd412d1..21a13ce1d751 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -37,9 +37,8 @@ config RWSEM_GENERIC_SPINLOCK
def_bool !RWSEM_XCHGADD_ALGORITHM
config 3_LEVEL_PGTABLES
- bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT
+ bool "Three-level pagetables" if !64BIT
default 64BIT
- depends on EXPERIMENTAL
help
Three-level pagetables will let UML have more than 4G of physical
memory. All the memory that can't be mapped directly will be treated
diff --git a/arch/x86/um/fault.c b/arch/x86/um/fault.c
index 8784ab30d91b..84ac7f7b0257 100644
--- a/arch/x86/um/fault.c
+++ b/arch/x86/um/fault.c
@@ -20,7 +20,7 @@ int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
const struct exception_table_entry *fixup;
fixup = search_exception_tables(address);
- if (fixup != 0) {
+ if (fixup) {
UPT_IP(regs) = fixup->fixup;
return 1;
}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 205ad328aa52..c74436e687bf 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -60,7 +60,7 @@ notrace static cycle_t vread_tsc(void)
static notrace cycle_t vread_hpet(void)
{
- return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
}
#ifdef CONFIG_PARAVIRT_CLOCK
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 138e5667409a..39928d16be3b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void)
#endif
}
-#ifdef CONFIG_XEN_PVHVM
-#define HVM_SHARED_INFO_ADDR 0xFE700000UL
-static struct shared_info *xen_hvm_shared_info;
-static unsigned long xen_hvm_sip_phys;
-static int xen_major, xen_minor;
-
-static void xen_hvm_connect_shared_info(unsigned long pfn)
+void __ref xen_hvm_init_shared_info(void)
{
+ int cpu;
struct xen_add_to_physmap xatp;
+ static struct shared_info *shared_info_page = 0;
+ if (!shared_info_page)
+ shared_info_page = (struct shared_info *)
+ extend_brk(PAGE_SIZE, PAGE_SIZE);
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = pfn;
+ xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
-}
-static void __init xen_hvm_set_shared_info(struct shared_info *sip)
-{
- int cpu;
-
- HYPERVISOR_shared_info = sip;
+ HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
* page, we use it in the event channel upcall and in some pvclock
* related functions. We don't need the vcpu_info placement
* optimizations because we don't use any pv_mmu or pv_irq op on
- * HVM. */
- for_each_online_cpu(cpu)
+ * HVM.
+ * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+ * online but xen_hvm_init_shared_info is run at resume time too and
+ * in that case multiple vcpus might be online. */
+ for_each_online_cpu(cpu) {
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
-}
-
-/* Reconnect the shared_info pfn to a (new) mfn */
-void xen_hvm_resume_shared_info(void)
-{
- xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
-}
-
-/* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage.
- * On these old tools the shared info page will be placed in E820_Ram.
- * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects
- * that nothing is mapped up to HVM_SHARED_INFO_ADDR.
- * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used
- * here for the shared info page. */
-static void __init xen_hvm_init_shared_info(void)
-{
- if (xen_major < 4) {
- xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
- xen_hvm_sip_phys = __pa(xen_hvm_shared_info);
- } else {
- xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR;
- set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys);
- xen_hvm_shared_info =
- (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
}
- xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
- xen_hvm_set_shared_info(xen_hvm_shared_info);
}
+#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
- uint32_t ecx, edx, pages, msr, base;
+ int major, minor;
+ uint32_t eax, ebx, ecx, edx, pages, msr, base;
u64 pfn;
base = xen_cpuid_base();
+ cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
+ major = eax >> 16;
+ minor = eax & 0xffff;
+ printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
+
cpuid(base + 2, &pages, &msr, &ecx, &edx);
pfn = __pa(hypercall_page);
@@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void)
static bool __init xen_hvm_platform(void)
{
- uint32_t eax, ebx, ecx, edx, base;
-
if (xen_pv_domain())
return false;
- base = xen_cpuid_base();
- if (!base)
+ if (!xen_cpuid_base())
return false;
- cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-
- xen_major = eax >> 16;
- xen_minor = eax & 0xffff;
-
- printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor);
-
return true;
}
@@ -1668,6 +1637,7 @@ const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
.name = "Xen HVM",
.detect = xen_hvm_platform,
.init_platform = xen_hvm_guest_init,
+ .x2apic_available = xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif
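Editor's note: the enlighten.c rework collapses the boot and resume paths into one xen_hvm_init_shared_info(): a function-local static pointer guards the one-time page allocation, so calling the function again at resume merely repeats the (idempotent) hypervisor registration. A sketch of that guard pattern with made-up names, not Xen calls:

#include <stdio.h>
#include <stdlib.h>

/* One init function for both boot and resume: the static pointer makes
 * the allocation happen once, while the registration step is safe to
 * repeat. */
static void init_shared_info(void)
{
        static char *shared_page;       /* persists across calls */

        if (!shared_page)
                shared_page = calloc(1, 4096);

        /* re-registering the same page with the hypervisor would go here */
        printf("shared page at %p\n", (void *)shared_page);
}

int main(void)
{
        init_shared_info();             /* boot */
        init_shared_info();             /* resume: same page again */
        return 0;
}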
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8971a26d21ab..94eac5c85cdc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void)
COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
/* Set up idle, making sure it calls safe_halt() pvop */
-#ifdef CONFIG_X86_32
- boot_cpu_data.hlt_works_ok = 1;
-#endif
disable_cpuidle();
disable_cpufreq();
- WARN_ON(set_pm_idle_to_default());
+ WARN_ON(xen_set_default_idle());
fiddle_vdso();
#ifdef CONFIG_NUMA
numa_off = 1;
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index ae8a00c39de4..45329c8c226e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
{
#ifdef CONFIG_XEN_PVHVM
int cpu;
- xen_hvm_resume_shared_info();
+ xen_hvm_init_shared_info();
xen_callback_vector();
xen_unplug_emulated_devices();
if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index f9643fc50de5..33ca6e42a4ca 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -89,11 +89,11 @@ ENTRY(xen_iret)
*/
#ifdef CONFIG_SMP
GET_THREAD_INFO(%eax)
- movl TI_cpu(%eax), %eax
- movl __per_cpu_offset(,%eax,4), %eax
- mov xen_vcpu(%eax), %eax
+ movl %ss:TI_cpu(%eax), %eax
+ movl %ss:__per_cpu_offset(,%eax,4), %eax
+ mov %ss:xen_vcpu(%eax), %eax
#else
- movl xen_vcpu, %eax
+ movl %ss:xen_vcpu, %eax
#endif
/* check IF state we're restoring */
@@ -106,11 +106,11 @@ ENTRY(xen_iret)
* resuming the code, so we don't have to be worried about
* being preempted to another CPU.
*/
- setz XEN_vcpu_info_mask(%eax)
+ setz %ss:XEN_vcpu_info_mask(%eax)
xen_iret_start_crit:
/* check for unmasked and pending */
- cmpw $0x0001, XEN_vcpu_info_pending(%eax)
+ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
/*
* If there's something pending, mask events again so we can
@@ -118,7 +118,7 @@ xen_iret_start_crit:
* touch XEN_vcpu_info_mask.
*/
jne 1f
- movb $1, XEN_vcpu_info_mask(%eax)
+ movb $1, %ss:XEN_vcpu_info_mask(%eax)
1: popl %eax
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index d2e73d19d366..a95b41744ad0 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -40,7 +40,7 @@ void xen_enable_syscall(void);
void xen_vcpu_restore(void);
void xen_callback_vector(void);
-void xen_hvm_resume_shared_info(void);
+void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 5aab1acabf1c..ad64c73b8675 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -132,6 +132,7 @@ choice
config XTENSA_PLATFORM_ISS
bool "ISS"
+ depends on TTY
select XTENSA_CALIBRATE_CCOUNT
select SERIAL_CONSOLE
select XTENSA_ISS_NETWORK
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 4acb5feba1fb..172a02a6ad14 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
consistent_sync(vaddr, size, direction);
}
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif /* _XTENSA_DMA_MAPPING_H */
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 38079be1cf1e..35905cb6e419 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -32,7 +32,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -81,4 +81,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 8207a119eee9..da9866f7fecf 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -58,7 +58,8 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
tty->port = &serial_port;
spin_lock(&timer_lock);
if (tty->count == 1) {
- setup_timer(&serial_timer, rs_poll, (unsigned long)tty);
+ setup_timer(&serial_timer, rs_poll,
+ (unsigned long)&serial_port);
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
}
spin_unlock(&timer_lock);
@@ -97,8 +98,7 @@ static int rs_write(struct tty_struct * tty,
static void rs_poll(unsigned long priv)
{
- struct tty_struct* tty = (struct tty_struct*) priv;
-
+ struct tty_port *port = (struct tty_port *)priv;
struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
int i = 0;
unsigned char c;
@@ -107,12 +107,12 @@ static void rs_poll(unsigned long priv)
while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
__simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
- tty_insert_flip_char(tty, c, TTY_NORMAL);
+ tty_insert_flip_char(port, c, TTY_NORMAL);
i++;
}
if (i)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);