Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/Kconfig | 1
-rw-r--r-- arch/alpha/include/asm/8253pit.h | 3
-rw-r--r-- arch/alpha/kernel/module.c | 34
-rw-r--r-- arch/alpha/kernel/perf_event.c | 2
-rw-r--r-- arch/alpha/kernel/sys_ruffian.c | 1
-rw-r--r-- arch/alpha/kernel/time.c | 3
-rw-r--r-- arch/arm/Kconfig | 1
-rw-r--r-- arch/arm/configs/mmp2_defconfig | 9
-rw-r--r-- arch/arm/include/asm/i8253.h | 15
-rw-r--r-- arch/arm/kernel/module.c | 29
-rw-r--r-- arch/arm/kernel/perf_event_v6.c | 30
-rw-r--r-- arch/arm/kernel/perf_event_v7.c | 30
-rw-r--r-- arch/arm/kernel/perf_event_xscale.c | 18
-rw-r--r-- arch/arm/kernel/ptrace.c | 5
-rw-r--r-- arch/arm/kernel/swp_emulate.c | 2
-rw-r--r-- arch/arm/mach-at91/include/mach/at91_mci.h | 115
-rw-r--r-- arch/arm/mach-davinci/board-mityomapl138.c | 1
-rw-r--r-- arch/arm/mach-davinci/dm646x.c | 1
-rw-r--r-- arch/arm/mach-davinci/pm.c | 1
-rw-r--r-- arch/arm/mach-ep93xx/Makefile | 4
-rw-r--r-- arch/arm/mach-ep93xx/core.c | 33
-rw-r--r-- arch/arm/mach-ep93xx/dma-m2p.c | 411
-rw-r--r-- arch/arm/mach-ep93xx/dma.c | 108
-rw-r--r-- arch/arm/mach-ep93xx/gpio.c | 410
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/dma.h | 190
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h | 1
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h | 2
-rw-r--r-- arch/arm/mach-footbridge/Kconfig | 1
-rw-r--r-- arch/arm/mach-footbridge/isa-timer.c | 59
-rw-r--r-- arch/arm/mach-imx/clock-imx25.c | 7
-rw-r--r-- arch/arm/mach-imx/dma-v1.c | 1
-rw-r--r-- arch/arm/mach-imx/mach-apf9328.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-armadillo5x0.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-bug.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-cpuimx27.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-cpuimx35.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-eukrea_cpuimx25.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-imx27_visstrim_m10.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-imx27ipcam.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-imx27lite.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-kzm_arm11_01.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx1ads.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx21ads.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx25_3ds.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx27_3ds.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx27ads.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx31_3ds.c | 3
-rw-r--r-- arch/arm/mach-imx/mach-mx31ads.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx31lilly.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx31lite.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx31moboard.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mx35_3ds.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-mxt_td60.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-pca100.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-pcm037.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-pcm038.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-pcm043.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-qong.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-scb9328.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-vpr200.c | 2
-rw-r--r-- arch/arm/mach-imx/mm-imx1.c | 21
-rw-r--r-- arch/arm/mach-imx/mm-imx21.c | 21
-rw-r--r-- arch/arm/mach-imx/mm-imx25.c | 17
-rw-r--r-- arch/arm/mach-imx/mm-imx27.c | 22
-rw-r--r-- arch/arm/mach-imx/mm-imx31.c | 15
-rw-r--r-- arch/arm/mach-imx/mm-imx35.c | 16
-rw-r--r-- arch/arm/mach-iop13xx/setup.c | 1
-rw-r--r-- arch/arm/mach-mmp/brownstone.c | 10
-rw-r--r-- arch/arm/mach-mmp/include/mach/mmp2.h | 2
-rw-r--r-- arch/arm/mach-mmp/jasper.c | 2
-rw-r--r-- arch/arm/mach-mmp/mmp2.c | 16
-rw-r--r-- arch/arm/mach-msm/Kconfig | 19
-rw-r--r-- arch/arm/mach-msm/Makefile | 2
-rw-r--r-- arch/arm/mach-msm/iommu.c | 731
-rw-r--r-- arch/arm/mach-msm/iommu_dev.c | 422
-rw-r--r-- arch/arm/mach-mx5/board-cpuimx51.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-cpuimx51sd.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx50_rdp.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx51_3ds.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx51_babbage.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx51_efikamx.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx51_efikasb.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx53_evk.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx53_loco.c | 2
-rw-r--r-- arch/arm/mach-mx5/board-mx53_smd.c | 2
-rw-r--r-- arch/arm/mach-mx5/clock-mx51-mx53.c | 11
-rw-r--r-- arch/arm/mach-mx5/devices.c | 64
-rw-r--r-- arch/arm/mach-mx5/mm-mx50.c | 22
-rw-r--r-- arch/arm/mach-mx5/mm.c | 27
-rw-r--r-- arch/arm/mach-mxs/Makefile | 2
-rw-r--r-- arch/arm/mach-mxs/devices.c | 11
-rw-r--r-- arch/arm/mach-mxs/devices/Makefile | 1
-rw-r--r-- arch/arm/mach-mxs/devices/platform-auart.c | 1
-rw-r--r-- arch/arm/mach-mxs/devices/platform-dma.c | 1
-rw-r--r-- arch/arm/mach-mxs/devices/platform-fec.c | 1
-rw-r--r-- arch/arm/mach-mxs/devices/platform-gpio-mxs.c | 53
-rw-r--r-- arch/arm/mach-mxs/gpio.c | 331
-rw-r--r-- arch/arm/mach-mxs/gpio.h | 34
-rw-r--r-- arch/arm/mach-mxs/include/mach/devices-common.h | 2
-rw-r--r-- arch/arm/mach-mxs/mach-mx28evk.c | 1
-rw-r--r-- arch/arm/mach-mxs/mm-mx23.c | 1
-rw-r--r-- arch/arm/mach-mxs/mm-mx28.c | 1
-rw-r--r-- arch/arm/mach-omap1/gpio15xx.c | 22
-rw-r--r-- arch/arm/mach-omap1/gpio16xx.c | 28
-rw-r--r-- arch/arm/mach-omap1/gpio7xx.c | 27
-rw-r--r-- arch/arm/mach-omap1/pm_bus.c | 14
-rw-r--r-- arch/arm/mach-omap2/gpio.c | 34
-rw-r--r-- arch/arm/mach-omap2/serial.c | 1
-rw-r--r-- arch/arm/mach-s3c2410/include/mach/spi-gpio.h | 28
-rw-r--r-- arch/arm/mach-s3c2410/mach-qt2410.c | 34
-rw-r--r-- arch/arm/mach-s3c2412/mach-jive.c | 41
-rw-r--r-- arch/arm/mach-s3c2440/mach-gta02.c | 1
-rw-r--r-- arch/arm/mach-shmobile/board-ap4evb.c | 5
-rw-r--r-- arch/arm/mach-shmobile/board-mackerel.c | 5
-rw-r--r-- arch/arm/mach-shmobile/clock-sh7372.c | 1
-rw-r--r-- arch/arm/mach-shmobile/include/mach/sh7372.h | 29
-rw-r--r-- arch/arm/mach-shmobile/pm-sh7372.c | 160
-rw-r--r-- arch/arm/mach-shmobile/pm_runtime.c | 22
-rw-r--r-- arch/arm/mach-shmobile/setup-sh7372.c | 11
-rw-r--r-- arch/arm/mach-tegra/Makefile | 1
-rw-r--r-- arch/arm/mach-tegra/clock.c | 7
-rw-r--r-- arch/arm/mach-tegra/gpio.c | 431
-rw-r--r-- arch/arm/mach-ux500/clock.c | 31
-rw-r--r-- arch/arm/mm/fault.c | 6
-rw-r--r-- arch/arm/plat-mxc/Makefile | 2
-rw-r--r-- arch/arm/plat-mxc/devices.c | 11
-rw-r--r-- arch/arm/plat-mxc/devices/Makefile | 1
-rw-r--r-- arch/arm/plat-mxc/devices/platform-fec.c | 1
-rw-r--r-- arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c | 1
-rw-r--r-- arch/arm/plat-mxc/devices/platform-gpio-mxc.c | 32
-rw-r--r-- arch/arm/plat-mxc/devices/platform-imx-fb.c | 1
-rw-r--r-- arch/arm/plat-mxc/devices/platform-ipu-core.c | 1
-rw-r--r-- arch/arm/plat-mxc/devices/platform-mxc-ehci.c | 1
-rw-r--r-- arch/arm/plat-mxc/devices/platform-mxc-mmc.c | 1
-rw-r--r-- arch/arm/plat-mxc/devices/platform-spi_imx.c | 12
-rw-r--r-- arch/arm/plat-mxc/gpio.c | 361
-rw-r--r-- arch/arm/plat-mxc/include/mach/common.h | 12
-rw-r--r-- arch/arm/plat-mxc/include/mach/devices-common.h | 2
-rw-r--r-- arch/arm/plat-mxc/include/mach/gpio.h | 27
-rw-r--r-- arch/arm/plat-mxc/include/mach/irqs.h | 21
-rw-r--r-- arch/arm/plat-nomadik/include/plat/ste_dma40.h | 1
-rw-r--r-- arch/arm/plat-omap/clock.c | 12
-rw-r--r-- arch/arm/plat-omap/include/plat/gpio.h | 20
-rw-r--r-- arch/arm/plat-omap/include/plat/omap_device.h | 9
-rw-r--r-- arch/arm/plat-omap/omap_device.c | 53
-rw-r--r-- arch/arm/plat-pxa/include/plat/sdhci.h | 35
-rw-r--r-- arch/arm/plat-samsung/clock.c | 7
-rw-r--r-- arch/arm/plat-spear/clock.c | 7
-rw-r--r-- arch/avr32/include/asm/delay.h | 27
-rw-r--r-- arch/avr32/kernel/module.c | 20
-rw-r--r-- arch/blackfin/Kconfig | 10
-rw-r--r-- arch/blackfin/configs/BF561-EZKIT_defconfig | 8
-rw-r--r-- arch/blackfin/include/asm/Kbuild | 43
-rw-r--r-- arch/blackfin/include/asm/atomic.h | 13
-rw-r--r-- arch/blackfin/include/asm/auxvec.h | 1
-rw-r--r-- arch/blackfin/include/asm/bitsperlong.h | 1
-rw-r--r-- arch/blackfin/include/asm/blackfin.h | 6
-rw-r--r-- arch/blackfin/include/asm/bugs.h | 1
-rw-r--r-- arch/blackfin/include/asm/cputime.h | 1
-rw-r--r-- arch/blackfin/include/asm/current.h | 1
-rw-r--r-- arch/blackfin/include/asm/device.h | 1
-rw-r--r-- arch/blackfin/include/asm/div64.h | 1
-rw-r--r-- arch/blackfin/include/asm/dpmc.h | 27
-rw-r--r-- arch/blackfin/include/asm/emergency-restart.h | 1
-rw-r--r-- arch/blackfin/include/asm/errno.h | 1
-rw-r--r-- arch/blackfin/include/asm/fb.h | 1
-rw-r--r-- arch/blackfin/include/asm/futex.h | 1
-rw-r--r-- arch/blackfin/include/asm/gpio.h | 64
-rw-r--r-- arch/blackfin/include/asm/gptimers.h | 19
-rw-r--r-- arch/blackfin/include/asm/hw_irq.h | 1
-rw-r--r-- arch/blackfin/include/asm/ioctl.h | 1
-rw-r--r-- arch/blackfin/include/asm/ipcbuf.h | 1
-rw-r--r-- arch/blackfin/include/asm/irq_regs.h | 1
-rw-r--r-- arch/blackfin/include/asm/irqflags.h | 42
-rw-r--r-- arch/blackfin/include/asm/kdebug.h | 1
-rw-r--r-- arch/blackfin/include/asm/kmap_types.h | 1
-rw-r--r-- arch/blackfin/include/asm/local.h | 1
-rw-r--r-- arch/blackfin/include/asm/local64.h | 1
-rw-r--r-- arch/blackfin/include/asm/mman.h | 1
-rw-r--r-- arch/blackfin/include/asm/module.h | 8
-rw-r--r-- arch/blackfin/include/asm/msgbuf.h | 1
-rw-r--r-- arch/blackfin/include/asm/mutex.h | 77
-rw-r--r-- arch/blackfin/include/asm/page.h | 8
-rw-r--r-- arch/blackfin/include/asm/param.h | 1
-rw-r--r-- arch/blackfin/include/asm/pda.h | 10
-rw-r--r-- arch/blackfin/include/asm/percpu.h | 1
-rw-r--r-- arch/blackfin/include/asm/pgalloc.h | 1
-rw-r--r-- arch/blackfin/include/asm/resource.h | 1
-rw-r--r-- arch/blackfin/include/asm/scatterlist.h | 6
-rw-r--r-- arch/blackfin/include/asm/sections.h | 8
-rw-r--r-- arch/blackfin/include/asm/sembuf.h | 1
-rw-r--r-- arch/blackfin/include/asm/serial.h | 1
-rw-r--r-- arch/blackfin/include/asm/setup.h | 1
-rw-r--r-- arch/blackfin/include/asm/shmbuf.h | 1
-rw-r--r-- arch/blackfin/include/asm/shmparam.h | 1
-rw-r--r-- arch/blackfin/include/asm/sigcontext.h | 8
-rw-r--r-- arch/blackfin/include/asm/socket.h | 1
-rw-r--r-- arch/blackfin/include/asm/sockios.h | 1
-rw-r--r-- arch/blackfin/include/asm/spinlock.h | 8
-rw-r--r-- arch/blackfin/include/asm/statfs.h | 1
-rw-r--r-- arch/blackfin/include/asm/termbits.h | 1
-rw-r--r-- arch/blackfin/include/asm/termios.h | 1
-rw-r--r-- arch/blackfin/include/asm/topology.h | 1
-rw-r--r-- arch/blackfin/include/asm/types.h | 1
-rw-r--r-- arch/blackfin/include/asm/ucontext.h | 1
-rw-r--r-- arch/blackfin/include/asm/unaligned.h | 1
-rw-r--r-- arch/blackfin/include/asm/user.h | 1
-rw-r--r-- arch/blackfin/include/asm/xor.h | 1
-rw-r--r-- arch/blackfin/kernel/Makefile | 1
-rw-r--r-- arch/blackfin/kernel/asm-offsets.c | 10
-rw-r--r-- arch/blackfin/kernel/bfin_gpio.c | 26
-rw-r--r-- arch/blackfin/kernel/debug-mmrs.c | 109
-rw-r--r-- arch/blackfin/kernel/gptimers.c | 93
-rw-r--r-- arch/blackfin/kernel/module.c | 21
-rw-r--r-- arch/blackfin/kernel/process.c | 1
-rw-r--r-- arch/blackfin/kernel/pwm.c | 100
-rw-r--r-- arch/blackfin/kernel/reboot.c | 4
-rw-r--r-- arch/blackfin/kernel/setup.c | 16
-rw-r--r-- arch/blackfin/kernel/time.c | 4
-rw-r--r-- arch/blackfin/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/blackfin/mach-bf518/Kconfig | 78
-rw-r--r-- arch/blackfin/mach-bf518/boards/ezbrd.c | 59
-rw-r--r-- arch/blackfin/mach-bf518/boards/tcm-bf518.c | 47
-rw-r--r-- arch/blackfin/mach-bf518/include/mach/anomaly.h | 24
-rw-r--r-- arch/blackfin/mach-bf518/include/mach/portmux.h | 54
-rw-r--r-- arch/blackfin/mach-bf527/boards/ad7160eval.c | 19
-rw-r--r-- arch/blackfin/mach-bf527/boards/cm_bf527.c | 55
-rw-r--r-- arch/blackfin/mach-bf527/boards/ezbrd.c | 62
-rw-r--r-- arch/blackfin/mach-bf527/boards/ezkit.c | 98
-rw-r--r-- arch/blackfin/mach-bf527/boards/tll6527m.c | 70
-rw-r--r-- arch/blackfin/mach-bf527/include/mach/anomaly.h | 34
-rw-r--r-- arch/blackfin/mach-bf533/boards/H8606.c | 28
-rw-r--r-- arch/blackfin/mach-bf533/boards/blackstamp.c | 10
-rw-r--r-- arch/blackfin/mach-bf533/boards/cm_bf533.c | 29
-rw-r--r-- arch/blackfin/mach-bf533/boards/ezkit.c | 36
-rw-r--r-- arch/blackfin/mach-bf533/boards/ip0x.c | 1
-rw-r--r-- arch/blackfin/mach-bf533/boards/stamp.c | 78
-rw-r--r-- arch/blackfin/mach-bf533/include/mach/anomaly.h | 19
-rw-r--r-- arch/blackfin/mach-bf537/boards/cm_bf537e.c | 51
-rw-r--r-- arch/blackfin/mach-bf537/boards/cm_bf537u.c | 63
-rw-r--r-- arch/blackfin/mach-bf537/boards/dnp5370.c | 2
-rw-r--r-- arch/blackfin/mach-bf537/boards/minotaur.c | 2
-rw-r--r-- arch/blackfin/mach-bf537/boards/pnav10.c | 38
-rw-r--r-- arch/blackfin/mach-bf537/boards/stamp.c | 176
-rw-r--r-- arch/blackfin/mach-bf537/boards/tcm_bf537.c | 51
-rw-r--r-- arch/blackfin/mach-bf537/include/mach/anomaly.h | 34
-rw-r--r-- arch/blackfin/mach-bf538/boards/ezkit.c | 25
-rw-r--r-- arch/blackfin/mach-bf538/ext-gpio.c | 37
-rw-r--r-- arch/blackfin/mach-bf538/include/mach/anomaly.h | 38
-rw-r--r-- arch/blackfin/mach-bf538/include/mach/gpio.h | 3
-rw-r--r-- arch/blackfin/mach-bf548/boards/cm_bf548.c | 15
-rw-r--r-- arch/blackfin/mach-bf548/boards/ezkit.c | 32
-rw-r--r-- arch/blackfin/mach-bf548/include/mach/anomaly.h | 220
-rw-r--r-- arch/blackfin/mach-bf548/include/mach/gpio.h | 2
-rw-r--r-- arch/blackfin/mach-bf548/include/mach/irq.h | 2
-rw-r--r-- arch/blackfin/mach-bf561/boards/acvilon.c | 9
-rw-r--r-- arch/blackfin/mach-bf561/boards/cm_bf561.c | 58
-rw-r--r-- arch/blackfin/mach-bf561/boards/ezkit.c | 41
-rw-r--r-- arch/blackfin/mach-bf561/include/mach/anomaly.h | 132
-rw-r--r-- arch/blackfin/mach-bf561/include/mach/gpio.h | 6
-rw-r--r-- arch/blackfin/mach-bf561/secondary.S | 152
-rw-r--r-- arch/blackfin/mach-common/dpmc_modes.S | 1016
-rw-r--r-- arch/blackfin/mach-common/head.S | 36
-rw-r--r-- arch/blackfin/mach-common/ints-priority.c | 41
-rw-r--r-- arch/blackfin/mach-common/smp.c | 17
-rw-r--r-- arch/cris/kernel/module.c | 43
-rw-r--r-- arch/frv/kernel/module.c | 57
-rw-r--r-- arch/h8300/Kconfig.cpu | 4
-rw-r--r-- arch/h8300/kernel/module.c | 45
-rw-r--r-- arch/ia64/Kconfig | 27
-rw-r--r-- arch/ia64/include/asm/clocksource.h | 10
-rw-r--r-- arch/ia64/include/asm/paravirt.h | 4
-rw-r--r-- arch/ia64/kernel/cyclone.c | 2
-rw-r--r-- arch/ia64/kernel/module.c | 16
-rw-r--r-- arch/ia64/kernel/paravirt.c | 2
-rw-r--r-- arch/ia64/kernel/time.c | 2
-rw-r--r-- arch/ia64/kvm/Kconfig | 1
-rw-r--r-- arch/ia64/sn/kernel/irq.c | 14
-rw-r--r-- arch/ia64/sn/kernel/sn2/timer.c | 2
-rw-r--r-- arch/m32r/Kconfig | 12
-rw-r--r-- arch/m32r/include/asm/delay.h | 27
-rw-r--r-- arch/m32r/kernel/module.c | 38
-rw-r--r-- arch/m68k/emu/nfeth.c | 2
-rw-r--r-- arch/m68k/kernel/module_mm.c | 27
-rw-r--r-- arch/m68k/kernel/module_no.c | 34
-rw-r--r-- arch/microblaze/include/asm/pci-bridge.h | 13
-rw-r--r-- arch/microblaze/include/asm/pci.h | 3
-rw-r--r-- arch/microblaze/include/asm/prom.h | 15
-rw-r--r-- arch/microblaze/kernel/module.c | 35
-rw-r--r-- arch/microblaze/pci/Makefile | 2
-rw-r--r-- arch/microblaze/pci/pci-common.c | 112
-rw-r--r-- arch/microblaze/pci/pci_32.c | 432
-rw-r--r-- arch/mips/Kconfig | 20
-rw-r--r-- arch/mips/cobalt/time.c | 2
-rw-r--r-- arch/mips/include/asm/i8253.h | 24
-rw-r--r-- arch/mips/include/asm/stacktrace.h | 4
-rw-r--r-- arch/mips/jazz/irq.c | 2
-rw-r--r-- arch/mips/kernel/i8253.c | 102
-rw-r--r-- arch/mips/kernel/module.c | 20
-rw-r--r-- arch/mips/kernel/perf_event.c | 2
-rw-r--r-- arch/mips/kernel/perf_event_mipsxx.c | 28
-rw-r--r-- arch/mips/kernel/process.c | 19
-rw-r--r-- arch/mips/kernel/traps.c | 8
-rw-r--r-- arch/mips/kernel/unaligned.c | 5
-rw-r--r-- arch/mips/math-emu/cp1emu.c | 3
-rw-r--r-- arch/mips/mm/fault.c | 8
-rw-r--r-- arch/mips/mti-malta/malta-time.c | 2
-rw-r--r-- arch/mips/oprofile/Makefile | 2
-rw-r--r-- arch/mips/oprofile/backtrace.c | 175
-rw-r--r-- arch/mips/oprofile/common.c | 1
-rw-r--r-- arch/mips/oprofile/op_impl.h | 2
-rw-r--r-- arch/mips/sgi-ip22/ip22-time.c | 2
-rw-r--r-- arch/mips/sni/time.c | 2
-rw-r--r-- arch/mn10300/kernel/module.c | 61
-rw-r--r-- arch/openrisc/Kconfig | 207
-rw-r--r-- arch/openrisc/Makefile | 55
-rw-r--r-- arch/openrisc/README.openrisc | 99
-rw-r--r-- arch/openrisc/TODO.openrisc | 16
-rw-r--r-- arch/openrisc/boot/Makefile | 15
-rw-r--r-- arch/openrisc/boot/dts/or1ksim.dts | 50
-rw-r--r-- arch/openrisc/configs/or1ksim_defconfig | 65
-rw-r--r-- arch/openrisc/include/asm/Kbuild | 64
-rw-r--r-- arch/openrisc/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/openrisc/include/asm/bitops.h | 59
-rw-r--r-- arch/openrisc/include/asm/bitops/__ffs.h | 33
-rw-r--r-- arch/openrisc/include/asm/bitops/__fls.h | 33
-rw-r--r-- arch/openrisc/include/asm/bitops/ffs.h | 32
-rw-r--r-- arch/openrisc/include/asm/bitops/fls.h | 33
-rw-r--r-- arch/openrisc/include/asm/byteorder.h | 1
-rw-r--r-- arch/openrisc/include/asm/cache.h | 29
-rw-r--r-- arch/openrisc/include/asm/cpuinfo.h | 34
-rw-r--r-- arch/openrisc/include/asm/delay.h | 24
-rw-r--r-- arch/openrisc/include/asm/dma-mapping.h | 134
-rw-r--r-- arch/openrisc/include/asm/elf.h | 108
-rw-r--r-- arch/openrisc/include/asm/fixmap.h | 87
-rw-r--r-- arch/openrisc/include/asm/gpio.h | 65
-rw-r--r-- arch/openrisc/include/asm/io.h | 51
-rw-r--r-- arch/openrisc/include/asm/irq.h | 27
-rw-r--r-- arch/openrisc/include/asm/irqflags.h | 29
-rw-r--r-- arch/openrisc/include/asm/linkage.h | 25
-rw-r--r-- arch/openrisc/include/asm/memblock.h | 24
-rw-r--r-- arch/openrisc/include/asm/mmu.h | 26
-rw-r--r-- arch/openrisc/include/asm/mmu_context.h | 43
-rw-r--r-- arch/openrisc/include/asm/mutex.h | 27
-rw-r--r-- arch/openrisc/include/asm/page.h | 110
-rw-r--r-- arch/openrisc/include/asm/param.h | 26
-rw-r--r-- arch/openrisc/include/asm/pgalloc.h | 102
-rw-r--r-- arch/openrisc/include/asm/pgtable.h | 463
-rw-r--r-- arch/openrisc/include/asm/processor.h | 113
-rw-r--r-- arch/openrisc/include/asm/prom.h | 77
-rw-r--r-- arch/openrisc/include/asm/ptrace.h | 131
-rw-r--r-- arch/openrisc/include/asm/serial.h | 36
-rw-r--r-- arch/openrisc/include/asm/sigcontext.h | 38
-rw-r--r-- arch/openrisc/include/asm/spinlock.h | 24
-rw-r--r-- arch/openrisc/include/asm/spr.h | 42
-rw-r--r-- arch/openrisc/include/asm/spr_defs.h | 604
-rw-r--r-- arch/openrisc/include/asm/syscall.h | 77
-rw-r--r-- arch/openrisc/include/asm/syscalls.h | 27
-rw-r--r-- arch/openrisc/include/asm/system.h | 35
-rw-r--r-- arch/openrisc/include/asm/thread_info.h | 134
-rw-r--r-- arch/openrisc/include/asm/timex.h | 36
-rw-r--r-- arch/openrisc/include/asm/tlb.h | 34
-rw-r--r-- arch/openrisc/include/asm/tlbflush.h | 55
-rw-r--r-- arch/openrisc/include/asm/uaccess.h | 355
-rw-r--r-- arch/openrisc/include/asm/unaligned.h | 51
-rw-r--r-- arch/openrisc/include/asm/unistd.h | 31
-rw-r--r-- arch/openrisc/kernel/Makefile | 14
-rw-r--r-- arch/openrisc/kernel/asm-offsets.c | 70
-rw-r--r-- arch/openrisc/kernel/dma.c | 191
-rw-r--r-- arch/openrisc/kernel/entry.S | 1128
-rw-r--r-- arch/openrisc/kernel/head.S | 1607
-rw-r--r-- arch/openrisc/kernel/idle.c | 77
-rw-r--r-- arch/openrisc/kernel/init_task.c | 41
-rw-r--r-- arch/openrisc/kernel/irq.c | 172
-rw-r--r-- arch/openrisc/kernel/module.c | 72
-rw-r--r-- arch/openrisc/kernel/or32_ksyms.c | 46
-rw-r--r-- arch/openrisc/kernel/process.c | 311
-rw-r--r-- arch/openrisc/kernel/prom.c | 108
-rw-r--r-- arch/openrisc/kernel/ptrace.c | 211
-rw-r--r-- arch/openrisc/kernel/setup.c | 381
-rw-r--r-- arch/openrisc/kernel/signal.c | 396
-rw-r--r-- arch/openrisc/kernel/sys_call_table.c | 28
-rw-r--r-- arch/openrisc/kernel/sys_or32.c | 57
-rw-r--r-- arch/openrisc/kernel/time.c | 181
-rw-r--r-- arch/openrisc/kernel/traps.c | 366
-rw-r--r-- arch/openrisc/kernel/vmlinux.h | 12
-rw-r--r-- arch/openrisc/kernel/vmlinux.lds.S | 115
-rw-r--r-- arch/openrisc/lib/Makefile | 5
-rw-r--r-- arch/openrisc/lib/delay.c | 60
-rw-r--r-- arch/openrisc/lib/string.S | 204
-rw-r--r-- arch/openrisc/mm/Makefile | 5
-rw-r--r-- arch/openrisc/mm/fault.c | 338
-rw-r--r-- arch/openrisc/mm/init.c | 283
-rw-r--r-- arch/openrisc/mm/ioremap.c | 137
-rw-r--r-- arch/openrisc/mm/tlb.c | 193
-rw-r--r-- arch/parisc/kernel/module.c | 12
-rw-r--r-- arch/powerpc/Kconfig | 1
-rw-r--r-- arch/powerpc/Makefile | 3
-rw-r--r-- arch/powerpc/include/asm/8253pit.h | 3
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 14
-rw-r--r-- arch/powerpc/include/asm/emulated_ops.h | 4
-rw-r--r-- arch/powerpc/include/asm/exception-64s.h | 136
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 5
-rw-r--r-- arch/powerpc/include/asm/hw_breakpoint.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm.h | 15
-rw-r--r-- arch/powerpc/include/asm/kvm_asm.h | 4
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 196
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 4
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_asm.h | 41
-rw-r--r-- arch/powerpc/include/asm/kvm_booke.h | 4
-rw-r--r-- arch/powerpc/include/asm/kvm_e500.h | 30
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 169
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 41
-rw-r--r-- arch/powerpc/include/asm/mmu-hash64.h | 10
-rw-r--r-- arch/powerpc/include/asm/paca.h | 3
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 29
-rw-r--r-- arch/powerpc/include/asm/pci.h | 3
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 40
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 28
-rw-r--r-- arch/powerpc/include/asm/prom.h | 14
-rw-r--r-- arch/powerpc/include/asm/reg.h | 25
-rw-r--r-- arch/powerpc/include/asm/reg_booke.h | 1
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 190
-rw-r--r-- arch/powerpc/kernel/cpu_setup_power7.S | 22
-rw-r--r-- arch/powerpc/kernel/cpu_setup_ppc970.S | 26
-rw-r--r-- arch/powerpc/kernel/e500-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 228
-rw-r--r-- arch/powerpc/kernel/head_fsl_booke.S | 8
-rw-r--r-- arch/powerpc/kernel/idle_power7.S | 2
-rw-r--r-- arch/powerpc/kernel/module.c | 18
-rw-r--r-- arch/powerpc/kernel/module_32.c | 11
-rw-r--r-- arch/powerpc/kernel/module_64.c | 10
-rw-r--r-- arch/powerpc/kernel/mpc7450-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/paca.c | 2
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 11
-rw-r--r-- arch/powerpc/kernel/pci_32.c | 150
-rw-r--r-- arch/powerpc/kernel/pci_dn.c | 47
-rw-r--r-- arch/powerpc/kernel/pci_of_scan.c | 9
-rw-r--r-- arch/powerpc/kernel/perf_event.c | 6
-rw-r--r-- arch/powerpc/kernel/perf_event_fsl_emb.c | 6
-rw-r--r-- arch/powerpc/kernel/power4-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/power5+-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/power5-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/power6-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/power7-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/ppc970-pmu.c | 5
-rw-r--r-- arch/powerpc/kernel/process.c | 4
-rw-r--r-- arch/powerpc/kernel/ptrace.c | 4
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 3
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 3
-rw-r--r-- arch/powerpc/kernel/smp.c | 1
-rw-r--r-- arch/powerpc/kernel/time.c | 2
-rw-r--r-- arch/powerpc/kernel/traps.c | 5
-rw-r--r-- arch/powerpc/kvm/44x_tlb.c | 4
-rw-r--r-- arch/powerpc/kvm/Kconfig | 35
-rw-r--r-- arch/powerpc/kvm/Makefile | 27
-rw-r--r-- arch/powerpc/kvm/book3s.c | 1007
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu.c | 54
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 180
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 73
-rw-r--r-- arch/powerpc/kvm/book3s_exports.c | 9
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 1269
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 155
-rw-r--r-- arch/powerpc/kvm/book3s_hv_interrupts.S | 166
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 370
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1345
-rw-r--r-- arch/powerpc/kvm/book3s_interrupts.S | 21
-rw-r--r-- arch/powerpc/kvm/book3s_mmu_hpte.c | 71
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 1029
-rw-r--r-- arch/powerpc/kvm/book3s_rmhandlers.S | 102
-rw-r--r-- arch/powerpc/kvm/book3s_segment.S | 117
-rw-r--r-- arch/powerpc/kvm/booke.c | 132
-rw-r--r-- arch/powerpc/kvm/booke.h | 23
-rw-r--r-- arch/powerpc/kvm/booke_interrupts.S | 66
-rw-r--r-- arch/powerpc/kvm/e500.c | 7
-rw-r--r-- arch/powerpc/kvm/e500_emulate.c | 4
-rw-r--r-- arch/powerpc/kvm/e500_tlb.c | 800
-rw-r--r-- arch/powerpc/kvm/e500_tlb.h | 13
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 78
-rw-r--r-- arch/powerpc/kvm/timing.c | 9
-rw-r--r-- arch/powerpc/kvm/trace.h | 4
-rw-r--r-- arch/powerpc/mm/fault.c | 6
-rw-r--r-- arch/powerpc/mm/hash_native_64.c | 6
-rw-r--r-- arch/powerpc/net/Makefile | 4
-rw-r--r-- arch/powerpc/net/bpf_jit.h | 227
-rw-r--r-- arch/powerpc/net/bpf_jit_64.S | 138
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 694
-rw-r--r-- arch/powerpc/platforms/52xx/Kconfig | 8
-rw-r--r-- arch/powerpc/platforms/52xx/Makefile | 1
-rw-r--r-- arch/powerpc/platforms/52xx/mpc52xx_gpio.c | 380
-rw-r--r-- arch/powerpc/platforms/amigaone/Kconfig | 2
-rw-r--r-- arch/powerpc/platforms/cell/spufs/file.c | 11
-rw-r--r-- arch/powerpc/platforms/cell/spufs/inode.c | 29
-rw-r--r-- arch/powerpc/platforms/cell/spufs/spufs.h | 2
-rw-r--r-- arch/powerpc/platforms/cell/spufs/syscalls.c | 22
-rw-r--r-- arch/powerpc/platforms/chrp/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/iseries/exception.S | 2
-rw-r--r-- arch/powerpc/platforms/iseries/exception.h | 4
-rw-r--r-- arch/powerpc/platforms/powermac/pci.c | 3
-rw-r--r-- arch/powerpc/platforms/prep/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r-- arch/powerpc/sysdev/xics/icp-native.c | 9
-rw-r--r-- arch/s390/boot/compressed/head31.S | 4
-rw-r--r-- arch/s390/boot/compressed/head64.S | 4
-rw-r--r-- arch/s390/crypto/sha256_s390.c | 66
-rw-r--r-- arch/s390/include/asm/irqflags.h | 16
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 12
-rw-r--r-- arch/s390/include/asm/linkage.h | 5
-rw-r--r-- arch/s390/include/asm/lowcore.h | 2
-rw-r--r-- arch/s390/include/asm/mmu.h | 4
-rw-r--r-- arch/s390/include/asm/pgalloc.h | 7
-rw-r--r-- arch/s390/include/asm/pgtable.h | 42
-rw-r--r-- arch/s390/include/asm/processor.h | 1
-rw-r--r-- arch/s390/include/asm/thread_info.h | 2
-rw-r--r-- arch/s390/include/asm/tlbflush.h | 2
-rw-r--r-- arch/s390/kernel/asm-offsets.c | 2
-rw-r--r-- arch/s390/kernel/base.S | 25
-rw-r--r-- arch/s390/kernel/compat_wrapper.S | 836
-rw-r--r-- arch/s390/kernel/entry.S | 32
-rw-r--r-- arch/s390/kernel/entry.h | 7
-rw-r--r-- arch/s390/kernel/entry64.S | 111
-rw-r--r-- arch/s390/kernel/head.S | 7
-rw-r--r-- arch/s390/kernel/head31.S | 13
-rw-r--r-- arch/s390/kernel/head64.S | 13
-rw-r--r-- arch/s390/kernel/irq.c | 83
-rw-r--r-- arch/s390/kernel/mcount.S | 16
-rw-r--r-- arch/s390/kernel/mcount64.S | 16
-rw-r--r-- arch/s390/kernel/module.c | 20
-rw-r--r-- arch/s390/kernel/reipl.S | 5
-rw-r--r-- arch/s390/kernel/reipl64.S | 5
-rw-r--r-- arch/s390/kernel/relocate_kernel.S | 6
-rw-r--r-- arch/s390/kernel/relocate_kernel64.S | 6
-rw-r--r-- arch/s390/kernel/s390_ksyms.c | 4
-rw-r--r-- arch/s390/kernel/sclp.S | 5
-rw-r--r-- arch/s390/kernel/smp.c | 3
-rw-r--r-- arch/s390/kernel/switch_cpu.S | 8
-rw-r--r-- arch/s390/kernel/switch_cpu64.S | 8
-rw-r--r-- arch/s390/kernel/swsusp_asm64.S | 8
-rw-r--r-- arch/s390/kernel/traps.c | 36
-rw-r--r-- arch/s390/kvm/Kconfig | 1
-rw-r--r-- arch/s390/kvm/Makefile | 2
-rw-r--r-- arch/s390/kvm/gaccess.h | 243
-rw-r--r-- arch/s390/kvm/intercept.c | 35
-rw-r--r-- arch/s390/kvm/interrupt.c | 4
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 48
-rw-r--r-- arch/s390/kvm/kvm-s390.h | 28
-rw-r--r-- arch/s390/kvm/priv.c | 49
-rw-r--r-- arch/s390/kvm/sie64a.S | 98
-rw-r--r-- arch/s390/kvm/sigp.c | 6
-rw-r--r-- arch/s390/lib/qrnnd.S | 5
-rw-r--r-- arch/s390/mm/fault.c | 24
-rw-r--r-- arch/s390/mm/hugetlbpage.c | 2
-rw-r--r-- arch/s390/mm/pgtable.c | 421
-rw-r--r-- arch/s390/mm/vmem.c | 8
-rw-r--r-- arch/score/kernel/module.c | 29
-rw-r--r-- arch/sh/Kconfig | 16
-rw-r--r-- arch/sh/include/asm/delay.h | 27
-rw-r--r-- arch/sh/kernel/cpu/sh4/perf_event.c | 15
-rw-r--r-- arch/sh/kernel/cpu/sh4a/perf_event.c | 15
-rw-r--r-- arch/sh/kernel/cpu/shmobile/pm_runtime.c | 6
-rw-r--r-- arch/sh/kernel/module.c | 35
-rw-r--r-- arch/sh/kernel/ptrace_32.c | 5
-rw-r--r-- arch/sh/kernel/traps_32.c | 2
-rw-r--r-- arch/sh/kernel/traps_64.c | 8
-rw-r--r-- arch/sh/math-emu/math.c | 2
-rw-r--r-- arch/sh/mm/fault_32.c | 6
-rw-r--r-- arch/sh/mm/tlbflush_64.c | 6
-rw-r--r-- arch/sparc/include/asm/pci_32.h | 3
-rw-r--r-- arch/sparc/include/asm/pci_64.h | 3
-rw-r--r-- arch/sparc/include/asm/ptrace.h | 1
-rw-r--r-- arch/sparc/kernel/module.c | 28
-rw-r--r-- arch/sparc/kernel/pci.c | 8
-rw-r--r-- arch/sparc/kernel/pcic.c | 8
-rw-r--r-- arch/sparc/kernel/perf_event.c | 44
-rw-r--r-- arch/sparc/kernel/unaligned_32.c | 4
-rw-r--r-- arch/sparc/kernel/unaligned_64.c | 12
-rw-r--r-- arch/sparc/kernel/visemul.c | 2
-rw-r--r-- arch/sparc/math-emu/math_32.c | 2
-rw-r--r-- arch/sparc/math-emu/math_64.c | 2
-rw-r--r-- arch/sparc/mm/fault_32.c | 8
-rw-r--r-- arch/sparc/mm/fault_64.c | 8
-rw-r--r-- arch/tile/kernel/module.c | 31
-rw-r--r-- arch/tile/kvm/Kconfig | 1
-rw-r--r-- arch/um/sys-i386/Makefile | 3
-rw-r--r-- arch/um/sys-x86_64/Makefile | 2
-rw-r--r-- arch/unicore32/kernel/module.c | 35
-rw-r--r-- arch/x86/Kconfig | 113
-rw-r--r-- arch/x86/Kconfig.cpu | 3
-rw-r--r-- arch/x86/boot/Makefile | 9
-rw-r--r-- arch/x86/boot/tools/build.c | 33
-rw-r--r-- arch/x86/crypto/ghash-clmulni-intel_glue.c | 2
-rw-r--r-- arch/x86/ia32/ia32_signal.c | 22
-rw-r--r-- arch/x86/ia32/ia32entry.S | 10
-rw-r--r-- arch/x86/include/asm/alternative-asm.h | 4
-rw-r--r-- arch/x86/include/asm/alternative.h | 8
-rw-r--r-- arch/x86/include/asm/amd_iommu.h | 35
-rw-r--r-- arch/x86/include/asm/amd_iommu_proto.h | 54
-rw-r--r-- arch/x86/include/asm/amd_iommu_types.h | 580
-rw-r--r-- arch/x86/include/asm/apb_timer.h | 23
-rw-r--r-- arch/x86/include/asm/asm.h | 5
-rw-r--r-- arch/x86/include/asm/calling.h | 130
-rw-r--r-- arch/x86/include/asm/clocksource.h | 18
-rw-r--r-- arch/x86/include/asm/cmpxchg_32.h | 48
-rw-r--r-- arch/x86/include/asm/cmpxchg_64.h | 45
-rw-r--r-- arch/x86/include/asm/cpufeature.h | 10
-rw-r--r-- arch/x86/include/asm/delay.h | 25
-rw-r--r-- arch/x86/include/asm/entry_arch.h | 4
-rw-r--r-- arch/x86/include/asm/fixmap.h | 1
-rw-r--r-- arch/x86/include/asm/frame.h | 11
-rw-r--r-- arch/x86/include/asm/hw_irq.h | 1
-rw-r--r-- arch/x86/include/asm/i8253.h | 20
-rw-r--r-- arch/x86/include/asm/irq_vectors.h | 11
-rw-r--r-- arch/x86/include/asm/irqflags.h | 11
-rw-r--r-- arch/x86/include/asm/kvm_emulate.h | 52
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 46
-rw-r--r-- arch/x86/include/asm/kvm_para.h | 20
-rw-r--r-- arch/x86/include/asm/lguest_hcall.h | 1
-rw-r--r-- arch/x86/include/asm/mce.h | 19
-rw-r--r-- arch/x86/include/asm/mmzone_32.h | 6
-rw-r--r-- arch/x86/include/asm/msr-index.h | 15
-rw-r--r-- arch/x86/include/asm/paravirt.h | 9
-rw-r--r-- arch/x86/include/asm/paravirt_types.h | 1
-rw-r--r-- arch/x86/include/asm/percpu.h | 11
-rw-r--r-- arch/x86/include/asm/perf_event.h | 5
-rw-r--r-- arch/x86/include/asm/perf_event_p4.h | 33
-rw-r--r-- arch/x86/include/asm/pgtable_types.h | 6
-rw-r--r-- arch/x86/include/asm/processor-flags.h | 1
-rw-r--r-- arch/x86/include/asm/prom.h | 11
-rw-r--r-- arch/x86/include/asm/rwlock.h | 43
-rw-r--r-- arch/x86/include/asm/segment.h | 2
-rw-r--r-- arch/x86/include/asm/smpboot_hooks.h | 8
-rw-r--r-- arch/x86/include/asm/spinlock.h | 37
-rw-r--r-- arch/x86/include/asm/spinlock_types.h | 6
-rw-r--r-- arch/x86/include/asm/time.h | 6
-rw-r--r-- arch/x86/include/asm/traps.h | 4
-rw-r--r-- arch/x86/include/asm/tsc.h | 4
-rw-r--r-- arch/x86/include/asm/uaccess.h | 3
-rw-r--r-- arch/x86/include/asm/uv/uv_bau.h | 59
-rw-r--r-- arch/x86/include/asm/uv/uv_mmrs.h | 2889
-rw-r--r-- arch/x86/include/asm/vgtod.h | 3
-rw-r--r-- arch/x86/include/asm/vmx.h | 43
-rw-r--r-- arch/x86/include/asm/vsyscall.h | 4
-rw-r--r-- arch/x86/include/asm/vvar.h | 24
-rw-r--r-- arch/x86/include/asm/xen/hypercall.h | 22
-rw-r--r-- arch/x86/include/asm/xen/pci.h | 5
-rw-r--r-- arch/x86/include/asm/xen/trace_types.h | 18
-rw-r--r-- arch/x86/kernel/Makefile | 9
-rw-r--r-- arch/x86/kernel/alternative.c | 23
-rw-r--r-- arch/x86/kernel/amd_iommu.c | 2764
-rw-r--r-- arch/x86/kernel/amd_iommu_init.c | 1572
-rw-r--r-- arch/x86/kernel/apb_timer.c | 410
-rw-r--r-- arch/x86/kernel/apic/apic.c | 27
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 91
-rw-r--r-- arch/x86/kernel/apm_32.c | 8
-rw-r--r-- arch/x86/kernel/asm-offsets_32.c | 1
-rw-r--r-- arch/x86/kernel/cpu/bugs.c | 4
-rw-r--r-- arch/x86/kernel/cpu/hypervisor.c | 4
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 18
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce-severity.c | 152
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 288
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce_amd.c | 10
-rw-r--r-- arch/x86/kernel/cpu/mtrr/main.c | 182
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 168
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd.c | 14
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 385
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 4
-rw-r--r-- arch/x86/kernel/cpu/perf_event_p4.c | 119
-rw-r--r-- arch/x86/kernel/devicetree.c | 60
-rw-r--r-- arch/x86/kernel/dumpstack_64.c | 37
-rw-r--r-- arch/x86/kernel/entry_64.S | 84
-rw-r--r-- arch/x86/kernel/hpet.c | 14
-rw-r--r-- arch/x86/kernel/i8253.c | 99
-rw-r--r-- arch/x86/kernel/irqinit.c | 3
-rw-r--r-- arch/x86/kernel/kgdb.c | 4
-rw-r--r-- arch/x86/kernel/kvm.c | 72
-rw-r--r-- arch/x86/kernel/kvmclock.c | 2
-rw-r--r-- arch/x86/kernel/microcode_amd.c | 21
-rw-r--r-- arch/x86/kernel/module.c | 37
-rw-r--r-- arch/x86/kernel/paravirt.c | 9
-rw-r--r-- arch/x86/kernel/ptrace.c | 5
-rw-r--r-- arch/x86/kernel/quirks.c | 5
-rw-r--r-- arch/x86/kernel/reboot.c | 16
-rw-r--r-- arch/x86/kernel/relocate_kernel_32.S | 2
-rw-r--r-- arch/x86/kernel/relocate_kernel_64.S | 2
-rw-r--r-- arch/x86/kernel/signal.c | 56
-rw-r--r-- arch/x86/kernel/smpboot.c | 2
-rw-r--r-- arch/x86/kernel/stacktrace.c | 2
-rw-r--r-- arch/x86/kernel/tboot.c | 1
-rw-r--r-- arch/x86/kernel/time.c | 2
-rw-r--r-- arch/x86/kernel/traps.c | 6
-rw-r--r-- arch/x86/kernel/tsc.c | 26
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 49
-rw-r--r-- arch/x86/kernel/vread_tsc_64.c | 36
-rw-r--r-- arch/x86/kernel/vsyscall_64.c | 310
-rw-r--r-- arch/x86/kernel/vsyscall_emu_64.S | 27
-rw-r--r-- arch/x86/kvm/Kconfig | 2
-rw-r--r-- arch/x86/kvm/emulate.c | 1749
-rw-r--r-- arch/x86/kvm/mmu.c | 1226
-rw-r--r-- arch/x86/kvm/mmu.h | 25
-rw-r--r-- arch/x86/kvm/mmu_audit.c | 12
-rw-r--r-- arch/x86/kvm/mmutrace.h | 48
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 258
-rw-r--r-- arch/x86/kvm/svm.c | 6
-rw-r--r-- arch/x86/kvm/trace.h | 31
-rw-r--r-- arch/x86/kvm/vmx.c | 2784
-rw-r--r-- arch/x86/kvm/x86.c | 374
-rw-r--r-- arch/x86/kvm/x86.h | 44
-rw-r--r-- arch/x86/lguest/boot.c | 36
-rw-r--r-- arch/x86/lguest/i386_head.S | 35
-rw-r--r-- arch/x86/lib/Makefile | 9
-rw-r--r-- arch/x86/lib/copy_page_64.S | 9
-rw-r--r-- arch/x86/lib/memmove_64.S | 11
-rw-r--r-- arch/x86/lib/rwlock.S | 44
-rw-r--r-- arch/x86/lib/rwlock_64.S | 38
-rw-r--r-- arch/x86/lib/rwsem.S (renamed from arch/x86/lib/rwsem_64.S) | 75
-rw-r--r-- arch/x86/lib/semaphore_32.S | 124
-rw-r--r-- arch/x86/lib/thunk_64.S | 45
-rw-r--r-- arch/x86/lib/usercopy.c | 43
-rw-r--r-- arch/x86/mm/fault.c | 6
-rw-r--r-- arch/x86/mm/kmemcheck/error.c | 2
-rw-r--r-- arch/x86/mm/numa.c | 15
-rw-r--r-- arch/x86/mm/numa_32.c | 6
-rw-r--r-- arch/x86/mm/pageattr-test.c | 3
-rw-r--r-- arch/x86/oprofile/backtrace.c | 21
-rw-r--r-- arch/x86/pci/mmconfig-shared.c | 3
-rw-r--r-- arch/x86/pci/xen.c | 371
-rw-r--r-- arch/x86/platform/efi/efi.c | 90
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 69
-rw-r--r-- arch/x86/vdso/Makefile | 1
-rw-r--r-- arch/x86/vdso/vclock_gettime.c | 103
-rw-r--r-- arch/x86/vdso/vdso.S | 15
-rw-r--r-- arch/x86/vdso/vma.c | 58
-rw-r--r-- arch/x86/xen/Makefile | 4
-rw-r--r-- arch/x86/xen/enlighten.c | 24
-rw-r--r-- arch/x86/xen/mmu.c | 139
-rw-r--r-- arch/x86/xen/multicalls.c | 169
-rw-r--r-- arch/x86/xen/multicalls.h | 6
-rw-r--r-- arch/x86/xen/platform-pci-unplug.c | 2
-rw-r--r-- arch/x86/xen/trace.c | 61
-rw-r--r-- arch/x86/xen/vga.c | 67
-rw-r--r-- arch/x86/xen/xen-ops.h | 11
-rw-r--r-- arch/xtensa/Kconfig | 13
-rw-r--r-- arch/xtensa/kernel/module.c | 43
743 files changed, 32849 insertions, 21912 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 60219bf94198..ca2da8da6e9c 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -6,6 +6,7 @@ config ALPHA
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS
select HAVE_IRQ_WORK
+ select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
select HAVE_GENERIC_HARDIRQS
diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h
deleted file mode 100644
index a71c9c1455a7..000000000000
--- a/arch/alpha/include/asm/8253pit.h
+++ /dev/null
@@ -1,3 +0,0 @@
-/*
- * 8253/8254 Programmable Interval Timer
- */
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index ebc3c894b5a2..2fd00b7077e4 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -29,20 +29,6 @@
#define DEBUGP(fmt...)
#endif
-void *
-module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-void
-module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
/* Allocate the GOT at the end of the core sections. */
struct got_entry {
@@ -156,14 +142,6 @@ module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
}
int
-apply_relocate(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- unsigned int relsec, struct module *me)
-{
- printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
- return -ENOEXEC;
-}
-
-int
apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
@@ -302,15 +280,3 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
return 0;
}
-
-int
-module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void
-module_arch_cleanup(struct module *mod)
-{
-}
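The deletions above are part of a tree-wide cleanup: the generic loader in kernel/module.c gained weak default implementations of module_alloc(), module_free(), apply_relocate(), module_finalize(), and module_arch_cleanup(), so each architecture drops its boilerplate copies and keeps only the hooks it genuinely customizes. A minimal sketch of what an arch retains after the cleanup, assuming the 3.1-era module API (the relocation body is stubbed out):

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/module.h>

/*
 * Only the relocation flavor the arch actually supports is kept;
 * everything else now falls back to the __weak defaults in
 * kernel/module.c.
 */
int apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	/* walk the RELA entries for this section, as in the code above */
	return 0;
}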
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 90561c45e7d8..8e47709160f8 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) {
- if (perf_event_overflow(event, 1, &data, regs)) {
+ if (perf_event_overflow(event, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
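The one-line change above tracks a perf core API cleanup in the same series: the "nmi" flag argument was removed from the overflow interface, so callers pass only the event, sample data, and regs. A hedged sketch of a per-counter overflow path after the change, mirroring the alpha handler (helper names are the alpha ones; surrounding loop elided):

#include <linux/perf_event.h>

static void handle_counter_overflow(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx, struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	if (!alpha_perf_event_set_period(event, hwc, idx))
		return;

	/* no "nmi" argument anymore; a nonzero return means throttle */
	if (perf_event_overflow(event, &data, regs)) {
		/* interrupts coming too quickly: disable the counter */
	}
}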
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index 8de1046fe91e..f33648e4e8cf 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -26,7 +26,6 @@
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
-#include <asm/8253pit.h>
#include "proto.h"
#include "irq_impl.h"
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 818e74ed45dc..e336694ca042 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -46,7 +46,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
-#include <asm/8253pit.h>
#include <asm/rtc.h>
#include <linux/mc146818rtc.h>
@@ -91,7 +90,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
-void set_irq_work_pending(void)
+void arch_irq_work_raise(void)
{
set_irq_work_pending_flag();
}
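The rename turns the alpha helper into the hook the generic irq_work core actually calls: arch_irq_work_raise() is invoked whenever work is queued, and the arch arranges for irq_work_run() to fire later (from the timer tick on alpha). A small usage sketch under the 3.x irq_work API; my_work, my_work_func, setup, and kick are illustrative names, not from the patch:

#include <linux/irq_work.h>

static struct irq_work my_work;

static void my_work_func(struct irq_work *work)
{
	/* runs later, from the arch's irq_work path */
}

static void setup(void)
{
	init_irq_work(&my_work, my_work_func);
}

static void kick(void)
{
	irq_work_queue(&my_work);	/* ends up in arch_irq_work_raise() */
}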
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 894487479f37..1478c6171b00 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -645,6 +645,7 @@ config ARCH_SHMOBILE
select NO_IOPORT
select SPARSE_IRQ
select MULTI_IRQ_HANDLER
+ select PM_GENERIC_DOMAINS if PM
help
Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index 47ad3b1a4fee..5a584520db2f 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -8,6 +8,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_MMP=y
+CONFIG_MACH_BROWNSTONE=y
CONFIG_MACH_FLINT=y
CONFIG_MACH_MARVELL_JASPER=y
CONFIG_HIGH_RES_TIMERS=y
@@ -63,10 +64,16 @@ CONFIG_BACKLIGHT_MAX8925=y
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MAX8925=y
+CONFIG_MMC=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
@@ -81,7 +88,7 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DYNAMIC_DEBUG is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/include/asm/i8253.h b/arch/arm/include/asm/i8253.h
deleted file mode 100644
index 70656b69d5ce..000000000000
--- a/arch/arm/include/asm/i8253.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASMARM_I8253_H
-#define __ASMARM_I8253_H
-
-/* i8253A PIT registers */
-#define PIT_MODE 0x43
-#define PIT_CH0 0x40
-
-#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
-
-extern raw_spinlock_t i8253_lock;
-
-#define outb_pit outb_p
-#define inb_pit inb_p
-
-#endif
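This header goes away as part of consolidating the 8253/i8253 PIT definitions into a shared header during this series; the ARM copy only mattered for Footbridge. PIT_LATCH itself is plain rounded division: the 8253 reload count that makes the timer fire HZ times per second. A standalone check of the arithmetic, assuming the canonical PIT_TICK_RATE of 1193182 Hz and HZ = 100:

#include <stdio.h>

#define PIT_TICK_RATE	1193182		/* PIT input clock, Hz */
#define HZ		100
#define PIT_LATCH	((PIT_TICK_RATE + HZ / 2) / HZ)	/* round to nearest */

int main(void)
{
	printf("PIT_LATCH = %d\n", PIT_LATCH);	/* prints 11932 */
	return 0;
}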
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 016d6a0830a3..05b377616fd5 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -43,25 +43,7 @@ void *module_alloc(unsigned long size)
GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
__builtin_return_address(0));
}
-#else /* CONFIG_MMU */
-void *module_alloc(unsigned long size)
-{
- return size == 0 ? NULL : vmalloc(size);
-}
-#endif /* !CONFIG_MMU */
-
-void module_free(struct module *module, void *region)
-{
- vfree(region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
+#endif
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
@@ -265,15 +247,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
return 0;
}
-int
-apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *module)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
struct mod_unwind_map {
const Elf_Shdr *unw_sec;
const Elf_Shdr *txt_sec;
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index f1e8dd94afe8..dd7f3b9f4cb3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -173,6 +173,20 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
enum armv6mpcore_perf_types {
@@ -310,6 +324,20 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
static inline unsigned long
@@ -479,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
if (!armpmu_event_set_period(event, hwc, idx))
continue;
- if (perf_event_overflow(event, 0, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 963317896c80..4c851834f68e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -298,6 +298,20 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
/*
@@ -409,6 +423,20 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
/*
@@ -1061,7 +1089,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
if (!armpmu_event_set_period(event, hwc, idx))
continue;
- if (perf_event_overflow(event, 0, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 39affbe4fdb2..3c4397491d08 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -144,6 +144,20 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
#define XSCALE_PMU_ENABLE 0x001
@@ -251,7 +265,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
if (!armpmu_event_set_period(event, hwc, idx))
continue;
- if (perf_event_overflow(event, 0, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
@@ -583,7 +597,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
if (!armpmu_event_set_period(event, hwc, idx))
continue;
- if (perf_event_overflow(event, 0, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
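The [C(NODE)] blocks added to each PMU table above track a new generic perf cache level: NODE (memory-node) cache events. Every arch cache map gains the extra row, and PMUs that cannot count such events mark all ops unsupported, as these ARM tables do. A sketch of how such a three-level map is typically indexed, modeled on the ARM code (CACHE_OP_UNSUPPORTED's value is the ARM convention, assumed here):

#include <linux/perf_event.h>
#include <linux/errno.h>

#define CACHE_OP_UNSUPPORTED	0xFFFF	/* ARM convention for "no event code" */

static int map_cache_event(u64 config,
		const unsigned (*map)[PERF_COUNT_HW_CACHE_OP_MAX]
				     [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	unsigned int type = config & 0xff;
	unsigned int op = (config >> 8) & 0xff;
	unsigned int result = (config >> 16) & 0xff;
	unsigned int code;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	/* e.g. map[C(NODE)][C(OP_READ)][C(RESULT_MISS)] from the tables above */
	code = map[type][op][result];
	if (code == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return code;
}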
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 897ade059f58..2491f3b406bc 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -374,7 +374,7 @@ static long ptrace_hbp_idx_to_num(int idx)
/*
* Handle hitting a HW-breakpoint.
*/
-static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+static void ptrace_hbptriggered(struct perf_event *bp,
struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -457,7 +457,8 @@ static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
attr.bp_type = type;
attr.disabled = 1;
- return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
+ return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+ tsk);
}
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
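Two related API changes are visible in this hunk: the breakpoint trigger callback loses the old "nmi" int, and register_user_hw_breakpoint() gains a fourth parameter, an opaque context cookie that the perf core hands back to the callback (ptrace passes NULL because it needs nothing extra). A hedged usage sketch; my_trigger and install_bp are illustrative names, not from the patch:

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static void my_trigger(struct perf_event *bp,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	/* called when the watched address is hit */
}

static struct perf_event *install_bp(struct task_struct *tsk)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr	= 0;	/* filled in later, as ptrace does */
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= HW_BREAKPOINT_W;
	attr.disabled	= 1;

	/* NULL context: nothing extra to pass back to my_trigger() */
	return register_user_hw_breakpoint(&attr, my_trigger, NULL, tsk);
}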
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5045e4..5f452f8fde05 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
unsigned int address, destreg, data, type;
unsigned int res = 0;
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h
deleted file mode 100644
index 02182c16a022..000000000000
--- a/arch/arm/mach-at91/include/mach/at91_mci.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_mci.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * MultiMedia Card Interface (MCI) registers.
- * Based on AT91RM9200 datasheet revision F.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_MCI_H
-#define AT91_MCI_H
-
-#define AT91_MCI_CR 0x00 /* Control Register */
-#define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */
-#define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */
-#define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */
-#define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */
-#define AT91_MCI_SWRST (1 << 7) /* Software Reset */
-
-#define AT91_MCI_MR 0x04 /* Mode Register */
-#define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */
-#define AT91_MCI_PWSDIV (7 << 8) /* Power Saving Divider */
-#define AT91_MCI_RDPROOF (1 << 11) /* Read Proof Enable [SAM926[03] only] */
-#define AT91_MCI_WRPROOF (1 << 12) /* Write Proof Enable [SAM926[03] only] */
-#define AT91_MCI_PDCFBYTE (1 << 13) /* PDC Force Byte Transfer [SAM926[03] only] */
-#define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */
-#define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */
-#define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */
-
-#define AT91_MCI_DTOR 0x08 /* Data Timeout Register */
-#define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */
-#define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */
-#define AT91_MCI_DTOMUL_1 (0 << 4)
-#define AT91_MCI_DTOMUL_16 (1 << 4)
-#define AT91_MCI_DTOMUL_128 (2 << 4)
-#define AT91_MCI_DTOMUL_256 (3 << 4)
-#define AT91_MCI_DTOMUL_1K (4 << 4)
-#define AT91_MCI_DTOMUL_4K (5 << 4)
-#define AT91_MCI_DTOMUL_64K (6 << 4)
-#define AT91_MCI_DTOMUL_1M (7 << 4)
-
-#define AT91_MCI_SDCR 0x0c /* SD Card Register */
-#define AT91_MCI_SDCSEL (3 << 0) /* SD Card Selector */
-#define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */
-
-#define AT91_MCI_ARGR 0x10 /* Argument Register */
-
-#define AT91_MCI_CMDR 0x14 /* Command Register */
-#define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */
-#define AT91_MCI_RSPTYP (3 << 6) /* Response Type */
-#define AT91_MCI_RSPTYP_NONE (0 << 6)
-#define AT91_MCI_RSPTYP_48 (1 << 6)
-#define AT91_MCI_RSPTYP_136 (2 << 6)
-#define AT91_MCI_SPCMD (7 << 8) /* Special Command */
-#define AT91_MCI_SPCMD_NONE (0 << 8)
-#define AT91_MCI_SPCMD_INIT (1 << 8)
-#define AT91_MCI_SPCMD_SYNC (2 << 8)
-#define AT91_MCI_SPCMD_ICMD (4 << 8)
-#define AT91_MCI_SPCMD_IRESP (5 << 8)
-#define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */
-#define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */
-#define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */
-#define AT91_MCI_TRCMD_NONE (0 << 16)
-#define AT91_MCI_TRCMD_START (1 << 16)
-#define AT91_MCI_TRCMD_STOP (2 << 16)
-#define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */
-#define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */
-#define AT91_MCI_TRTYP_BLOCK (0 << 19)
-#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
-#define AT91_MCI_TRTYP_STREAM (2 << 19)
-#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
-#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
-
-#define AT91_MCI_BLKR 0x18 /* Block Register */
-#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */
-#define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */
-
-#define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */
-#define AT91_MCR_RDR 0x30 /* Receive Data Register */
-#define AT91_MCR_TDR 0x34 /* Transmit Data Register */
-
-#define AT91_MCI_SR 0x40 /* Status Register */
-#define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */
-#define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */
-#define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */
-#define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */
-#define AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */
-#define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */
-#define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */
-#define AT91_MCI_ENDTX (1 << 7) /* End fo TX Buffer */
-#define AT91_MCI_SDIOIRQA (1 << 8) /* SDIO Interrupt for Slot A */
-#define AT91_MCI_SDIOIRQB (1 << 9) /* SDIO Interrupt for Slot B */
-#define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */
-#define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */
-#define AT91_MCI_RINDE (1 << 16) /* Response Index Error */
-#define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */
-#define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */
-#define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */
-#define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */
-#define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */
-#define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */
-#define AT91_MCI_OVRE (1 << 30) /* Overrun */
-#define AT91_MCI_UNRE (1 << 31) /* Underrun */
-
-#define AT91_MCI_IER 0x44 /* Interrupt Enable Register */
-#define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */
-#define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */
-
-#endif
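This header is removed from the platform include directory as part of moving the MCI register definitions next to the MMC driver that uses them. The macros encode bit fields, so register values are composed by OR-ing them together. An illustrative composition (not from the patch) of a command-register value for a single-block read, using the definitions deleted above:

/* CMD17 (READ_SINGLE_BLOCK): 48-bit response, maximum response
 * latency, start a block read from the card. */
static unsigned int mci_read_single_block_cmdr(void)
{
	return (17 & AT91_MCI_CMDNB)	/* command number */
		| AT91_MCI_RSPTYP_48	/* 48-bit response */
		| AT91_MCI_MAXLAT	/* max command-to-response latency */
		| AT91_MCI_TRCMD_START	/* start data transfer */
		| AT91_MCI_TRDIR	/* read: card -> host */
		| AT91_MCI_TRTYP_BLOCK;	/* single block */
}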
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index b8d59ca49027..c278226627ad 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
+#include <asm/io.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 1e0f809644bb..e00d61e2efbe 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -8,6 +8,7 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/serial_8250.h>
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 1bd73a04be20..04c49f7543ef 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -17,6 +17,7 @@
#include <asm/cacheflush.h>
#include <asm/delay.h>
+#include <asm/io.h>
#include <mach/da8xx.h>
#include <mach/sram.h>
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 33ee2c863d18..3cedcf2d39e5 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
@@ -1,11 +1,13 @@
#
# Makefile for the linux kernel.
#
-obj-y := core.o clock.o dma-m2p.o gpio.o
+obj-y := core.o clock.o
obj-m :=
obj-n :=
obj- :=
+obj-$(CONFIG_EP93XX_DMA) += dma.o
+
obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o
obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o
obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 6659a0d137a3..c60f081e930b 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -174,14 +174,10 @@ struct sys_timer ep93xx_timer = {
/*************************************************************************
* EP93xx IRQ handling
*************************************************************************/
-extern void ep93xx_gpio_init_irq(void);
-
void __init ep93xx_init_irq(void)
{
vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0);
vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0);
-
- ep93xx_gpio_init_irq();
}
@@ -241,6 +237,24 @@ unsigned int ep93xx_chip_revision(void)
}
/*************************************************************************
+ * EP93xx GPIO
+ *************************************************************************/
+static struct resource ep93xx_gpio_resource[] = {
+ {
+ .start = EP93XX_GPIO_PHYS_BASE,
+ .end = EP93XX_GPIO_PHYS_BASE + 0xcc - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ep93xx_gpio_device = {
+ .name = "gpio-ep93xx",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ep93xx_gpio_resource),
+ .resource = ep93xx_gpio_resource,
+};
+
+/*************************************************************************
* EP93xx peripheral handling
*************************************************************************/
#define EP93XX_UART_MCR_OFFSET (0x0100)
@@ -492,11 +506,15 @@ static struct resource ep93xx_spi_resources[] = {
},
};
+static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device ep93xx_spi_device = {
.name = "ep93xx-spi",
.id = 0,
.dev = {
- .platform_data = &ep93xx_spi_master_data,
+ .platform_data = &ep93xx_spi_master_data,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &ep93xx_spi_dma_mask,
},
.num_resources = ARRAY_SIZE(ep93xx_spi_resources),
.resource = ep93xx_spi_resources,
@@ -870,14 +888,13 @@ void __init ep93xx_register_ac97(void)
platform_device_register(&ep93xx_pcm_device);
}
-extern void ep93xx_gpio_init(void);
-
void __init ep93xx_init_devices(void)
{
/* Disallow access to MaverickCrunch initially */
ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA);
- ep93xx_gpio_init();
+ /* Get the GPIO working early, other devices need it */
+ platform_device_register(&ep93xx_gpio_device);
amba_device_register(&uart1_device, &iomem_resource);
amba_device_register(&uart2_device, &iomem_resource);
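With this change the EP93xx GPIO block becomes an ordinary platform device ("gpio-ep93xx") registered early from the mach code; the driver itself now lives outside arch/ (the diffstat is limited to arch, so its new home is not shown here) and binds by name. A condensed sketch of the driver side under the 3.1-era platform API (probe body trimmed, error handling minimal):

#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>

static int __devinit ep93xx_gpio_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* ... set up and register the gpio_chips behind "base" ... */
	return 0;
}

static struct platform_driver ep93xx_gpio_driver = {
	.driver	= {
		.name	= "gpio-ep93xx",
	},
	.probe	= ep93xx_gpio_probe,
};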
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
deleted file mode 100644
index a696d354b1f8..000000000000
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/dma-m2p.c
- * M2P DMA handling for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2006 Applied Data Systems
- *
- * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-/*
- * On the EP93xx chip the following peripherals my be allocated to the 10
- * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
- *
- * I2S contains 3 Tx and 3 Rx DMA Channels
- * AAC contains 3 Tx and 3 Rx DMA Channels
- * UART1 contains 1 Tx and 1 Rx DMA Channels
- * UART2 contains 1 Tx and 1 Rx DMA Channels
- * UART3 contains 1 Tx and 1 Rx DMA Channels
- * IrDA contains 1 Tx and 1 Rx DMA Channels
- *
- * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
- * with this implementation.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
-
-#define M2P_CONTROL 0x00
-#define M2P_CONTROL_STALL_IRQ_EN (1 << 0)
-#define M2P_CONTROL_NFB_IRQ_EN (1 << 1)
-#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3)
-#define M2P_CONTROL_ENABLE (1 << 4)
-#define M2P_INTERRUPT 0x04
-#define M2P_INTERRUPT_STALL (1 << 0)
-#define M2P_INTERRUPT_NFB (1 << 1)
-#define M2P_INTERRUPT_ERROR (1 << 3)
-#define M2P_PPALLOC 0x08
-#define M2P_STATUS 0x0c
-#define M2P_REMAIN 0x14
-#define M2P_MAXCNT0 0x20
-#define M2P_BASE0 0x24
-#define M2P_MAXCNT1 0x30
-#define M2P_BASE1 0x34
-
-#define STATE_IDLE 0 /* Channel is inactive. */
-#define STATE_STALL 1 /* Channel is active, no buffers pending. */
-#define STATE_ON 2 /* Channel is active, one buffer pending. */
-#define STATE_NEXT 3 /* Channel is active, two buffers pending. */
-
-struct m2p_channel {
- char *name;
- void __iomem *base;
- int irq;
-
- struct clk *clk;
- spinlock_t lock;
-
- void *client;
- unsigned next_slot:1;
- struct ep93xx_dma_buffer *buffer_xfer;
- struct ep93xx_dma_buffer *buffer_next;
- struct list_head buffers_pending;
-};
-
-static struct m2p_channel m2p_rx[] = {
- {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
- {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
- {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
- {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
- {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
- {NULL},
-};
-
-static struct m2p_channel m2p_tx[] = {
- {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
- {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
- {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
- {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
- {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
- {NULL},
-};
-
-static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
-{
- if (ch->next_slot == 0) {
- writel(buf->size, ch->base + M2P_MAXCNT0);
- writel(buf->bus_addr, ch->base + M2P_BASE0);
- } else {
- writel(buf->size, ch->base + M2P_MAXCNT1);
- writel(buf->bus_addr, ch->base + M2P_BASE1);
- }
- ch->next_slot ^= 1;
-}
-
-static void choose_buffer_xfer(struct m2p_channel *ch)
-{
- struct ep93xx_dma_buffer *buf;
-
- ch->buffer_xfer = NULL;
- if (!list_empty(&ch->buffers_pending)) {
- buf = list_entry(ch->buffers_pending.next,
- struct ep93xx_dma_buffer, list);
- list_del(&buf->list);
- feed_buf(ch, buf);
- ch->buffer_xfer = buf;
- }
-}
-
-static void choose_buffer_next(struct m2p_channel *ch)
-{
- struct ep93xx_dma_buffer *buf;
-
- ch->buffer_next = NULL;
- if (!list_empty(&ch->buffers_pending)) {
- buf = list_entry(ch->buffers_pending.next,
- struct ep93xx_dma_buffer, list);
- list_del(&buf->list);
- feed_buf(ch, buf);
- ch->buffer_next = buf;
- }
-}
-
-static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
-{
- /*
- * The control register must be read immediately after being written so
- * that the internal state machine is correctly updated. See the ep93xx
- * users' guide for details.
- */
- writel(v, ch->base + M2P_CONTROL);
- readl(ch->base + M2P_CONTROL);
-}
-
-static inline int m2p_channel_state(struct m2p_channel *ch)
-{
- return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
-}
-
-static irqreturn_t m2p_irq(int irq, void *dev_id)
-{
- struct m2p_channel *ch = dev_id;
- struct ep93xx_dma_m2p_client *cl;
- u32 irq_status, v;
- int error = 0;
-
- cl = ch->client;
-
- spin_lock(&ch->lock);
- irq_status = readl(ch->base + M2P_INTERRUPT);
-
- if (irq_status & M2P_INTERRUPT_ERROR) {
- writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
- error = 1;
- }
-
- if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
- spin_unlock(&ch->lock);
- return IRQ_NONE;
- }
-
- switch (m2p_channel_state(ch)) {
- case STATE_IDLE:
- pr_crit("dma interrupt without a dma buffer\n");
- BUG();
- break;
-
- case STATE_STALL:
- cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
- if (ch->buffer_next != NULL) {
- cl->buffer_finished(cl->cookie, ch->buffer_next,
- 0, error);
- }
- choose_buffer_xfer(ch);
- choose_buffer_next(ch);
- if (ch->buffer_xfer != NULL)
- cl->buffer_started(cl->cookie, ch->buffer_xfer);
- break;
-
- case STATE_ON:
- cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
- ch->buffer_xfer = ch->buffer_next;
- choose_buffer_next(ch);
- cl->buffer_started(cl->cookie, ch->buffer_xfer);
- break;
-
- case STATE_NEXT:
- pr_crit("dma interrupt while next\n");
- BUG();
- break;
- }
-
- v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
- M2P_CONTROL_NFB_IRQ_EN);
- if (ch->buffer_xfer != NULL)
- v |= M2P_CONTROL_STALL_IRQ_EN;
- if (ch->buffer_next != NULL)
- v |= M2P_CONTROL_NFB_IRQ_EN;
- m2p_set_control(ch, v);
-
- spin_unlock(&ch->lock);
- return IRQ_HANDLED;
-}
-
-static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch;
- int i;
-
- if (cl->flags & EP93XX_DMA_M2P_RX)
- ch = m2p_rx;
- else
- ch = m2p_tx;
-
- for (i = 0; ch[i].base; i++) {
- struct ep93xx_dma_m2p_client *client;
-
- client = ch[i].client;
- if (client != NULL) {
- int port;
-
- port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
- if (port == (client->flags &
- EP93XX_DMA_M2P_PORT_MASK)) {
- pr_warning("DMA channel already used by %s\n",
- cl->name ? : "unknown client");
- return ERR_PTR(-EBUSY);
- }
- }
- }
-
- for (i = 0; ch[i].base; i++) {
- if (ch[i].client == NULL)
- return ch + i;
- }
-
- pr_warning("No free DMA channel for %s\n",
- cl->name ? : "unknown client");
- return ERR_PTR(-ENODEV);
-}
-
-static void channel_enable(struct m2p_channel *ch)
-{
- struct ep93xx_dma_m2p_client *cl = ch->client;
- u32 v;
-
- clk_enable(ch->clk);
-
- v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
- writel(v, ch->base + M2P_PPALLOC);
-
- v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
- v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
- m2p_set_control(ch, v);
-}
-
-static void channel_disable(struct m2p_channel *ch)
-{
- u32 v;
-
- v = readl(ch->base + M2P_CONTROL);
- v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
- m2p_set_control(ch, v);
-
- while (m2p_channel_state(ch) >= STATE_ON)
- cpu_relax();
-
- m2p_set_control(ch, 0x0);
-
- while (m2p_channel_state(ch) == STATE_STALL)
- cpu_relax();
-
- clk_disable(ch->clk);
-}
-
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch;
- int err;
-
- ch = find_free_channel(cl);
- if (IS_ERR(ch))
- return PTR_ERR(ch);
-
- err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
- if (err)
- return err;
-
- ch->client = cl;
- ch->next_slot = 0;
- ch->buffer_xfer = NULL;
- ch->buffer_next = NULL;
- INIT_LIST_HEAD(&ch->buffers_pending);
-
- cl->channel = ch;
-
- channel_enable(ch);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
-
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch = cl->channel;
-
- channel_disable(ch);
- free_irq(ch->irq, ch);
- ch->client = NULL;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
-
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
- struct ep93xx_dma_buffer *buf)
-{
- struct m2p_channel *ch = cl->channel;
- unsigned long flags;
- u32 v;
-
- spin_lock_irqsave(&ch->lock, flags);
- v = readl(ch->base + M2P_CONTROL);
- if (ch->buffer_xfer == NULL) {
- ch->buffer_xfer = buf;
- feed_buf(ch, buf);
- cl->buffer_started(cl->cookie, buf);
-
- v |= M2P_CONTROL_STALL_IRQ_EN;
- m2p_set_control(ch, v);
-
- } else if (ch->buffer_next == NULL) {
- ch->buffer_next = buf;
- feed_buf(ch, buf);
-
- v |= M2P_CONTROL_NFB_IRQ_EN;
- m2p_set_control(ch, v);
- } else {
- list_add_tail(&buf->list, &ch->buffers_pending);
- }
- spin_unlock_irqrestore(&ch->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
-
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
- struct ep93xx_dma_buffer *buf)
-{
- struct m2p_channel *ch = cl->channel;
-
- list_add_tail(&buf->list, &ch->buffers_pending);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
-
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch = cl->channel;
-
- channel_disable(ch);
- ch->next_slot = 0;
- ch->buffer_xfer = NULL;
- ch->buffer_next = NULL;
- INIT_LIST_HEAD(&ch->buffers_pending);
- channel_enable(ch);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
-
-static int init_channel(struct m2p_channel *ch)
-{
- ch->clk = clk_get(NULL, ch->name);
- if (IS_ERR(ch->clk))
- return PTR_ERR(ch->clk);
-
- spin_lock_init(&ch->lock);
- ch->client = NULL;
-
- return 0;
-}
-
-static int __init ep93xx_dma_m2p_init(void)
-{
- int i;
- int ret;
-
- for (i = 0; m2p_rx[i].base; i++) {
- ret = init_channel(m2p_rx + i);
- if (ret)
- return ret;
- }
-
- for (i = 0; m2p_tx[i].base; i++) {
- ret = init_channel(m2p_tx + i);
- if (ret)
- return ret;
- }
-
- pr_info("M2P DMA subsystem initialized\n");
- return 0;
-}
-arch_initcall(ep93xx_dma_m2p_init);
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
new file mode 100644
index 000000000000..5a2570881255
--- /dev/null
+++ b/arch/arm/mach-ep93xx/dma.c
@@ -0,0 +1,108 @@
+/*
+ * arch/arm/mach-ep93xx/dma.c
+ *
+ * Platform support code for the EP93xx dmaengine driver.
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * This work is based on the original dma-m2p implementation with the
+ * following copyrights:
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+#define DMA_CHANNEL(_name, _base, _irq) \
+ { .name = (_name), .base = (_base), .irq = (_irq) }
+
+/*
+ * DMA M2P channels.
+ *
+ * On the EP93xx chip the following peripherals may be allocated to the 10
+ * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
+ *
+ * I2S contains 3 Tx and 3 Rx DMA Channels
+ * AAC contains 3 Tx and 3 Rx DMA Channels
+ * UART1 contains 1 Tx and 1 Rx DMA Channels
+ * UART2 contains 1 Tx and 1 Rx DMA Channels
+ * UART3 contains 1 Tx and 1 Rx DMA Channels
+ * IrDA contains 1 Tx and 1 Rx DMA Channels
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
+ DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
+ DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
+ DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
+ DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
+ DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
+ DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
+ DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
+ DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
+ DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
+ DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
+ .channels = ep93xx_dma_m2p_channels,
+ .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
+};
+
+static struct platform_device ep93xx_dma_m2p_device = {
+ .name = "ep93xx-dma-m2p",
+ .id = -1,
+ .dev = {
+ .platform_data = &ep93xx_dma_m2p_data,
+ },
+};
+
+/*
+ * DMA M2M channels.
+ *
+ * There are 2 M2M channels which support memcpy/memset and in addition simple
+ * hardware requests from/to SSP and IDE. We do not implement external
+ * hardware requests.
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
+ DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
+ DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
+ .channels = ep93xx_dma_m2m_channels,
+ .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
+};
+
+static struct platform_device ep93xx_dma_m2m_device = {
+ .name = "ep93xx-dma-m2m",
+ .id = -1,
+ .dev = {
+ .platform_data = &ep93xx_dma_m2m_data,
+ },
+};
+
+static int __init ep93xx_dma_init(void)
+{
+ platform_device_register(&ep93xx_dma_m2p_device);
+ platform_device_register(&ep93xx_dma_m2m_device);
+ return 0;
+}
+arch_initcall(ep93xx_dma_init);
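For orientation, a hedged sketch of how a client driver might claim one of these channels through the standard dmaengine API; example_dma_filter is hypothetical and is sketched after the mach/dma.h diff below.

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool example_dma_filter(struct dma_chan *chan, void *filter_param);

static struct ep93xx_dma_data example_rx_data = {
	.port		= EP93XX_DMA_UART1,
	.direction	= DMA_FROM_DEVICE,	/* RX: peripheral to memory */
	.name		= "example-uart-rx",
};

static struct dma_chan *example_request_rx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * dma_request_channel() walks the registered channels and calls
	 * the filter with our private data until one is accepted.
	 */
	return dma_request_channel(mask, example_dma_filter, &example_rx_data);
}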
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
deleted file mode 100644
index 415dce37b88c..000000000000
--- a/arch/arm/mach-ep93xx/gpio.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * linux/arch/arm/mach-ep93xx/gpio.c
- *
- * Generic EP93xx GPIO handling
- *
- * Copyright (c) 2008 Ryan Mallon <ryan@bluewatersys.com>
- *
- * Based on code originally from:
- * linux/arch/arm/mach-ep93xx/core.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-
-#include <mach/hardware.h>
-
-/*************************************************************************
- * Interrupt handling for EP93xx on-chip GPIOs
- *************************************************************************/
-static unsigned char gpio_int_unmasked[3];
-static unsigned char gpio_int_enabled[3];
-static unsigned char gpio_int_type1[3];
-static unsigned char gpio_int_type2[3];
-static unsigned char gpio_int_debounce[3];
-
-/* Port ordering is: A B F */
-static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
-static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
-static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
-static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
-static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
-
-static void ep93xx_gpio_update_int_params(unsigned port)
-{
- BUG_ON(port > 2);
-
- __raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port]));
-
- __raw_writeb(gpio_int_type2[port],
- EP93XX_GPIO_REG(int_type2_register_offset[port]));
-
- __raw_writeb(gpio_int_type1[port],
- EP93XX_GPIO_REG(int_type1_register_offset[port]));
-
- __raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
- EP93XX_GPIO_REG(int_en_register_offset[port]));
-}
-
-static inline void ep93xx_gpio_int_mask(unsigned line)
-{
- gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
-}
-
-static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
-{
- int line = irq_to_gpio(irq);
- int port = line >> 3;
- int port_mask = 1 << (line & 7);
-
- if (enable)
- gpio_int_debounce[port] |= port_mask;
- else
- gpio_int_debounce[port] &= ~port_mask;
-
- __raw_writeb(gpio_int_debounce[port],
- EP93XX_GPIO_REG(int_debounce_register_offset[port]));
-}
-
-static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- unsigned char status;
- int i;
-
- status = __raw_readb(EP93XX_GPIO_A_INT_STATUS);
- for (i = 0; i < 8; i++) {
- if (status & (1 << i)) {
- int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i;
- generic_handle_irq(gpio_irq);
- }
- }
-
- status = __raw_readb(EP93XX_GPIO_B_INT_STATUS);
- for (i = 0; i < 8; i++) {
- if (status & (1 << i)) {
- int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i;
- generic_handle_irq(gpio_irq);
- }
- }
-}
-
-static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- /*
- * map discontiguous hw irq range to continuous sw irq range:
- *
- * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
- */
- int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
- int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
-
- generic_handle_irq(gpio_irq);
-}
-
-static void ep93xx_gpio_irq_ack(struct irq_data *d)
-{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
- int port_mask = 1 << (line & 7);
-
- if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
- ep93xx_gpio_update_int_params(port);
- }
-
- __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
-}
-
-static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
-{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
- int port_mask = 1 << (line & 7);
-
- if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
-
- gpio_int_unmasked[port] &= ~port_mask;
- ep93xx_gpio_update_int_params(port);
-
- __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
-}
-
-static void ep93xx_gpio_irq_mask(struct irq_data *d)
-{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
-
- gpio_int_unmasked[port] &= ~(1 << (line & 7));
- ep93xx_gpio_update_int_params(port);
-}
-
-static void ep93xx_gpio_irq_unmask(struct irq_data *d)
-{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
-
- gpio_int_unmasked[port] |= 1 << (line & 7);
- ep93xx_gpio_update_int_params(port);
-}
-
-/*
- * gpio_int_type1 controls whether the interrupt is level (0) or
- * edge (1) triggered, while gpio_int_type2 controls whether it
- * triggers on low/falling (0) or high/rising (1).
- */
-static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
-{
- const int gpio = irq_to_gpio(d->irq);
- const int port = gpio >> 3;
- const int port_mask = 1 << (gpio & 7);
- irq_flow_handler_t handler;
-
- gpio_direction_input(gpio);
-
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] |= port_mask;
- handler = handle_edge_irq;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] &= ~port_mask;
- handler = handle_edge_irq;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] |= port_mask;
- handler = handle_level_irq;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] &= ~port_mask;
- handler = handle_level_irq;
- break;
- case IRQ_TYPE_EDGE_BOTH:
- gpio_int_type1[port] |= port_mask;
- /* set initial polarity based on current input level */
- if (gpio_get_value(gpio))
- gpio_int_type2[port] &= ~port_mask; /* falling */
- else
- gpio_int_type2[port] |= port_mask; /* rising */
- handler = handle_edge_irq;
- break;
- default:
- pr_err("failed to set irq type %d for gpio %d\n", type, gpio);
- return -EINVAL;
- }
-
- __irq_set_handler_locked(d->irq, handler);
-
- gpio_int_enabled[port] |= port_mask;
-
- ep93xx_gpio_update_int_params(port);
-
- return 0;
-}
-
-static struct irq_chip ep93xx_gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = ep93xx_gpio_irq_ack,
- .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
- .irq_mask = ep93xx_gpio_irq_mask,
- .irq_unmask = ep93xx_gpio_irq_unmask,
- .irq_set_type = ep93xx_gpio_irq_type,
-};
-
-void __init ep93xx_gpio_init_irq(void)
-{
- int gpio_irq;
-
- for (gpio_irq = gpio_to_irq(0);
- gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
- irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
- handle_level_irq);
- set_irq_flags(gpio_irq, IRQF_VALID);
- }
-
- irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
- ep93xx_gpio_ab_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX,
- ep93xx_gpio_f_irq_handler);
-}
-
-
-/*************************************************************************
- * gpiolib interface for EP93xx on-chip GPIOs
- *************************************************************************/
-struct ep93xx_gpio_chip {
- struct gpio_chip chip;
-
- void __iomem *data_reg;
- void __iomem *data_dir_reg;
-};
-
-#define to_ep93xx_gpio_chip(c) container_of(c, struct ep93xx_gpio_chip, chip)
-
-static int ep93xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
- unsigned long flags;
- u8 v;
-
- local_irq_save(flags);
- v = __raw_readb(ep93xx_chip->data_dir_reg);
- v &= ~(1 << offset);
- __raw_writeb(v, ep93xx_chip->data_dir_reg);
- local_irq_restore(flags);
-
- return 0;
-}
-
-static int ep93xx_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int val)
-{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
- unsigned long flags;
- int line;
- u8 v;
-
- local_irq_save(flags);
-
- /* Set the value */
- v = __raw_readb(ep93xx_chip->data_reg);
- if (val)
- v |= (1 << offset);
- else
- v &= ~(1 << offset);
- __raw_writeb(v, ep93xx_chip->data_reg);
-
- /* Drive as an output */
- line = chip->base + offset;
- if (line <= EP93XX_GPIO_LINE_MAX_IRQ) {
- /* Ports A/B/F */
- ep93xx_gpio_int_mask(line);
- ep93xx_gpio_update_int_params(line >> 3);
- }
-
- v = __raw_readb(ep93xx_chip->data_dir_reg);
- v |= (1 << offset);
- __raw_writeb(v, ep93xx_chip->data_dir_reg);
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-static int ep93xx_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
-
- return !!(__raw_readb(ep93xx_chip->data_reg) & (1 << offset));
-}
-
-static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
-{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
- unsigned long flags;
- u8 v;
-
- local_irq_save(flags);
- v = __raw_readb(ep93xx_chip->data_reg);
- if (val)
- v |= (1 << offset);
- else
- v &= ~(1 << offset);
- __raw_writeb(v, ep93xx_chip->data_reg);
- local_irq_restore(flags);
-}
-
-static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
- unsigned offset, unsigned debounce)
-{
- int gpio = chip->base + offset;
- int irq = gpio_to_irq(gpio);
-
- if (irq < 0)
- return -EINVAL;
-
- ep93xx_gpio_int_debounce(irq, debounce ? true : false);
-
- return 0;
-}
-
-#define EP93XX_GPIO_BANK(name, dr, ddr, base_gpio) \
- { \
- .chip = { \
- .label = name, \
- .direction_input = ep93xx_gpio_direction_input, \
- .direction_output = ep93xx_gpio_direction_output, \
- .get = ep93xx_gpio_get, \
- .set = ep93xx_gpio_set, \
- .base = base_gpio, \
- .ngpio = 8, \
- }, \
- .data_reg = EP93XX_GPIO_REG(dr), \
- .data_dir_reg = EP93XX_GPIO_REG(ddr), \
- }
-
-static struct ep93xx_gpio_chip ep93xx_gpio_banks[] = {
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0),
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 8),
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 40),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 32),
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 16),
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 56),
-};
-
-void __init ep93xx_gpio_init(void)
-{
- int i;
-
- /* Set Ports C, D, E, G, and H for GPIO use */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK |
- EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
-
- for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
-
- /*
- * Ports A, B, and F support input debouncing when
- * used as interrupts.
- */
- if (!strcmp(chip->label, "A") ||
- !strcmp(chip->label, "B") ||
- !strcmp(chip->label, "F"))
- chip->set_debounce = ep93xx_gpio_set_debounce;
-
- gpiochip_add(chip);
- }
-}
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 5e31b2b25da9..46d4d876e6fb 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,149 +1,93 @@
-/**
- * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
- *
- * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
- * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
- * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
- * engine.
- *
- * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
- *
- */
-
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H
-#include <linux/list.h>
#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
-/**
- * struct ep93xx_dma_buffer - Information about a buffer to be transferred
- * using the DMA M2P engine
+/*
+ * M2P channels.
*
- * @list: Entry in DMA buffer list
- * @bus_addr: Physical address of the buffer
- * @size: Size of the buffer in bytes
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
*/
-struct ep93xx_dma_buffer {
- struct list_head list;
- u32 bus_addr;
- u16 size;
-};
+#define EP93XX_DMA_I2S1 0
+#define EP93XX_DMA_I2S2 1
+#define EP93XX_DMA_AAC1 2
+#define EP93XX_DMA_AAC2 3
+#define EP93XX_DMA_AAC3 4
+#define EP93XX_DMA_I2S3 5
+#define EP93XX_DMA_UART1 6
+#define EP93XX_DMA_UART2 7
+#define EP93XX_DMA_UART3 8
+#define EP93XX_DMA_IRDA 9
+/* M2M channels */
+#define EP93XX_DMA_SSP 10
+#define EP93XX_DMA_IDE 11
/**
- * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
- *
- * @name: Unique name for this client
- * @flags: Client flags
- * @cookie: User data to pass to callback functions
- * @buffer_started: Non NULL function to call when a transfer is started.
- * The arguments are the user data cookie and the DMA
- * buffer which is starting.
- * @buffer_finished: Non NULL function to call when a transfer is completed.
- * The arguments are the user data cookie, the DMA buffer
- * which has completed, and a boolean flag indicating if
- * the transfer had an error.
+ * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
+ * @port: peripheral which is requesting the channel
+ * @direction: TX/RX channel
+ * @name: optional name for the channel; this is displayed in /proc/interrupts
+ *
+ * This information is passed as the private channel parameter to a filter
+ * function. Note that this is only needed for slave/cyclic channels. For
+ * memcpy channels, %NULL should be passed instead.
*/
-struct ep93xx_dma_m2p_client {
- char *name;
- u8 flags;
- void *cookie;
- void (*buffer_started)(void *cookie,
- struct ep93xx_dma_buffer *buf);
- void (*buffer_finished)(void *cookie,
- struct ep93xx_dma_buffer *buf,
- int bytes, int error);
-
- /* private: Internal use only */
- void *channel;
+struct ep93xx_dma_data {
+ int port;
+ enum dma_data_direction direction;
+ const char *name;
};
-/* DMA M2P ports */
-#define EP93XX_DMA_M2P_PORT_I2S1 0x00
-#define EP93XX_DMA_M2P_PORT_I2S2 0x01
-#define EP93XX_DMA_M2P_PORT_AAC1 0x02
-#define EP93XX_DMA_M2P_PORT_AAC2 0x03
-#define EP93XX_DMA_M2P_PORT_AAC3 0x04
-#define EP93XX_DMA_M2P_PORT_I2S3 0x05
-#define EP93XX_DMA_M2P_PORT_UART1 0x06
-#define EP93XX_DMA_M2P_PORT_UART2 0x07
-#define EP93XX_DMA_M2P_PORT_UART3 0x08
-#define EP93XX_DMA_M2P_PORT_IRDA 0x09
-#define EP93XX_DMA_M2P_PORT_MASK 0x0f
-
-/* DMA M2P client flags */
-#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
-#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
-
-/*
- * DMA M2P client error handling flags. See the EP93xx users guide
- * documentation on the DMA M2P CONTROL register for more details
- */
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
-#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
-#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
-
/**
- * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
- * subsystem
- *
- * @m2p: Client information to register
- * returns 0 on success
- *
- * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
- * client
+ * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
+ * @name: name of the channel, used to look up the channel's clock
+ * @base: mapped registers
+ * @irq: interrupt number used by this channel
*/
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_chan_data {
+ const char *name;
+ void __iomem *base;
+ int irq;
+};
/**
- * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
- * subsystem
- *
- * @m2p: Client to unregister
+ * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
+ * @channels: array of channels which are passed to the driver
+ * @num_channels: number of channels in the array
*
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
+ * This structure is passed to the DMA engine driver via platform data. For
+ * M2P channels, the contract is that even channels are for TX and odd for RX.
+ * There is no such requirement for the M2M channels.
*/
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_platform_data {
+ struct ep93xx_dma_chan_data *channels;
+ size_t num_channels;
+};
-/**
- * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
- *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * If the current or next transfer positions are free on the M2P client then
- * the transfer is started immediately. If not, the transfer is added to the
- * list of pending transfers. This function must not be called from the
- * buffer_finished callback for an M2P channel.
- *
- */
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
- struct ep93xx_dma_buffer *buf);
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
/**
- * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
- * for an M2P channel
+ * ep93xx_dma_chan_direction - returns the direction the channel can be used in
+ * @chan: channel
*
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * This function must only be called from the buffer_finished callback for an
- * M2P channel. It is commonly used to add the next transfer in a chained list
- * of DMA transfers.
+ * This function can be used in filter functions to find out whether the
+ * channel supports a given DMA direction. Only M2P channels have this
+ * limitation; for M2M channels the direction is configurable.
*/
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
- struct ep93xx_dma_buffer *buf);
+static inline enum dma_data_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+ if (!ep93xx_dma_chan_is_m2p(chan))
+ return DMA_NONE;
-/**
- * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
- *
- * @m2p: DMA client to flush transfers on
- *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
- *
- */
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
+ /* even channels are for TX, odd for RX */
+ return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
#endif /* __ASM_ARCH_DMA_H */
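A hedged sketch of the filter function referred to in the ep93xx_dma_data kerneldoc above, built on the two helpers just added; the function name and the chan->private hand-off are assumptions for illustration, not part of this patch.

static bool example_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/* For this sketch, accept only M2P channels that can transfer
	 * in the direction the client asked for. */
	if (!ep93xx_dma_chan_is_m2p(chan))
		return false;
	if (data->direction != ep93xx_dma_chan_direction(chan))
		return false;

	/* Hand the configuration to the channel before it is used. */
	chan->private = data;
	return true;
}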
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index 9ac4d1055097..c4a7b84ef06d 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -98,6 +98,7 @@
#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000)
+#define EP93XX_GPIO_PHYS_BASE EP93XX_APB_PHYS(0x00040000)
#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000)
#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x))
#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c)
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
index 0a37961b3453..9bb63ac13f04 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
@@ -7,9 +7,11 @@ struct spi_device;
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
* @num_chipselect: number of chip selects on this board, must be
* at least one
+ * @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
int num_chipselect;
+ bool use_dma;
};
/**
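A hedged example of board code opting in to the new flag; ep93xx_register_spi() and the SPI device table are assumed to exist elsewhere in the board file.

static struct ep93xx_spi_info example_spi_info __initdata = {
	.num_chipselect	= 1,
	.use_dma	= true,		/* let the driver use the DMA engine */
};

/*
 * Typically called from the board init function:
 *
 *	ep93xx_register_spi(&example_spi_info, example_spi_board_info,
 *			    ARRAY_SIZE(example_spi_board_info));
 */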
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index 46adca068f2c..dc26fff22cf0 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -5,6 +5,7 @@ menu "Footbridge Implementations"
config ARCH_CATS
bool "CATS"
select CLKSRC_I8253
+ select CLKEVT_I8253
select FOOTBRIDGE_HOST
select ISA
select ISA_DMA
diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c
index 7020f1a3feca..c40bb415f4b5 100644
--- a/arch/arm/mach-footbridge/isa-timer.c
+++ b/arch/arm/mach-footbridge/isa-timer.c
@@ -5,64 +5,18 @@
* Copyright (C) 1998 Phil Blundell
*/
#include <linux/clockchips.h>
-#include <linux/clocksource.h>
+#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
#include <asm/irq.h>
-#include <asm/i8253.h>
#include <asm/mach/time.h>
#include "common.h"
-DEFINE_RAW_SPINLOCK(i8253_lock);
-
-static void pit_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- outb_p(0x34, PIT_MODE);
- outb_p(PIT_LATCH & 0xff, PIT_CH0);
- outb_p(PIT_LATCH >> 8, PIT_CH0);
- break;
-
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
- local_irq_restore(flags);
-}
-
-static int pit_set_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- return 0;
-}
-
-static struct clock_event_device pit_ce = {
- .name = "pit",
- .features = CLOCK_EVT_FEAT_PERIODIC,
- .set_mode = pit_set_mode,
- .set_next_event = pit_set_next_event,
- .shift = 32,
-};
-
static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = dev_id;
@@ -74,20 +28,15 @@ static struct irqaction pit_timer_irq = {
.name = "pit",
.handler = pit_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .dev_id = &pit_ce,
+ .dev_id = &i8253_clockevent,
};
static void __init isa_timer_init(void)
{
- pit_ce.cpumask = cpumask_of(smp_processor_id());
- pit_ce.mult = div_sc(PIT_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
- pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce);
- pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce);
-
clocksource_i8253_init();
- setup_irq(pit_ce.irq, &pit_timer_irq);
- clockevents_register_device(&pit_ce);
+ setup_irq(i8253_clockevent.irq, &pit_timer_irq);
+ clockevent_i8253_init(false);
}
struct sys_timer isa_timer = {
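For reference, the shared PIT helpers this conversion relies on are assumed to be declared in <linux/i8253.h> roughly as below; passing false to clockevent_i8253_init() keeps the clockevent periodic-only, matching the old pit_ce.

/* assumed declarations from <linux/i8253.h> */
extern raw_spinlock_t i8253_lock;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
extern int clocksource_i8253_init(void);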
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
index a65838fc061c..af1c580b06bc 100644
--- a/arch/arm/mach-imx/clock-imx25.c
+++ b/arch/arm/mach-imx/clock-imx25.c
@@ -282,9 +282,10 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
_REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK("imx25-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx25-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx25-cspi.2", NULL, cspi3_clk)
+ /* i.mx25 has the i.mx35 type cspi */
+ _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
+ _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
+ _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
_REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
_REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
index 236f1495efad..f8aa5be0eb15 100644
--- a/arch/arm/mach-imx/dma-v1.c
+++ b/arch/arm/mach-imx/dma-v1.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index 15e45c84e371..59d2a3b137d9 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -115,6 +115,8 @@ static struct platform_device *devices[] __initdata = {
static void __init apf9328_init(void)
{
+ imx1_soc_init();
+
mxc_gpio_setup_multiple_pins(apf9328_pins,
ARRAY_SIZE(apf9328_pins),
"APF9328");
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index ffb40ff619b1..ede2710f8b76 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -490,6 +490,8 @@ static struct platform_device *devices[] __initdata = {
*/
static void __init armadillo5x0_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(armadillo5x0_pins,
ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0");
diff --git a/arch/arm/mach-imx/mach-bug.c b/arch/arm/mach-imx/mach-bug.c
index 42e4f078a19c..f49470553bdf 100644
--- a/arch/arm/mach-imx/mach-bug.c
+++ b/arch/arm/mach-imx/mach-bug.c
@@ -42,6 +42,8 @@ static const unsigned int bug_pins[] __initconst = {
static void __init bug_board_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(bug_pins,
ARRAY_SIZE(bug_pins), "uart-4");
imx31_add_imx_uart4(&uart_pdata);
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 46a2e41d43d2..87887ac5806b 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -250,6 +250,8 @@ __setup("otg_mode=", eukrea_cpuimx27_otg_mode);
static void __init eukrea_cpuimx27_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins,
ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27");
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 3f8ef825fa6f..f39a478ba1a6 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -156,6 +156,8 @@ __setup("otg_mode=", eukrea_cpuimx35_otg_mode);
*/
static void __init eukrea_cpuimx35_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx35_pads,
ARRAY_SIZE(eukrea_cpuimx35_pads));
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index 148cff2819b9..da36da52969d 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -125,6 +125,8 @@ __setup("otg_mode=", eukrea_cpuimx25_otg_mode);
static void __init eukrea_cpuimx25_init(void)
{
+ imx25_soc_init();
+
if (mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx25_pads,
ARRAY_SIZE(eukrea_cpuimx25_pads)))
printk(KERN_ERR "error setting cpuimx25 pads !\n");
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 7ae43b1ec517..c6269d60ddbc 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -231,6 +231,8 @@ static void __init visstrim_m10_board_init(void)
{
int ret;
+ imx27_soc_init();
+
ret = mxc_gpio_setup_multiple_pins(visstrim_m10_pins,
ARRAY_SIZE(visstrim_m10_pins), "VISSTRIM_M10");
if (ret)
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
index 9be6cd6fbf8c..272f793e9247 100644
--- a/arch/arm/mach-imx/mach-imx27ipcam.c
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -50,6 +50,8 @@ static const int mx27ipcam_pins[] __initconst = {
static void __init mx27ipcam_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27ipcam_pins, ARRAY_SIZE(mx27ipcam_pins),
"mx27ipcam");
diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index 841140516ede..d81a769fe895 100644
--- a/arch/arm/mach-imx/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -59,6 +59,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
static void __init mx27lite_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins),
"imx27lite");
imx27_add_imx_uart0(&uart_pdata);
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index 1ecae20cf4e3..e472a1d88058 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -223,6 +223,8 @@ static int kzm_pins[] __initdata = {
*/
static void __init kzm_board_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(kzm_pins,
ARRAY_SIZE(kzm_pins), "kzm");
kzm_init_ext_uart();
diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index 38ec5cbbda9b..5cd8bee46960 100644
--- a/arch/arm/mach-imx/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -115,6 +115,8 @@ static struct i2c_board_info mx1ads_i2c_devices[] = {
*/
static void __init mx1ads_init(void)
{
+ imx1_soc_init();
+
mxc_gpio_setup_multiple_pins(mx1ads_pins,
ARRAY_SIZE(mx1ads_pins), "mx1ads");
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 74ac88978ddd..d389ecf9b5a8 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -279,6 +279,8 @@ static struct platform_device *platform_devices[] __initdata = {
static void __init mx21ads_board_init(void)
{
+ imx21_soc_init();
+
mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins),
"mx21ads");
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index 58ea3fdf0911..01534bb61305 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -219,6 +219,8 @@ static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = {
static void __init mx25pdk_init(void)
{
+ imx25_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
ARRAY_SIZE(mx25pdk_pads));
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 6e1accf93f81..117ce0a50f4e 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -267,6 +267,8 @@ static const struct imxi2c_platform_data mx27_3ds_i2c0_data __initconst = {
static void __init mx27pdk_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins),
"mx27pdk");
mx27_3ds_sdhc1_enable_level_translator();
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 1db79506f5e4..fc26ed71b9ed 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -288,6 +288,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
static void __init mx27ads_board_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins),
"mx27ads");
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 9b982449cb52..441fbb83f39c 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -13,6 +13,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clk.h>
@@ -689,6 +690,8 @@ static void __init mx31_3ds_init(void)
{
int ret;
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
"mx31_3ds");
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index f4dee0254634..0ce49478a479 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -516,6 +516,8 @@ static void __init mx31ads_init_irq(void)
static void __init mx31ads_init(void)
{
+ imx31_soc_init();
+
mxc_init_extuart();
mxc_init_imx_uart();
mxc_init_i2c();
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index 410e676ae087..750368ddf0f9 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -243,6 +243,8 @@ core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444);
static void __init mx31lilly_board_init(void)
{
+ imx31_soc_init();
+
switch (mx31lilly_baseboard) {
case MX31LILLY_NOBOARD:
break;
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index ac9b4cad320e..4b47fd9fdd89 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -230,6 +230,8 @@ static void __init mx31lite_init(void)
{
int ret;
+ imx31_soc_init();
+
switch (mx31lite_baseboard) {
case MX31LITE_NOBOARD:
break;
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index eaa51e49ca95..a52fd36e2b52 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -507,6 +507,8 @@ core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444);
*/
static void __init mx31moboard_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins),
"moboard");
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 882880ac1bbc..48b3c6fd5cf0 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -179,6 +179,8 @@ static const struct imxi2c_platform_data mx35_3ds_i2c0_data __initconst = {
*/
static void __init mx35_3ds_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads));
imx35_add_fec(NULL);
diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index 2774541511e7..c85876fed663 100644
--- a/arch/arm/mach-imx/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -233,6 +233,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
static void __init mxt_td60_board_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins),
"MXT_TD60");
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index bbddc5a11c43..71083aa16038 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -357,6 +357,8 @@ static void __init pca100_init(void)
{
int ret;
+ imx27_soc_init();
+
/* SSI unit */
mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
MXC_AUDMUX_V1_PCR_SYN | /* 4wire mode */
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 89c213b81295..f45b7cd72c8a 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -576,6 +576,8 @@ static void __init pcm037_init(void)
{
int ret;
+ imx31_soc_init();
+
mxc_iomux_set_gpr(MUX_PGP_UH2, 1);
mxc_iomux_setup_multiple_pins(pcm037_pins, ARRAY_SIZE(pcm037_pins),
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index 853bb871c7ed..2d6a64bbac44 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -295,6 +295,8 @@ static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
static void __init pcm038_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(pcm038_pins, ARRAY_SIZE(pcm038_pins),
"PCM038");
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index 026441628dfa..163cc318cafb 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -356,6 +356,8 @@ static struct esdhc_platform_data sd1_pdata = {
*/
static void __init pcm043_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads));
mxc_audmux_v2_configure_port(3,
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index c16328715939..3626f486498a 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -244,6 +244,8 @@ static void __init qong_init_fpga(void)
*/
static void __init qong_init(void)
{
+ imx31_soc_init();
+
mxc_init_imx_uart();
qong_init_nor_mtd();
qong_init_fpga();
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index dcaee043628e..82805260e19c 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -129,6 +129,8 @@ static struct platform_device *devices[] __initdata = {
*/
static void __init scb9328_init(void)
{
+ imx1_soc_init();
+
imx1_add_imx_uart0(&uart_pdata);
printk(KERN_INFO"Scb9328: Adding devices\n");
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index d74e3473d236..7d8e012a6335 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -267,6 +267,8 @@ static struct platform_device *devices[] __initdata = {
*/
static void __init vpr200_board_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads));
imx35_add_fec(NULL);
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c
index 2e482ba5a0e7..2bded591d5c2 100644
--- a/arch/arm/mach-imx/mm-imx1.c
+++ b/arch/arm/mach-imx/mm-imx1.c
@@ -23,7 +23,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
#include <mach/iomux-v1.h>
@@ -44,15 +43,19 @@ void __init imx1_init_early(void)
MX1_NUM_GPIO_PORT);
}
-static struct mxc_gpio_port imx1_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 0, 1, MX1_GPIO_INT_PORTA),
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 1, 2, MX1_GPIO_INT_PORTB),
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 2, 3, MX1_GPIO_INT_PORTC),
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 3, 4, MX1_GPIO_INT_PORTD),
-};
-
void __init mx1_init_irq(void)
{
mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR));
- mxc_gpio_init(imx1_gpio_ports, ARRAY_SIZE(imx1_gpio_ports));
+}
+
+void __init imx1_soc_init(void)
+{
+ mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTA, 0);
+ mxc_register_gpio("imx1-gpio", 1, MX1_GPIO2_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTB, 0);
+ mxc_register_gpio("imx1-gpio", 2, MX1_GPIO3_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTC, 0);
+ mxc_register_gpio("imx1-gpio", 3, MX1_GPIO4_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTD, 0);
}
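The mxc_register_gpio() calls above (and in the mm-imx2x/3x files that follow) assume a plat-mxc helper with roughly this prototype; the exact declaration is quoted from memory, not from this patch.

/* assumed prototype from plat-mxc */
struct platform_device *__init mxc_register_gpio(char *name, int id,
		resource_size_t iobase, resource_size_t iosize,
		int irq, int irq_high);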
diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index 7a0c500ac2c8..6d7d518686a5 100644
--- a/arch/arm/mach-imx/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -24,7 +24,6 @@
#include <mach/common.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
#include <mach/iomux-v1.h>
@@ -70,17 +69,17 @@ void __init imx21_init_early(void)
MX21_NUM_GPIO_PORT);
}
-static struct mxc_gpio_port imx21_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX21, 0, 1, MX21_INT_GPIO),
- DEFINE_IMX_GPIO_PORT(MX21, 1, 2),
- DEFINE_IMX_GPIO_PORT(MX21, 2, 3),
- DEFINE_IMX_GPIO_PORT(MX21, 3, 4),
- DEFINE_IMX_GPIO_PORT(MX21, 4, 5),
- DEFINE_IMX_GPIO_PORT(MX21, 5, 6),
-};
-
void __init mx21_init_irq(void)
{
mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR));
- mxc_gpio_init(imx21_gpio_ports, ARRAY_SIZE(imx21_gpio_ports));
+}
+
+void __init imx21_soc_init(void)
+{
+ mxc_register_gpio("imx21-gpio", 0, MX21_GPIO1_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 1, MX21_GPIO2_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 2, MX21_GPIO3_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 3, MX21_GPIO4_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 4, MX21_GPIO5_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
}
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index 02f7b5c7fa8e..9a1591c2508d 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -27,7 +27,6 @@
#include <mach/hardware.h>
#include <mach/mx25.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
/*
@@ -57,16 +56,16 @@ void __init imx25_init_early(void)
mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx25_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 0, 1, MX25_INT_GPIO1),
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 1, 2, MX25_INT_GPIO2),
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 2, 3, MX25_INT_GPIO3),
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 3, 4, MX25_INT_GPIO4),
-};
-
void __init mx25_init_irq(void)
{
mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR));
- mxc_gpio_init(imx25_gpio_ports, ARRAY_SIZE(imx25_gpio_ports));
}
+void __init imx25_soc_init(void)
+{
+ /* i.mx25 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0);
+ mxc_register_gpio("imx31-gpio", 1, MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0);
+ mxc_register_gpio("imx31-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0);
+ mxc_register_gpio("imx31-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0);
+}
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index a6761a39f08c..133b30003ddb 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -24,7 +24,6 @@
#include <mach/common.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
#include <mach/iomux-v1.h>
@@ -70,17 +69,18 @@ void __init imx27_init_early(void)
MX27_NUM_GPIO_PORT);
}
-static struct mxc_gpio_port imx27_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX27, 0, 1, MX27_INT_GPIO),
- DEFINE_IMX_GPIO_PORT(MX27, 1, 2),
- DEFINE_IMX_GPIO_PORT(MX27, 2, 3),
- DEFINE_IMX_GPIO_PORT(MX27, 3, 4),
- DEFINE_IMX_GPIO_PORT(MX27, 4, 5),
- DEFINE_IMX_GPIO_PORT(MX27, 5, 6),
-};
-
void __init mx27_init_irq(void)
{
mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR));
- mxc_gpio_init(imx27_gpio_ports, ARRAY_SIZE(imx27_gpio_ports));
+}
+
+void __init imx27_soc_init(void)
+{
+ /* i.mx27 has the i.mx21 type gpio */
+ mxc_register_gpio("imx21-gpio", 0, MX27_GPIO1_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 1, MX27_GPIO2_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 2, MX27_GPIO3_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 3, MX27_GPIO4_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 4, MX27_GPIO5_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
}
diff --git a/arch/arm/mach-imx/mm-imx31.c b/arch/arm/mach-imx/mm-imx31.c
index 86b9b45864d2..6d103c01b8b9 100644
--- a/arch/arm/mach-imx/mm-imx31.c
+++ b/arch/arm/mach-imx/mm-imx31.c
@@ -26,7 +26,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
static struct map_desc mx31_io_desc[] __initdata = {
@@ -53,14 +52,14 @@ void __init imx31_init_early(void)
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx31_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX31, 0, 1, MX31_INT_GPIO1),
- DEFINE_IMX_GPIO_PORT_IRQ(MX31, 1, 2, MX31_INT_GPIO2),
- DEFINE_IMX_GPIO_PORT_IRQ(MX31, 2, 3, MX31_INT_GPIO3),
-};
-
void __init mx31_init_irq(void)
{
mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
- mxc_gpio_init(imx31_gpio_ports, ARRAY_SIZE(imx31_gpio_ports));
+}
+
+void __init imx31_soc_init(void)
+{
+ mxc_register_gpio("imx31-gpio", 0, MX31_GPIO1_BASE_ADDR, SZ_16K, MX31_INT_GPIO1, 0);
+ mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
+ mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
}
diff --git a/arch/arm/mach-imx/mm-imx35.c b/arch/arm/mach-imx/mm-imx35.c
index c880e6d1ae55..bb068bc8dab7 100644
--- a/arch/arm/mach-imx/mm-imx35.c
+++ b/arch/arm/mach-imx/mm-imx35.c
@@ -27,7 +27,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
static struct map_desc mx35_io_desc[] __initdata = {
@@ -50,14 +49,15 @@ void __init imx35_init_early(void)
mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx35_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX35, 0, 1, MX35_INT_GPIO1),
- DEFINE_IMX_GPIO_PORT_IRQ(MX35, 1, 2, MX35_INT_GPIO2),
- DEFINE_IMX_GPIO_PORT_IRQ(MX35, 2, 3, MX35_INT_GPIO3),
-};
-
void __init mx35_init_irq(void)
{
mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
- mxc_gpio_init(imx35_gpio_ports, ARRAY_SIZE(imx35_gpio_ports));
+}
+
+void __init imx35_soc_init(void)
+{
+ /* i.mx35 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX35_GPIO1_BASE_ADDR, SZ_16K, MX35_INT_GPIO1, 0);
+ mxc_register_gpio("imx31-gpio", 1, MX35_GPIO2_BASE_ADDR, SZ_16K, MX35_INT_GPIO2, 0);
+ mxc_register_gpio("imx31-gpio", 2, MX35_GPIO3_BASE_ADDR, SZ_16K, MX35_INT_GPIO3, 0);
}
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 5c147fb66a01..a5b989728b9e 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
#include <linux/io.h>
#ifdef CONFIG_MTD_PHYSMAP
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index 7bb78fd5a2a6..c79162a50f28 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -177,9 +177,16 @@ static struct i2c_board_info brownstone_twsi1_info[] = {
};
static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = {
- .max_speed = 25000000,
+ .clk_delay_cycles = 0x1f,
};
+static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = {
+ .clk_delay_cycles = 0x1f,
+ .flags = PXA_FLAG_CARD_PERMANENT
+ | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
+};
+
static void __init brownstone_init(void)
{
mfp_config(ARRAY_AND_SIZE(brownstone_pin_config));
@@ -189,6 +196,7 @@ static void __init brownstone_init(void)
mmp2_add_uart(3);
mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info));
mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */
+ mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */
/* enable 5v regulator */
platform_device_register(&brownstone_v_5vp_device);
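
The platdata change above drops max_speed from sdhci_pxa_platdata and
describes the slot instead through clk_delay_cycles plus capability
flags: PXA_FLAG_CARD_PERMANENT marks the soldered eMMC as non-removable
and PXA_FLAG_SD_8_BIT_CAPABLE_SLOT advertises the 8-bit data bus. A
sketch of how the driver side plausibly translates those flags into MMC
core capabilities (the real mapping lives in the sdhci-pxav3 driver,
outside this diff):

#include <linux/mmc/host.h>
#include <linux/platform_data/pxa_sdhci.h>

static void sdh_apply_flags_sketch(struct mmc_host *mmc,
				   const struct sdhci_pxa_platdata *pdata)
{
	if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
		mmc->caps |= MMC_CAP_NONREMOVABLE;	/* soldered eMMC */
	if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
		mmc->caps |= MMC_CAP_8_BIT_DATA;	/* 8-bit data bus */
}
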
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index 2cbf6df09b82..de7b88826ad7 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -1,7 +1,7 @@
#ifndef __ASM_MACH_MMP2_H
#define __ASM_MACH_MMP2_H
-#include <plat/sdhci.h>
+#include <linux/platform_data/pxa_sdhci.h>
struct sys_timer;
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index 24172a0aad59..5d6421d63254 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -154,7 +154,7 @@ static struct i2c_board_info jasper_twsi1_info[] = {
};
static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = {
- .max_speed = 25000000,
+ .clk_delay_cycles = 0x1f,
};
static void __init jasper_init(void)
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index 8e6c3ac7f7c1..079c18861d5c 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -168,10 +168,10 @@ static struct clk_lookup mmp2_clkregs[] = {
INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_sdh0, "sdhci-pxa.0", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh1, "sdhci-pxa.1", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh2, "sdhci-pxa.2", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh3, "sdhci-pxa.3", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"),
};
static int __init mmp2_init(void)
@@ -222,8 +222,8 @@ MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70);
MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70);
MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70);
MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29);
-MMP2_DEVICE(sdh0, "sdhci-pxa", 0, MMC, 0xd4280000, 0x120);
-MMP2_DEVICE(sdh1, "sdhci-pxa", 1, MMC2, 0xd4280800, 0x120);
-MMP2_DEVICE(sdh2, "sdhci-pxa", 2, MMC3, 0xd4281000, 0x120);
-MMP2_DEVICE(sdh3, "sdhci-pxa", 3, MMC4, 0xd4281800, 0x120);
+MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120);
+MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120);
+MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120);
+MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120);
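
The sdhci-pxa to sdhci-pxav3 rename has to touch both tables in lockstep:
clkdev matches a clk_lookup by comparing its dev_id string against
dev_name() of the requesting device, which for these platform devices is
the MMP2_DEVICE name plus the instance id ("sdhci-pxav3.0" and so on).
Renaming the devices without the INIT_CLKREG entries would make clk_get()
fail in the driver. Consumer-side sketch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Called with a "sdhci-pxav3.N" platform device; the con_id must match
 * the second field of the INIT_CLKREG() entries above. */
static int sdh_clk_sketch(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, "PXA-SDHCLK");

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return clk_enable(clk);
}
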
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 1516896e8d17..888e92502e15 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -148,22 +148,6 @@ config MACH_MSM8960_RUMI3
endmenu
-config MSM_IOMMU
- bool "MSM IOMMU Support"
- depends on ARCH_MSM8X60 || ARCH_MSM8960
- select IOMMU_API
- default n
- help
- Support for the IOMMUs found on certain Qualcomm SOCs.
- These IOMMUs allow virtualization of the address space used by most
- cores within the multimedia subsystem.
-
- If unsure, say N here.
-
-config IOMMU_PGTABLES_L2
- def_bool y
- depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
-
config MSM_DEBUG_UART
int
default 1 if MSM_DEBUG_UART1
@@ -205,9 +189,6 @@ config MSM_GPIOMUX
config MSM_V2_TLMM
bool
-config IOMMU_API
- bool
-
config MSM_SCM
bool
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 9519fd28a025..b70658c5ae00 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -3,7 +3,7 @@ obj-y += clock.o
obj-$(CONFIG_DEBUG_FS) += clock-debug.o
obj-$(CONFIG_MSM_VIC) += irq-vic.o
-obj-$(CONFIG_MSM_IOMMU) += iommu.o iommu_dev.o devices-iommu.o
+obj-$(CONFIG_MSM_IOMMU) += devices-iommu.o
obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o
obj-$(CONFIG_ARCH_MSM7X30) += dma.o
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
deleted file mode 100644
index 1a584e077c61..000000000000
--- a/arch/arm/mach-msm/iommu.c
+++ /dev/null
@@ -1,731 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/iommu.h>
-#include <linux/clk.h>
-
-#include <asm/cacheflush.h>
-#include <asm/sizes.h>
-
-#include <mach/iommu_hw-8xxx.h>
-#include <mach/iommu.h>
-
-#define MRC(reg, processor, op1, crn, crm, op2) \
-__asm__ __volatile__ ( \
-" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
-: "=r" (reg))
-
-#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
-#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
-
-static int msm_iommu_tex_class[4];
-
-DEFINE_SPINLOCK(msm_iommu_lock);
-
-struct msm_priv {
- unsigned long *pgtable;
- struct list_head list_attached;
-};
-
-static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
-{
- int ret;
-
- ret = clk_enable(drvdata->pclk);
- if (ret)
- goto fail;
-
- if (drvdata->clk) {
- ret = clk_enable(drvdata->clk);
- if (ret)
- clk_disable(drvdata->pclk);
- }
-fail:
- return ret;
-}
-
-static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
-{
- if (drvdata->clk)
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
-}
-
-static int __flush_iotlb(struct iommu_domain *domain)
-{
- struct msm_priv *priv = domain->priv;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- int ret = 0;
-#ifndef CONFIG_IOMMU_PGTABLES_L2
- unsigned long *fl_table = priv->pgtable;
- int i;
-
- if (!list_empty(&priv->list_attached)) {
- dmac_flush_range(fl_table, fl_table + SZ_16K);
-
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
- void *sl_table = __va(fl_table[i] &
- FL_BASE_MASK);
- dmac_flush_range(sl_table, sl_table + SZ_4K);
- }
- }
-#endif
-
- list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
- if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
- BUG();
-
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
- BUG_ON(!iommu_drvdata);
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
- __disable_clocks(iommu_drvdata);
- }
-fail:
- return ret;
-}
-
-static void __reset_context(void __iomem *base, int ctx)
-{
- SET_BPRCOSH(base, ctx, 0);
- SET_BPRCISH(base, ctx, 0);
- SET_BPRCNSH(base, ctx, 0);
- SET_BPSHCFG(base, ctx, 0);
- SET_BPMTCFG(base, ctx, 0);
- SET_ACTLR(base, ctx, 0);
- SET_SCTLR(base, ctx, 0);
- SET_FSRRESTORE(base, ctx, 0);
- SET_TTBR0(base, ctx, 0);
- SET_TTBR1(base, ctx, 0);
- SET_TTBCR(base, ctx, 0);
- SET_BFBCR(base, ctx, 0);
- SET_PAR(base, ctx, 0);
- SET_FAR(base, ctx, 0);
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_TLBFLPTER(base, ctx, 0);
- SET_TLBSLPTER(base, ctx, 0);
- SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
-}
-
-static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
-{
- unsigned int prrr, nmrr;
- __reset_context(base, ctx);
-
- /* Set up HTW mode */
- /* TLB miss configuration: perform HTW on miss */
- SET_TLBMCFG(base, ctx, 0x3);
-
- /* V2P configuration: HTW for access */
- SET_V2PCFG(base, ctx, 0x3);
-
- SET_TTBCR(base, ctx, 0);
- SET_TTBR0_PA(base, ctx, (pgtable >> 14));
-
- /* Invalidate the TLB for this context */
- SET_CTX_TLBIALL(base, ctx, 0);
-
- /* Set interrupt number to "secure" interrupt */
- SET_IRPTNDX(base, ctx, 0);
-
- /* Enable context fault interrupt */
- SET_CFEIE(base, ctx, 1);
-
- /* Stall access on a context fault and let the handler deal with it */
- SET_CFCFG(base, ctx, 1);
-
- /* Redirect all cacheable requests to L2 slave port. */
- SET_RCISH(base, ctx, 1);
- SET_RCOSH(base, ctx, 1);
- SET_RCNSH(base, ctx, 1);
-
- /* Turn on TEX Remap */
- SET_TRE(base, ctx, 1);
-
- /* Set TEX remap attributes */
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
- SET_PRRR(base, ctx, prrr);
- SET_NMRR(base, ctx, nmrr);
-
- /* Turn on BFB prefetch */
- SET_BFBDFE(base, ctx, 1);
-
-#ifdef CONFIG_IOMMU_PGTABLES_L2
- /* Configure page tables as inner-cacheable and shareable to reduce
- * the TLB miss penalty.
- */
- SET_TTBR0_SH(base, ctx, 1);
- SET_TTBR1_SH(base, ctx, 1);
-
- SET_TTBR0_NOS(base, ctx, 1);
- SET_TTBR1_NOS(base, ctx, 1);
-
- SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
- SET_TTBR0_IRGNL(base, ctx, 1);
-
- SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
- SET_TTBR1_IRGNL(base, ctx, 1);
-
- SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
- SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
-#endif
-
- /* Enable the MMU */
- SET_M(base, ctx, 1);
-}
-
-static int msm_iommu_domain_init(struct iommu_domain *domain)
-{
- struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-
- if (!priv)
- goto fail_nomem;
-
- INIT_LIST_HEAD(&priv->list_attached);
- priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
- get_order(SZ_16K));
-
- if (!priv->pgtable)
- goto fail_nomem;
-
- memset(priv->pgtable, 0, SZ_16K);
- domain->priv = priv;
- return 0;
-
-fail_nomem:
- kfree(priv);
- return -ENOMEM;
-}
-
-static void msm_iommu_domain_destroy(struct iommu_domain *domain)
-{
- struct msm_priv *priv;
- unsigned long flags;
- unsigned long *fl_table;
- int i;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
- domain->priv = NULL;
-
- if (priv) {
- fl_table = priv->pgtable;
-
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
- free_page((unsigned long) __va(((fl_table[i]) &
- FL_BASE_MASK)));
-
- free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
- priv->pgtable = NULL;
- }
-
- kfree(priv);
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-}
-
-static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
-{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- struct msm_iommu_ctx_drvdata *tmp_drvdata;
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = domain->priv;
-
- if (!priv || !dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- if (!list_empty(&ctx_drvdata->attached_elm)) {
- ret = -EBUSY;
- goto fail;
- }
-
- list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
- if (tmp_drvdata == ctx_drvdata) {
- ret = -EBUSY;
- goto fail;
- }
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __program_context(iommu_drvdata->base, ctx_dev->num,
- __pa(priv->pgtable));
-
- __disable_clocks(iommu_drvdata);
- list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
- ret = __flush_iotlb(domain);
-
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
-}
-
-static void msm_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
-{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
-
- if (!priv || !dev)
- goto fail;
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
- goto fail;
-
- ret = __flush_iotlb(domain);
- if (ret)
- goto fail;
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __reset_context(iommu_drvdata->base, ctx_dev->num);
- __disable_clocks(iommu_drvdata);
- list_del_init(&ctx_drvdata->attached_elm);
-
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-}
-
-static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
- phys_addr_t pa, int order, int prot)
-{
- struct msm_priv *priv;
- unsigned long flags;
- unsigned long *fl_table;
- unsigned long *fl_pte;
- unsigned long fl_offset;
- unsigned long *sl_table;
- unsigned long *sl_pte;
- unsigned long sl_offset;
- unsigned int pgprot;
- size_t len = 0x1000UL << order;
- int ret = 0, tex, sh;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
- tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
-
- if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
- ret = -EINVAL;
- goto fail;
- }
-
- priv = domain->priv;
- if (!priv) {
- ret = -EINVAL;
- goto fail;
- }
-
- fl_table = priv->pgtable;
-
- if (len != SZ_16M && len != SZ_1M &&
- len != SZ_64K && len != SZ_4K) {
- pr_debug("Bad size: %d\n", len);
- ret = -EINVAL;
- goto fail;
- }
-
- if (!fl_table) {
- pr_debug("Null page table\n");
- ret = -EINVAL;
- goto fail;
- }
-
- if (len == SZ_16M || len == SZ_1M) {
- pgprot = sh ? FL_SHARED : 0;
- pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
- pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
- pgprot |= tex & 0x04 ? FL_TEX0 : 0;
- } else {
- pgprot = sh ? SL_SHARED : 0;
- pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
- pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
- pgprot |= tex & 0x04 ? SL_TEX0 : 0;
- }
-
- fl_offset = FL_OFFSET(va); /* Upper 12 bits */
- fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
-
- if (len == SZ_16M) {
- int i = 0;
- for (i = 0; i < 16; i++)
- *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
- FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
- FL_SHARED | FL_NG | pgprot;
- }
-
- if (len == SZ_1M)
- *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
- FL_TYPE_SECT | FL_SHARED | pgprot;
-
- /* Need a 2nd level table */
- if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
- unsigned long *sl;
- sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
- get_order(SZ_4K));
-
- if (!sl) {
- pr_debug("Could not allocate second level table\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- memset(sl, 0, SZ_4K);
- *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
- }
-
- sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
- sl_offset = SL_OFFSET(va);
- sl_pte = sl_table + sl_offset;
-
-
- if (len == SZ_4K)
- *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
- SL_SHARED | SL_TYPE_SMALL | pgprot;
-
- if (len == SZ_64K) {
- int i;
-
- for (i = 0; i < 16; i++)
- *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
- SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
- }
-
- ret = __flush_iotlb(domain);
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
-}
-
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
- int order)
-{
- struct msm_priv *priv;
- unsigned long flags;
- unsigned long *fl_table;
- unsigned long *fl_pte;
- unsigned long fl_offset;
- unsigned long *sl_table;
- unsigned long *sl_pte;
- unsigned long sl_offset;
- size_t len = 0x1000UL << order;
- int i, ret = 0;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = domain->priv;
-
- if (!priv) {
- ret = -ENODEV;
- goto fail;
- }
-
- fl_table = priv->pgtable;
-
- if (len != SZ_16M && len != SZ_1M &&
- len != SZ_64K && len != SZ_4K) {
- pr_debug("Bad length: %d\n", len);
- ret = -EINVAL;
- goto fail;
- }
-
- if (!fl_table) {
- pr_debug("Null page table\n");
- ret = -EINVAL;
- goto fail;
- }
-
- fl_offset = FL_OFFSET(va); /* Upper 12 bits */
- fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
-
- if (*fl_pte == 0) {
- pr_debug("First level PTE is 0\n");
- ret = -ENODEV;
- goto fail;
- }
-
- /* Unmap supersection */
- if (len == SZ_16M)
- for (i = 0; i < 16; i++)
- *(fl_pte+i) = 0;
-
- if (len == SZ_1M)
- *fl_pte = 0;
-
- sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
- sl_offset = SL_OFFSET(va);
- sl_pte = sl_table + sl_offset;
-
- if (len == SZ_64K) {
- for (i = 0; i < 16; i++)
- *(sl_pte+i) = 0;
- }
-
- if (len == SZ_4K)
- *sl_pte = 0;
-
- if (len == SZ_4K || len == SZ_64K) {
- int used = 0;
-
- for (i = 0; i < NUM_SL_PTE; i++)
- if (sl_table[i])
- used = 1;
- if (!used) {
- free_page((unsigned long)sl_table);
- *fl_pte = 0;
- }
- }
-
- ret = __flush_iotlb(domain);
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
-}
-
-static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long va)
-{
- struct msm_priv *priv;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- unsigned int par;
- unsigned long flags;
- void __iomem *base;
- phys_addr_t ret = 0;
- int ctx;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = domain->priv;
- if (list_empty(&priv->list_attached))
- goto fail;
-
- ctx_drvdata = list_entry(priv->list_attached.next,
- struct msm_iommu_ctx_drvdata, attached_elm);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
-
- base = iommu_drvdata->base;
- ctx = ctx_drvdata->num;
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- /* Invalidate context TLB */
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_V2PPR(base, ctx, va & V2Pxx_VA);
-
- par = GET_PAR(base, ctx);
-
- /* We are dealing with a supersection */
- if (GET_NOFAULT_SS(base, ctx))
- ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
- else /* Upper 20 bits from PAR, lower 12 from VA */
- ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
-
- if (GET_FAULT(base, ctx))
- ret = 0;
-
- __disable_clocks(iommu_drvdata);
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
-}
-
-static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
-{
- return 0;
-}
-
-static void print_ctx_regs(void __iomem *base, int ctx)
-{
- unsigned int fsr = GET_FSR(base, ctx);
- pr_err("FAR = %08x PAR = %08x\n",
- GET_FAR(base, ctx), GET_PAR(base, ctx));
- pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
- (fsr & 0x02) ? "TF " : "",
- (fsr & 0x04) ? "AFF " : "",
- (fsr & 0x08) ? "APF " : "",
- (fsr & 0x10) ? "TLBMF " : "",
- (fsr & 0x20) ? "HTWDEEF " : "",
- (fsr & 0x40) ? "HTWSEEF " : "",
- (fsr & 0x80) ? "MHF " : "",
- (fsr & 0x10000) ? "SL " : "",
- (fsr & 0x40000000) ? "SS " : "",
- (fsr & 0x80000000) ? "MULTI " : "");
-
- pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
- GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
- pr_err("TTBR0 = %08x TTBR1 = %08x\n",
- GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
- pr_err("SCTLR = %08x ACTLR = %08x\n",
- GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
- pr_err("PRRR = %08x NMRR = %08x\n",
- GET_PRRR(base, ctx), GET_NMRR(base, ctx));
-}
-
-irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
-{
- struct msm_iommu_drvdata *drvdata = dev_id;
- void __iomem *base;
- unsigned int fsr;
- int i, ret;
-
- spin_lock(&msm_iommu_lock);
-
- if (!drvdata) {
- pr_err("Invalid device ID in context interrupt handler\n");
- goto fail;
- }
-
- base = drvdata->base;
-
- pr_err("Unexpected IOMMU page fault!\n");
- pr_err("base = %08x\n", (unsigned int) base);
-
- ret = __enable_clocks(drvdata);
- if (ret)
- goto fail;
-
- for (i = 0; i < drvdata->ncb; i++) {
- fsr = GET_FSR(base, i);
- if (fsr) {
- pr_err("Fault occurred in context %d.\n", i);
- pr_err("Interesting registers:\n");
- print_ctx_regs(base, i);
- SET_FSR(base, i, 0x4000000F);
- }
- }
- __disable_clocks(drvdata);
-fail:
- spin_unlock(&msm_iommu_lock);
- return 0;
-}
-
-static struct iommu_ops msm_iommu_ops = {
- .domain_init = msm_iommu_domain_init,
- .domain_destroy = msm_iommu_domain_destroy,
- .attach_dev = msm_iommu_attach_dev,
- .detach_dev = msm_iommu_detach_dev,
- .map = msm_iommu_map,
- .unmap = msm_iommu_unmap,
- .iova_to_phys = msm_iommu_iova_to_phys,
- .domain_has_cap = msm_iommu_domain_has_cap
-};
-
-static int __init get_tex_class(int icp, int ocp, int mt, int nos)
-{
- int i = 0;
- unsigned int prrr = 0;
- unsigned int nmrr = 0;
- int c_icp, c_ocp, c_mt, c_nos;
-
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
-
- for (i = 0; i < NUM_TEX_CLASS; i++) {
- c_nos = PRRR_NOS(prrr, i);
- c_mt = PRRR_MT(prrr, i);
- c_icp = NMRR_ICP(nmrr, i);
- c_ocp = NMRR_OCP(nmrr, i);
-
- if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
- return i;
- }
-
- return -ENODEV;
-}
-
-static void __init setup_iommu_tex_classes(void)
-{
- msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
- get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
-
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
- get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
-
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
- get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
-
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
- get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
-}
-
-static int __init msm_iommu_init(void)
-{
- setup_iommu_tex_classes();
- register_iommu(&msm_iommu_ops);
- return 0;
-}
-
-subsys_initcall(msm_iommu_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
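
This deletion (together with iommu_dev.c below) removes the MSM IOMMU
driver from mach-msm; the Makefile hunk above already dropped iommu.o and
iommu_dev.o, and the MSM_IOMMU/IOMMU_API Kconfig symbols went with it,
leaving only the board-level devices-iommu.o behind. The code's new home
is outside this excerpt. For reference, the map/unmap paths above walk a
standard ARM short-descriptor layout, which is what the "Upper 12 bits"
comments on FL_OFFSET() imply; a sketch of the assumed address split (the
real macros live in mach/iommu_hw-8xxx.h, not shown):

#define FL_OFFSET_SKETCH(va)	(((va) & 0xFFF00000) >> 20)	/* VA[31:20], 4096 entries */
#define SL_OFFSET_SKETCH(va)	(((va) & 0x000FF000) >> 12)	/* VA[19:12], 256 entries */

/*
 * This also explains the 16-iteration loops in msm_iommu_map() and
 * msm_iommu_unmap(): a 16 MiB supersection occupies 16 consecutive
 * first-level PTEs, and a 64 KiB large page 16 second-level PTEs.
 */
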
diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c
deleted file mode 100644
index 8e8fb079852d..000000000000
--- a/arch/arm/mach-msm/iommu_dev.c
+++ /dev/null
@@ -1,422 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include <mach/iommu_hw-8xxx.h>
-#include <mach/iommu.h>
-#include <mach/clk.h>
-
-struct iommu_ctx_iter_data {
- /* input */
- const char *name;
-
- /* output */
- struct device *dev;
-};
-
-static struct platform_device *msm_iommu_root_dev;
-
-static int each_iommu_ctx(struct device *dev, void *data)
-{
- struct iommu_ctx_iter_data *res = data;
- struct msm_iommu_ctx_dev *c = dev->platform_data;
-
- if (!res || !c || !c->name || !res->name)
- return -EINVAL;
-
- if (!strcmp(res->name, c->name)) {
- res->dev = dev;
- return 1;
- }
- return 0;
-}
-
-static int each_iommu(struct device *dev, void *data)
-{
- return device_for_each_child(dev, data, each_iommu_ctx);
-}
-
-struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
- struct iommu_ctx_iter_data r;
- int found;
-
- if (!msm_iommu_root_dev) {
- pr_err("No root IOMMU device.\n");
- goto fail;
- }
-
- r.name = ctx_name;
- found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
-
- if (!found) {
- pr_err("Could not find context <%s>\n", ctx_name);
- goto fail;
- }
-
- return r.dev;
-fail:
- return NULL;
-}
-EXPORT_SYMBOL(msm_iommu_get_ctx);
-
-static void msm_iommu_reset(void __iomem *base, int ncb)
-{
- int ctx;
-
- SET_RPUE(base, 0);
- SET_RPUEIE(base, 0);
- SET_ESRRESTORE(base, 0);
- SET_TBE(base, 0);
- SET_CR(base, 0);
- SET_SPDMBE(base, 0);
- SET_TESTBUSCR(base, 0);
- SET_TLBRSW(base, 0);
- SET_GLOBAL_TLBIALL(base, 0);
- SET_RPU_ACR(base, 0);
- SET_TLBLKCRWE(base, 1);
-
- for (ctx = 0; ctx < ncb; ctx++) {
- SET_BPRCOSH(base, ctx, 0);
- SET_BPRCISH(base, ctx, 0);
- SET_BPRCNSH(base, ctx, 0);
- SET_BPSHCFG(base, ctx, 0);
- SET_BPMTCFG(base, ctx, 0);
- SET_ACTLR(base, ctx, 0);
- SET_SCTLR(base, ctx, 0);
- SET_FSRRESTORE(base, ctx, 0);
- SET_TTBR0(base, ctx, 0);
- SET_TTBR1(base, ctx, 0);
- SET_TTBCR(base, ctx, 0);
- SET_BFBCR(base, ctx, 0);
- SET_PAR(base, ctx, 0);
- SET_FAR(base, ctx, 0);
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_TLBFLPTER(base, ctx, 0);
- SET_TLBSLPTER(base, ctx, 0);
- SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
- SET_CONTEXTIDR(base, ctx, 0);
- }
-}
-
-static int msm_iommu_probe(struct platform_device *pdev)
-{
- struct resource *r, *r2;
- struct clk *iommu_clk;
- struct clk *iommu_pclk;
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
- void __iomem *regs_base;
- resource_size_t len;
- int ret, irq, par;
-
- if (pdev->id == -1) {
- msm_iommu_root_dev = pdev;
- return 0;
- }
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
-
- if (!drvdata) {
- ret = -ENOMEM;
- goto fail;
- }
-
- if (!iommu_dev) {
- ret = -ENODEV;
- goto fail;
- }
-
- iommu_pclk = clk_get(NULL, "smmu_pclk");
- if (IS_ERR(iommu_pclk)) {
- ret = -ENODEV;
- goto fail;
- }
-
- ret = clk_enable(iommu_pclk);
- if (ret)
- goto fail_enable;
-
- iommu_clk = clk_get(&pdev->dev, "iommu_clk");
-
- if (!IS_ERR(iommu_clk)) {
- if (clk_get_rate(iommu_clk) == 0)
- clk_set_min_rate(iommu_clk, 1);
-
- ret = clk_enable(iommu_clk);
- if (ret) {
- clk_put(iommu_clk);
- goto fail_pclk;
- }
- } else
- iommu_clk = NULL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
-
- if (!r) {
- ret = -ENODEV;
- goto fail_clk;
- }
-
- len = resource_size(r);
-
- r2 = request_mem_region(r->start, len, r->name);
- if (!r2) {
- pr_err("Could not request memory region: start=%p, len=%d\n",
- (void *) r->start, len);
- ret = -EBUSY;
- goto fail_clk;
- }
-
- regs_base = ioremap(r2->start, len);
-
- if (!regs_base) {
- pr_err("Could not ioremap: start=%p, len=%d\n",
- (void *) r2->start, len);
- ret = -EBUSY;
- goto fail_mem;
- }
-
- irq = platform_get_irq_byname(pdev, "secure_irq");
- if (irq < 0) {
- ret = -ENODEV;
- goto fail_io;
- }
-
- msm_iommu_reset(regs_base, iommu_dev->ncb);
-
- SET_M(regs_base, 0, 1);
- SET_PAR(regs_base, 0, 0);
- SET_V2PCFG(regs_base, 0, 1);
- SET_V2PPR(regs_base, 0, 0);
- par = GET_PAR(regs_base, 0);
- SET_V2PCFG(regs_base, 0, 0);
- SET_M(regs_base, 0, 0);
-
- if (!par) {
- pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
- ret = -ENODEV;
- goto fail_io;
- }
-
- ret = request_irq(irq, msm_iommu_fault_handler, 0,
- "msm_iommu_secure_irpt_handler", drvdata);
- if (ret) {
- pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail_io;
- }
-
-
- drvdata->pclk = iommu_pclk;
- drvdata->clk = iommu_clk;
- drvdata->base = regs_base;
- drvdata->irq = irq;
- drvdata->ncb = iommu_dev->ncb;
-
- pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
- iommu_dev->name, regs_base, irq, iommu_dev->ncb);
-
- platform_set_drvdata(pdev, drvdata);
-
- if (iommu_clk)
- clk_disable(iommu_clk);
-
- clk_disable(iommu_pclk);
-
- return 0;
-fail_io:
- iounmap(regs_base);
-fail_mem:
- release_mem_region(r->start, len);
-fail_clk:
- if (iommu_clk) {
- clk_disable(iommu_clk);
- clk_put(iommu_clk);
- }
-fail_pclk:
- clk_disable(iommu_pclk);
-fail_enable:
- clk_put(iommu_pclk);
-fail:
- kfree(drvdata);
- return ret;
-}
-
-static int msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_drvdata *drv = NULL;
-
- drv = platform_get_drvdata(pdev);
- if (drv) {
- if (drv->clk)
- clk_put(drv->clk);
- clk_put(drv->pclk);
- memset(drv, 0, sizeof(*drv));
- kfree(drv);
- platform_set_drvdata(pdev, NULL);
- }
- return 0;
-}
-
-static int msm_iommu_ctx_probe(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
- int i, ret;
- if (!c || !pdev->dev.parent) {
- ret = -EINVAL;
- goto fail;
- }
-
- drvdata = dev_get_drvdata(pdev->dev.parent);
-
- if (!drvdata) {
- ret = -ENODEV;
- goto fail;
- }
-
- ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
- if (!ctx_drvdata) {
- ret = -ENOMEM;
- goto fail;
- }
- ctx_drvdata->num = c->num;
- ctx_drvdata->pdev = pdev;
-
- INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
- platform_set_drvdata(pdev, ctx_drvdata);
-
- ret = clk_enable(drvdata->pclk);
- if (ret)
- goto fail;
-
- if (drvdata->clk) {
- ret = clk_enable(drvdata->clk);
- if (ret) {
- clk_disable(drvdata->pclk);
- goto fail;
- }
- }
-
- /* Program the M2V tables for this context */
- for (i = 0; i < MAX_NUM_MIDS; i++) {
- int mid = c->mids[i];
- if (mid == -1)
- break;
-
- SET_M2VCBR_N(drvdata->base, mid, 0);
- SET_CBACR_N(drvdata->base, c->num, 0);
-
- /* Set VMID = 0 */
- SET_VMID(drvdata->base, mid, 0);
-
- /* Set the context number for that MID to this context */
- SET_CBNDX(drvdata->base, mid, c->num);
-
- /* Set MID associated with this context bank to 0*/
- SET_CBVMID(drvdata->base, c->num, 0);
-
- /* Set the ASID for TLB tagging for this context */
- SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);
-
- /* Set security bit override to be Non-secure */
- SET_NSCFG(drvdata->base, mid, 3);
- }
-
- if (drvdata->clk)
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
-
- dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
- return 0;
-fail:
- kfree(ctx_drvdata);
- return ret;
-}
-
-static int msm_iommu_ctx_remove(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_drvdata *drv = NULL;
- drv = platform_get_drvdata(pdev);
- if (drv) {
- memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
- kfree(drv);
- platform_set_drvdata(pdev, NULL);
- }
- return 0;
-}
-
-static struct platform_driver msm_iommu_driver = {
- .driver = {
- .name = "msm_iommu",
- },
- .probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
-};
-
-static struct platform_driver msm_iommu_ctx_driver = {
- .driver = {
- .name = "msm_iommu_ctx",
- },
- .probe = msm_iommu_ctx_probe,
- .remove = msm_iommu_ctx_remove,
-};
-
-static int __init msm_iommu_driver_init(void)
-{
- int ret;
- ret = platform_driver_register(&msm_iommu_driver);
- if (ret != 0) {
- pr_err("Failed to register IOMMU driver\n");
- goto error;
- }
-
- ret = platform_driver_register(&msm_iommu_ctx_driver);
- if (ret != 0) {
- pr_err("Failed to register IOMMU context driver\n");
- goto error;
- }
-
-error:
- return ret;
-}
-
-static void __exit msm_iommu_driver_exit(void)
-{
- platform_driver_unregister(&msm_iommu_ctx_driver);
- platform_driver_unregister(&msm_iommu_driver);
-}
-
-subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
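
With iommu_dev.c gone, msm_iommu_get_ctx() leaves mach-msm as well: the
parent "msm_iommu" platform device enumerates its "msm_iommu_ctx"
children and resolves a context bank by name via device_for_each_child().
A usage sketch of the API being removed; the context name is made up for
illustration:

#include <linux/err.h>
#include <linux/iommu.h>
#include <mach/iommu.h>

static int attach_named_ctx_sketch(struct iommu_domain *domain)
{
	struct device *ctx = msm_iommu_get_ctx("example_ctx");

	if (!ctx)
		return -ENODEV;
	return iommu_attach_device(domain, ctx);
}
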
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c
index 4efa02ee1639..add0d42de7af 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-mx5/board-cpuimx51.c
@@ -245,6 +245,8 @@ __setup("otg_mode=", eukrea_cpuimx51_otg_mode);
*/
static void __init eukrea_cpuimx51_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51_pads,
ARRAY_SIZE(eukrea_cpuimx51_pads));
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c
index 5ef25a596143..ff096d587299 100644
--- a/arch/arm/mach-mx5/board-cpuimx51sd.c
+++ b/arch/arm/mach-mx5/board-cpuimx51sd.c
@@ -264,6 +264,8 @@ static struct platform_device *platform_devices[] __initdata = {
static void __init eukrea_cpuimx51sd_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads,
ARRAY_SIZE(eukrea_cpuimx51sd_pads));
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c
index 11210e1ae42a..7de25c6712eb 100644
--- a/arch/arm/mach-mx5/board-mx50_rdp.c
+++ b/arch/arm/mach-mx5/board-mx50_rdp.c
@@ -192,6 +192,8 @@ static const struct imxi2c_platform_data i2c_data __initconst = {
*/
static void __init mx50_rdp_board_init(void)
{
+ imx50_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx50_rdp_pads,
ARRAY_SIZE(mx50_rdp_pads));
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index 63dfbeafbc1e..3112d15feebc 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -135,6 +135,8 @@ static struct spi_board_info mx51_3ds_spi_nor_device[] = {
*/
static void __init mx51_3ds_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx51_3ds_pads,
ARRAY_SIZE(mx51_3ds_pads));
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index c7b3fabf50f9..6021dd00ec75 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -340,6 +340,8 @@ static void __init mx51_babbage_init(void)
iomux_v3_cfg_t power_key = _MX51_PAD_EIM_A27__GPIO2_21 |
MUX_PAD_CTRL(PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+ imx51_soc_init();
+
#if defined(CONFIG_CPU_FREQ_IMX)
get_cpu_op = mx51_get_cpu_op;
#endif
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c
index 6e362315291b..3be603b9075a 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-mx5/board-mx51_efikamx.c
@@ -236,6 +236,8 @@ late_initcall(mx51_efikamx_power_init);
static void __init mx51_efikamx_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx51efikamx_pads,
ARRAY_SIZE(mx51efikamx_pads));
efika_board_common_init();
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c
index 474fc6e4c6df..4b2e522de0f8 100644
--- a/arch/arm/mach-mx5/board-mx51_efikasb.c
+++ b/arch/arm/mach-mx5/board-mx51_efikasb.c
@@ -248,6 +248,8 @@ static void __init mx51_efikasb_board_id(void)
static void __init efikasb_board_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx51efikasb_pads,
ARRAY_SIZE(mx51efikasb_pads));
efika_board_common_init();
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index f87d571882c6..0d9218a6e2d2 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -117,6 +117,8 @@ static const struct spi_imx_master mx53_evk_spi_data __initconst = {
static void __init mx53_evk_board_init(void)
{
+ imx53_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads,
ARRAY_SIZE(mx53_evk_pads));
mx53_evk_init_uart();
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 1b947e8c9c0c..359c3e248add 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -227,6 +227,8 @@ static const struct imxi2c_platform_data mx53_loco_i2c_data __initconst = {
static void __init mx53_loco_board_init(void)
{
+ imx53_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads,
ARRAY_SIZE(mx53_loco_pads));
imx53_add_imx_uart(0, NULL);
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 817c08938f55..bc02894eafef 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -113,6 +113,8 @@ static const struct imxi2c_platform_data mx53_smd_i2c_data __initconst = {
static void __init mx53_smd_board_init(void)
{
+ imx53_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads,
ARRAY_SIZE(mx53_smd_pads));
mx53_smd_init_uart();
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 6b89c1bf4eb2..cd79e3435e28 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1442,7 +1442,8 @@ static struct clk_lookup mx51_lookups[] = {
_REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
- _REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk)
+ /* i.mx51 has the i.mx35 type cspi */
+ _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk)
@@ -1471,9 +1472,11 @@ static struct clk_lookup mx53_lookups[] = {
_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk)
- _REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk)
- _REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk)
- _REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk)
+ /* i.mx53 has the i.mx51 type ecspi */
+ _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
+ _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
+	/* i.mx53 has the i.mx35 type cspi */
+ _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
};
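
As the corrected comments spell out, these dev_id strings encode IP-block
compatibility rather than the SoC at hand: the CSPI core on i.mx51/i.mx53
is the i.mx35 type, so the platform device (and therefore the clkdev
dev_id) is named "imx35-cspi.N" and one spi_imx driver can bind by name
across SoCs. The driver plausibly requests its clock with a NULL con_id,
so only the dev_id has to line up; a sketch under that assumption:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Probes as "imx35-cspi.0" regardless of the actual SoC. */
static int cspi_clk_sketch(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);	/* NULL con_id */

	return IS_ERR(clk) ? PTR_ERR(clk) : clk_enable(clk);
}
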
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index 153ada53e575..371ca8c8414c 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
#include <mach/irqs.h>
@@ -119,66 +118,3 @@ struct platform_device mxc_usbh2_device = {
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
-
-static struct mxc_gpio_port mxc_gpio_ports[] = {
- {
- .chip.label = "gpio-0",
- .base = MX51_IO_ADDRESS(MX51_GPIO1_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO1_LOW,
- .irq_high = MX51_MXC_INT_GPIO1_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START
- },
- {
- .chip.label = "gpio-1",
- .base = MX51_IO_ADDRESS(MX51_GPIO2_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO2_LOW,
- .irq_high = MX51_MXC_INT_GPIO2_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 1
- },
- {
- .chip.label = "gpio-2",
- .base = MX51_IO_ADDRESS(MX51_GPIO3_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO3_LOW,
- .irq_high = MX51_MXC_INT_GPIO3_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 2
- },
- {
- .chip.label = "gpio-3",
- .base = MX51_IO_ADDRESS(MX51_GPIO4_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO4_LOW,
- .irq_high = MX51_MXC_INT_GPIO4_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 3
- },
- {
- .chip.label = "gpio-4",
- .base = MX53_IO_ADDRESS(MX53_GPIO5_BASE_ADDR),
- .irq = MX53_INT_GPIO5_LOW,
- .irq_high = MX53_INT_GPIO5_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 4
- },
- {
- .chip.label = "gpio-5",
- .base = MX53_IO_ADDRESS(MX53_GPIO6_BASE_ADDR),
- .irq = MX53_INT_GPIO6_LOW,
- .irq_high = MX53_INT_GPIO6_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 5
- },
- {
- .chip.label = "gpio-6",
- .base = MX53_IO_ADDRESS(MX53_GPIO7_BASE_ADDR),
- .irq = MX53_INT_GPIO7_LOW,
- .irq_high = MX53_INT_GPIO7_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 6
- },
-};
-
-int __init imx51_register_gpios(void)
-{
- return mxc_gpio_init(mxc_gpio_ports, 4);
-}
-
-int __init imx53_register_gpios(void)
-{
- return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports));
-}
-
diff --git a/arch/arm/mach-mx5/mm-mx50.c b/arch/arm/mach-mx5/mm-mx50.c
index b9c363b514a9..77e374c726fa 100644
--- a/arch/arm/mach-mx5/mm-mx50.c
+++ b/arch/arm/mach-mx5/mm-mx50.c
@@ -26,7 +26,6 @@
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
/*
@@ -56,17 +55,18 @@ void __init imx50_init_early(void)
mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx50_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 0, 1, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 1, 2, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 2, 3, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 3, 4, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 4, 5, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 5, 6, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
-};
-
void __init mx50_init_irq(void)
{
tzic_init_irq(MX50_IO_ADDRESS(MX50_TZIC_BASE_ADDR));
- mxc_gpio_init(imx50_gpio_ports, ARRAY_SIZE(imx50_gpio_ports));
+}
+
+void __init imx50_soc_init(void)
+{
+ /* i.mx50 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx31-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx31-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx31-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH);
+ mxc_register_gpio("imx31-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH);
+ mxc_register_gpio("imx31-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH);
}
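
Beyond the mechanical conversion, imx50_soc_init() fixes a copy-paste bug
visible in the deleted table: GPIO ports 4 through 6 were wired to
MX50_INT_GPIO3_LOW/HIGH, while the replacement registers the proper
MX50_INT_GPIO4/5/6 pairs. The two interrupts per port reflect the usual
i.MX51-family split; a sketch under that assumption:

/*
 * Assumed pin-to-IRQ split for the two lines registered per port:
 * pins 0-15 raise the *_LOW interrupt, pins 16-31 the *_HIGH one.
 */
static int gpio_bank_irq_sketch(int pin, int irq_low, int irq_high)
{
	return (pin & 0x1f) < 16 ? irq_low : irq_high;
}
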
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index ff557301b42b..665843d6c2b2 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -69,8 +69,6 @@ void __init imx53_init_early(void)
mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR));
}
-int imx51_register_gpios(void);
-
void __init mx51_init_irq(void)
{
unsigned long tzic_addr;
@@ -86,11 +84,8 @@ void __init mx51_init_irq(void)
panic("unable to map TZIC interrupt controller\n");
tzic_init_irq(tzic_virt);
- imx51_register_gpios();
}
-int imx53_register_gpios(void);
-
void __init mx53_init_irq(void)
{
unsigned long tzic_addr;
@@ -103,5 +98,25 @@ void __init mx53_init_irq(void)
panic("unable to map TZIC interrupt controller\n");
tzic_init_irq(tzic_virt);
- imx53_register_gpios();
+}
+
+void __init imx51_soc_init(void)
+{
+ /* i.mx51 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO1_LOW, MX51_MXC_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx31-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO2_LOW, MX51_MXC_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO3_LOW, MX51_MXC_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO4_LOW, MX51_MXC_INT_GPIO4_HIGH);
+}
+
+void __init imx53_soc_init(void)
+{
+ /* i.mx53 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx31-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx31-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx31-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH);
+ mxc_register_gpio("imx31-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH);
+ mxc_register_gpio("imx31-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH);
+ mxc_register_gpio("imx31-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH);
}
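
With imx51_register_gpios()/imx53_register_gpios() gone from the IRQ
path, the GPIO banks only exist once a board calls imx51_soc_init() or
imx53_soc_init(), which is why every mach-mx5 board hunk above gains that
call as the first statement of its init function. The old table split is
preserved: i.mx51 registers four banks, i.mx53 all seven. Illustrative
board-side ordering (the declaration's header is assumed):

#include <mach/common.h>

static void __init example_board_init(void)
{
	imx53_soc_init();	/* GPIO banks must exist before any
				 * gpio_request() by board devices */
	/* iomux setup and board device registration follow */
}
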
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 58e892376bf2..6c38262a3aaa 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,5 +1,5 @@
# Common support
-obj-y := clock.o devices.o gpio.o icoll.o iomux.o system.o timer.o
+obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o
obj-$(CONFIG_MXS_OCOTP) += ocotp.o
obj-$(CONFIG_PM) += pm.o
diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c
index cfdb6b284702..fe3e847930c9 100644
--- a/arch/arm/mach-mxs/devices.c
+++ b/arch/arm/mach-mxs/devices.c
@@ -88,3 +88,14 @@ int __init mxs_add_amba_device(const struct amba_device *dev)
return amba_device_register(adev, &iomem_resource);
}
+
+struct device mxs_apbh_bus = {
+ .init_name = "mxs_apbh",
+ .parent = &platform_bus,
+};
+
+static int __init mxs_device_init(void)
+{
+ return device_register(&mxs_apbh_bus);
+}
+core_initcall(mxs_device_init);
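
mxs_apbh_bus is a bare struct device registered at core_initcall so it
exists before anything hangs off it: the gpio-mxs devices added below
register under it at postcore_initcall, one initcall level later, and the
APBH peripherals end up grouped under a single sysfs parent. A sketch of
a child registration; the device name and sysfs path are assumptions:

#include <linux/device.h>
#include <linux/platform_device.h>

extern struct device mxs_apbh_bus;	/* declared in devices-common.h */

/* Would appear as /sys/devices/platform/mxs_apbh/example-dev.0 */
static struct platform_device * __init apbh_child_sketch(void)
{
	return platform_device_register_resndata(&mxs_apbh_bus,
			"example-dev", 0, NULL, 0, NULL, 0);
}
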
diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile
index 324f2824d38d..351915c683ff 100644
--- a/arch/arm/mach-mxs/devices/Makefile
+++ b/arch/arm/mach-mxs/devices/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o
+obj-y += platform-gpio-mxs.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o
diff --git a/arch/arm/mach-mxs/devices/platform-auart.c b/arch/arm/mach-mxs/devices/platform-auart.c
index 796606cce0ce..27608f5d2ac8 100644
--- a/arch/arm/mach-mxs/devices/platform-auart.c
+++ b/arch/arm/mach-mxs/devices/platform-auart.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <asm/sizes.h>
#include <mach/mx23.h>
#include <mach/mx28.h>
diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c
index 295c4424d5d9..6a0202b1016c 100644
--- a/arch/arm/mach-mxs/devices/platform-dma.c
+++ b/arch/arm/mach-mxs/devices/platform-dma.c
@@ -6,6 +6,7 @@
* Free Software Foundation.
*/
#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/arch/arm/mach-mxs/devices/platform-fec.c b/arch/arm/mach-mxs/devices/platform-fec.c
index 9859cf283335..ae96a4fd8f14 100644
--- a/arch/arm/mach-mxs/devices/platform-fec.c
+++ b/arch/arm/mach-mxs/devices/platform-fec.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <asm/sizes.h>
#include <mach/mx28.h>
#include <mach/devices-common.h>
diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
new file mode 100644
index 000000000000..ed0885e414e0
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/init.h>
+
+#include <mach/mx23.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+struct platform_device *__init mxs_add_gpio(
+ int id, resource_size_t iobase, int irq)
+{
+ struct resource res[] = {
+ {
+ .start = iobase,
+ .end = iobase + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = irq,
+ .end = irq,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return platform_device_register_resndata(&mxs_apbh_bus,
+ "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0);
+}
+
+static int __init mxs_add_mxs_gpio(void)
+{
+ if (cpu_is_mx23()) {
+ mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
+ mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
+ mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
+ }
+
+ if (cpu_is_mx28()) {
+ mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
+ mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
+ mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
+ mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
+ mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
+ }
+
+ return 0;
+}
+postcore_initcall(mxs_add_mxs_gpio);
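
This new file is the registration half of a split GPIO driver: note that
all banks share the pinctrl block's base address, with the platform
device id selecting the bank inside it, and the driver half is expected
to bind against the "gpio-mxs" name from the generic GPIO code. A sketch
of how that driver would plausibly consume the resources registered here:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int __devinit gpio_mxs_probe_sketch(struct platform_device *pdev)
{
	struct resource *iores;
	void __iomem *base;
	int irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0)
		return -EINVAL;

	base = ioremap(iores->start, resource_size(iores));
	if (!base)
		return -EADDRNOTAVAIL;

	/* pdev->id picks the bank within the shared pinctrl window */
	return 0;
}
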
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c
deleted file mode 100644
index 2c950fef71a8..000000000000
--- a/arch/arm/mach-mxs/gpio.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * Based on code from Freescale,
- * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <mach/mx23.h>
-#include <mach/mx28.h>
-#include <asm-generic/bug.h>
-
-#include "gpio.h"
-
-static struct mxs_gpio_port *mxs_gpio_ports;
-static int gpio_table_size;
-
-#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
-#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
-#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
-#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
-#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
-#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
-#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
-#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
-
-#define GPIO_INT_FALL_EDGE 0x0
-#define GPIO_INT_LOW_LEV 0x1
-#define GPIO_INT_RISE_EDGE 0x2
-#define GPIO_INT_HIGH_LEV 0x3
-#define GPIO_INT_LEV_MASK (1 << 0)
-#define GPIO_INT_POL_MASK (1 << 1)
-
-/* Note: This driver assumes 32 GPIOs are handled in one register */
-
-static void clear_gpio_irqstatus(struct mxs_gpio_port *port, u32 index)
-{
- __mxs_clrl(1 << index, port->base + PINCTRL_IRQSTAT(port->id));
-}
-
-static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index,
- int enable)
-{
- if (enable) {
- __mxs_setl(1 << index, port->base + PINCTRL_IRQEN(port->id));
- __mxs_setl(1 << index, port->base + PINCTRL_PIN2IRQ(port->id));
- } else {
- __mxs_clrl(1 << index, port->base + PINCTRL_IRQEN(port->id));
- }
-}
-
-static void mxs_gpio_ack_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f);
-}
-
-static void mxs_gpio_mask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0);
-}
-
-static void mxs_gpio_unmask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1);
-}
-
-static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset);
-
-static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
-{
- u32 gpio = irq_to_gpio(d->irq);
- u32 pin_mask = 1 << (gpio & 31);
- struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
- void __iomem *pin_addr;
- int edge;
-
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- edge = GPIO_INT_RISE_EDGE;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- edge = GPIO_INT_FALL_EDGE;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- edge = GPIO_INT_LOW_LEV;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- edge = GPIO_INT_HIGH_LEV;
- break;
- default:
- return -EINVAL;
- }
-
- /* set level or edge */
- pin_addr = port->base + PINCTRL_IRQLEV(port->id);
- if (edge & GPIO_INT_LEV_MASK)
- __mxs_setl(pin_mask, pin_addr);
- else
- __mxs_clrl(pin_mask, pin_addr);
-
- /* set polarity */
- pin_addr = port->base + PINCTRL_IRQPOL(port->id);
- if (edge & GPIO_INT_POL_MASK)
- __mxs_setl(pin_mask, pin_addr);
- else
- __mxs_clrl(pin_mask, pin_addr);
-
- clear_gpio_irqstatus(port, gpio & 0x1f);
-
- return 0;
-}
-
-/* MXS has one interrupt *per* gpio port */
-static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
-{
- u32 irq_stat;
- struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq);
- u32 gpio_irq_no_base = port->virtual_irq_start;
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-
- irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
- __raw_readl(port->base + PINCTRL_IRQEN(port->id));
-
- while (irq_stat != 0) {
- int irqoffset = fls(irq_stat) - 1;
- generic_handle_irq(gpio_irq_no_base + irqoffset);
- irq_stat &= ~(1 << irqoffset);
- }
-}
-
-/*
- * Set interrupt number "irq" in the GPIO as a wake-up source.
- * While system is running, all registered GPIO interrupts need to have
- * wake-up enabled. When system is suspended, only selected GPIO interrupts
- * need to have wake-up enabled.
- * @param irq interrupt source number
- * @param enable enable as wake-up if equal to non-zero
- * @return This function returns 0 on success.
- */
-static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
-{
- u32 gpio = irq_to_gpio(d->irq);
- u32 gpio_idx = gpio & 0x1f;
- struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
-
- if (enable) {
- if (port->irq_high && (gpio_idx >= 16))
- enable_irq_wake(port->irq_high);
- else
- enable_irq_wake(port->irq);
- } else {
- if (port->irq_high && (gpio_idx >= 16))
- disable_irq_wake(port->irq_high);
- else
- disable_irq_wake(port->irq);
- }
-
- return 0;
-}
-
-static struct irq_chip gpio_irq_chip = {
- .name = "mxs gpio",
- .irq_ack = mxs_gpio_ack_irq,
- .irq_mask = mxs_gpio_mask_irq,
- .irq_unmask = mxs_gpio_unmask_irq,
- .irq_set_type = mxs_gpio_set_irq_type,
- .irq_set_wake = mxs_gpio_set_wake_irq,
-};
-
-static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset,
- int dir)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
- void __iomem *pin_addr = port->base + PINCTRL_DOE(port->id);
-
- if (dir)
- __mxs_setl(1 << offset, pin_addr);
- else
- __mxs_clrl(1 << offset, pin_addr);
-}
-
-static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
-
- return (__raw_readl(port->base + PINCTRL_DIN(port->id)) >> offset) & 1;
-}
-
-static void mxs_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
- void __iomem *pin_addr = port->base + PINCTRL_DOUT(port->id);
-
- if (value)
- __mxs_setl(1 << offset, pin_addr);
- else
- __mxs_clrl(1 << offset, pin_addr);
-}
-
-static int mxs_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
-
- return port->virtual_irq_start + offset;
-}
-
-static int mxs_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- mxs_set_gpio_direction(chip, offset, 0);
- return 0;
-}
-
-static int mxs_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- mxs_gpio_set(chip, offset, value);
- mxs_set_gpio_direction(chip, offset, 1);
- return 0;
-}
-
-int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt)
-{
- int i, j;
-
- /* save for local usage */
- mxs_gpio_ports = port;
- gpio_table_size = cnt;
-
- pr_info("MXS GPIO hardware\n");
-
- for (i = 0; i < cnt; i++) {
- /* disable the interrupt and clear the status */
- __raw_writel(0, port[i].base + PINCTRL_PIN2IRQ(i));
- __raw_writel(0, port[i].base + PINCTRL_IRQEN(i));
-
- /* clear address has to be used to clear IRQSTAT bits */
- __mxs_clrl(~0U, port[i].base + PINCTRL_IRQSTAT(i));
-
- for (j = port[i].virtual_irq_start;
- j < port[i].virtual_irq_start + 32; j++) {
- irq_set_chip_and_handler(j, &gpio_irq_chip,
- handle_level_irq);
- set_irq_flags(j, IRQF_VALID);
- }
-
- /* setup one handler for each entry */
- irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler);
- irq_set_handler_data(port[i].irq, &port[i]);
-
- /* register gpio chip */
- port[i].chip.direction_input = mxs_gpio_direction_input;
- port[i].chip.direction_output = mxs_gpio_direction_output;
- port[i].chip.get = mxs_gpio_get;
- port[i].chip.set = mxs_gpio_set;
- port[i].chip.to_irq = mxs_gpio_to_irq;
- port[i].chip.base = i * 32;
- port[i].chip.ngpio = 32;
-
- /* its a serious configuration bug when it fails */
- BUG_ON(gpiochip_add(&port[i].chip) < 0);
- }
-
- return 0;
-}
-
-#define MX23_GPIO_BASE MX23_IO_ADDRESS(MX23_PINCTRL_BASE_ADDR)
-#define MX28_GPIO_BASE MX28_IO_ADDRESS(MX28_PINCTRL_BASE_ADDR)
-
-#define DEFINE_MXS_GPIO_PORT(_base, _irq, _id) \
- { \
- .chip.label = "gpio-" #_id, \
- .id = _id, \
- .irq = _irq, \
- .base = _base, \
- .virtual_irq_start = MXS_GPIO_IRQ_START + (_id) * 32, \
- }
-
-#ifdef CONFIG_SOC_IMX23
-static struct mxs_gpio_port mx23_gpio_ports[] = {
- DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO0, 0),
- DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO1, 1),
- DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO2, 2),
-};
-
-int __init mx23_register_gpios(void)
-{
- return mxs_gpio_init(mx23_gpio_ports, ARRAY_SIZE(mx23_gpio_ports));
-}
-#endif
-
-#ifdef CONFIG_SOC_IMX28
-static struct mxs_gpio_port mx28_gpio_ports[] = {
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO0, 0),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO1, 1),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO2, 2),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO3, 3),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO4, 4),
-};
-
-int __init mx28_register_gpios(void)
-{
- return mxs_gpio_init(mx28_gpio_ports, ARRAY_SIZE(mx28_gpio_ports));
-}
-#endif
diff --git a/arch/arm/mach-mxs/gpio.h b/arch/arm/mach-mxs/gpio.h
deleted file mode 100644
index 005bb06630b1..000000000000
--- a/arch/arm/mach-mxs/gpio.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MXS_GPIO_H__
-#define __MXS_GPIO_H__
-
-struct mxs_gpio_port {
- void __iomem *base;
- int id;
- int irq;
- int irq_high;
- int virtual_irq_start;
- struct gpio_chip chip;
-};
-
-int mxs_gpio_init(struct mxs_gpio_port*, int);
-
-#endif /* __MXS_GPIO_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 7a37469ed5bf..812d7a813a78 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -11,6 +11,8 @@
#include <linux/init.h>
#include <linux/amba/bus.h>
+extern struct device mxs_apbh_bus;
+
struct platform_device *mxs_add_platform_device_dmamask(
const char *name, int id,
const struct resource *res, unsigned int num_resources,
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index eacdc6b0e70a..56767a5cce0e 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -26,7 +26,6 @@
#include <mach/iomux-mx28.h>
#include "devices-mx28.h"
-#include "gpio.h"
#define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13)
#define MX28EVK_FEC_PHY_POWER MXS_GPIO_NR(2, 15)
diff --git a/arch/arm/mach-mxs/mm-mx23.c b/arch/arm/mach-mxs/mm-mx23.c
index 5148cd64a6b7..1b2345ac1a87 100644
--- a/arch/arm/mach-mxs/mm-mx23.c
+++ b/arch/arm/mach-mxs/mm-mx23.c
@@ -41,5 +41,4 @@ void __init mx23_map_io(void)
void __init mx23_init_irq(void)
{
icoll_init_irq();
- mx23_register_gpios();
}
diff --git a/arch/arm/mach-mxs/mm-mx28.c b/arch/arm/mach-mxs/mm-mx28.c
index 7e4cea32ebc6..b6e18ddb92c0 100644
--- a/arch/arm/mach-mxs/mm-mx28.c
+++ b/arch/arm/mach-mxs/mm-mx28.c
@@ -41,5 +41,4 @@ void __init mx28_map_io(void)
void __init mx28_init_irq(void)
{
icoll_init_irq();
- mx28_register_gpios();
}
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 364137c2042c..399da4ce017b 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -34,11 +34,22 @@ static struct __initdata resource omap15xx_mpu_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL,
+ .datain = OMAP_MPUIO_INPUT_LATCH,
+ .dataout = OMAP_MPUIO_OUTPUT,
+ .irqstatus = OMAP_MPUIO_GPIO_INT,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
.bank_type = METHOD_MPUIO,
.bank_width = 16,
.bank_stride = 1,
+ .regs = &omap15xx_mpuio_regs,
};
static struct platform_device omap15xx_mpu_gpio = {
@@ -64,10 +75,21 @@ static struct __initdata resource omap15xx_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP1510_GPIO_DIR_CONTROL,
+ .datain = OMAP1510_GPIO_DATA_INPUT,
+ .dataout = OMAP1510_GPIO_DATA_OUTPUT,
+ .irqstatus = OMAP1510_GPIO_INT_STATUS,
+ .irqenable = OMAP1510_GPIO_INT_MASK,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
.virtual_irq_start = IH_GPIO_BASE,
.bank_type = METHOD_GPIO_1510,
.bank_width = 16,
+ .regs = &omap15xx_gpio_regs,
};
static struct platform_device omap15xx_gpio = {
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index 293a246e2824..0f399bd0e70e 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -37,11 +37,22 @@ static struct __initdata resource omap16xx_mpu_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap16xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL,
+ .datain = OMAP_MPUIO_INPUT_LATCH,
+ .dataout = OMAP_MPUIO_OUTPUT,
+ .irqstatus = OMAP_MPUIO_GPIO_INT,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
.bank_type = METHOD_MPUIO,
.bank_width = 16,
.bank_stride = 1,
+ .regs = &omap16xx_mpuio_regs,
};
static struct platform_device omap16xx_mpu_gpio = {
@@ -67,10 +78,24 @@ static struct __initdata resource omap16xx_gpio1_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap16xx_gpio_regs = {
+ .revision = OMAP1610_GPIO_REVISION,
+ .direction = OMAP1610_GPIO_DIRECTION,
+ .set_dataout = OMAP1610_GPIO_SET_DATAOUT,
+ .clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT,
+ .datain = OMAP1610_GPIO_DATAIN,
+ .dataout = OMAP1610_GPIO_DATAOUT,
+ .irqstatus = OMAP1610_GPIO_IRQSTATUS1,
+ .irqenable = OMAP1610_GPIO_IRQENABLE1,
+ .set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1,
+ .clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1,
+};
+
static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
.virtual_irq_start = IH_GPIO_BASE,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio1 = {
@@ -100,6 +125,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
.virtual_irq_start = IH_GPIO_BASE + 16,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio2 = {
@@ -129,6 +155,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
.virtual_irq_start = IH_GPIO_BASE + 32,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio3 = {
@@ -158,6 +185,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
.virtual_irq_start = IH_GPIO_BASE + 48,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio4 = {
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index c6ad248d63a6..5ab63eab0ff5 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -39,11 +39,22 @@ static struct __initdata resource omap7xx_mpu_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap7xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL / 2,
+ .datain = OMAP_MPUIO_INPUT_LATCH / 2,
+ .dataout = OMAP_MPUIO_OUTPUT / 2,
+ .irqstatus = OMAP_MPUIO_GPIO_INT / 2,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT / 2,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
.bank_type = METHOD_MPUIO,
.bank_width = 32,
.bank_stride = 2,
+ .regs = &omap7xx_mpuio_regs,
};
static struct platform_device omap7xx_mpu_gpio = {
@@ -69,10 +80,21 @@ static struct __initdata resource omap7xx_gpio1_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap7xx_gpio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP7XX_GPIO_DIR_CONTROL,
+ .datain = OMAP7XX_GPIO_DATA_INPUT,
+ .dataout = OMAP7XX_GPIO_DATA_OUTPUT,
+ .irqstatus = OMAP7XX_GPIO_INT_STATUS,
+ .irqenable = OMAP7XX_GPIO_INT_MASK,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
.virtual_irq_start = IH_GPIO_BASE,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio1 = {
@@ -102,6 +124,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
.virtual_irq_start = IH_GPIO_BASE + 32,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio2 = {
@@ -131,6 +154,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
.virtual_irq_start = IH_GPIO_BASE + 64,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio3 = {
@@ -160,6 +184,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
.virtual_irq_start = IH_GPIO_BASE + 96,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio4 = {
@@ -189,6 +214,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
.virtual_irq_start = IH_GPIO_BASE + 128,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio5 = {
@@ -218,6 +244,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
.virtual_irq_start = IH_GPIO_BASE + 160,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio6 = {
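
The omap15xx/16xx/7xx hunks above all feed the same kind of per-bank register-offset table to the GPIO driver, so bank accesses no longer need to switch on the METHOD_* bank type. A minimal sketch of how a consumer can use such a table; the trimmed-down struct and the gpio_bank wrapper here are assumptions for illustration, not the in-tree definitions:

#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/types.h>

struct omap_gpio_reg_offs {
        u16 direction;
        u16 datain;
        u16 dataout;
        u16 set_dataout;        /* 0 when the bank has no set/clear regs */
        u16 clr_dataout;
        bool irqenable_inv;     /* MPUIO banks: enable bits are inverted */
};

struct gpio_bank {
        void __iomem *base;
        const struct omap_gpio_reg_offs *regs;
};

static void bank_set_dataout(struct gpio_bank *bank, unsigned offset, int value)
{
        if (bank->regs->set_dataout && bank->regs->clr_dataout) {
                /* 16xx/24xx/44xx style: atomic set/clear, no RMW needed */
                __raw_writel(BIT(offset), bank->base +
                             (value ? bank->regs->set_dataout
                                    : bank->regs->clr_dataout));
        } else {
                /* 15xx/7xx style: read-modify-write the DATAOUT register */
                u32 l = __raw_readl(bank->base + bank->regs->dataout);

                if (value)
                        l |= BIT(offset);
                else
                        l &= ~BIT(offset);
                __raw_writel(l, bank->base + bank->regs->dataout);
        }
}
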
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 334fb8871bc3..943072d5a1d5 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev)
if (ret)
return ret;
- ret = pm_runtime_clk_suspend(dev);
+ ret = pm_clk_suspend(dev);
if (ret) {
pm_generic_runtime_resume(dev);
return ret;
@@ -45,24 +45,24 @@ static int omap1_pm_runtime_resume(struct device *dev)
{
dev_dbg(dev, "%s\n", __func__);
- pm_runtime_clk_resume(dev);
+ pm_clk_resume(dev);
return pm_generic_runtime_resume(dev);
}
-static struct dev_power_domain default_power_domain = {
+static struct dev_pm_domain default_pm_domain = {
.ops = {
.runtime_suspend = omap1_pm_runtime_suspend,
.runtime_resume = omap1_pm_runtime_resume,
USE_PLATFORM_PM_SLEEP_OPS
},
};
-#define OMAP1_PWR_DOMAIN (&default_power_domain)
+#define OMAP1_PM_DOMAIN (&default_pm_domain)
#else
-#define OMAP1_PWR_DOMAIN NULL
+#define OMAP1_PM_DOMAIN NULL
#endif /* CONFIG_PM_RUNTIME */
static struct pm_clk_notifier_block platform_bus_notifier = {
- .pwr_domain = OMAP1_PWR_DOMAIN,
+ .pm_domain = OMAP1_PM_DOMAIN,
.con_ids = { "ick", "fck", NULL, },
};
@@ -71,7 +71,7 @@ static int __init omap1_pm_runtime_init(void)
if (!cpu_class_is_omap1())
return -ENODEV;
- pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+ pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
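
The pm_bus.c hunk is part of a tree-wide rename: pm_runtime_clk_* becomes pm_clk_*, and struct dev_power_domain becomes struct dev_pm_domain, with the notifier field renamed to .pm_domain to match. A condensed sketch of the renamed API as a bus would use it; the "my_" names are placeholders, and the exact header declaring the pm_clk_*() helpers varies by kernel version:

#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>

static struct dev_pm_domain my_pm_domain = {
        .ops = {
                .runtime_suspend = pm_clk_suspend,
                .runtime_resume  = pm_clk_resume,
                USE_PLATFORM_PM_SLEEP_OPS
        },
};

static struct pm_clk_notifier_block my_bus_notifier = {
        .pm_domain = &my_pm_domain,
        .con_ids = { "ick", "fck", NULL, },
};

static int __init my_pm_runtime_init(void)
{
        /* manage interface and function clocks across runtime PM transitions */
        pm_clk_add_notifier(&platform_bus_type, &my_bus_notifier);
        return 0;
}
core_initcall(my_pm_runtime_init);
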
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index 9529842ae054..2765cdc3152d 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -61,13 +61,45 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
pdata->dbck_flag = dev_attr->dbck_flag;
pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1);
+ pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
+ if (!pdata->regs) {
+ pr_err("gpio%d: Memory allocation failed\n", id);
+ return -ENOMEM;
+ }
+
switch (oh->class->rev) {
case 0:
case 1:
pdata->bank_type = METHOD_GPIO_24XX;
+ pdata->regs->revision = OMAP24XX_GPIO_REVISION;
+ pdata->regs->direction = OMAP24XX_GPIO_OE;
+ pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
+ pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
+ pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
+ pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
+ pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
+ pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
+ pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
+ pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
+ pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
+ pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
+ pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
break;
case 2:
pdata->bank_type = METHOD_GPIO_44XX;
+ pdata->regs->revision = OMAP4_GPIO_REVISION;
+ pdata->regs->direction = OMAP4_GPIO_OE;
+ pdata->regs->datain = OMAP4_GPIO_DATAIN;
+ pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
+ pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
+ pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
+ pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
+ pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
+ pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+ pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+ pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
+ pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
+ pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
break;
default:
WARN(1, "Invalid gpio bank_type\n");
@@ -87,6 +119,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
return PTR_ERR(od);
}
+ omap_device_disable_idle_on_suspend(od);
+
gpio_bank_count++;
return 0;
}
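
Note that the kzalloc() guard above must test the fresh pdata->regs allocation rather than pdata itself, which was already dereferenced on the previous line. The same guard as a self-contained sketch, with placeholder struct contents:

#include <linux/kernel.h>
#include <linux/slab.h>

struct reg_offs { u16 revision; u16 direction; };
struct bank_pdata { struct reg_offs *regs; };

static int alloc_bank_regs(struct bank_pdata *pdata, int id)
{
        pdata->regs = kzalloc(sizeof(*pdata->regs), GFP_KERNEL);
        if (!pdata->regs) {     /* test the new allocation, not pdata */
                pr_err("gpio%d: Memory allocation failed\n", id);
                return -ENOMEM;
        }
        return 0;
}
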
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 1ac361b7b8cb..466fc722fa0f 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
name, oh->name);
+ omap_device_disable_idle_on_suspend(od);
oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
uart->irq = oh->mpu_irqs[0].irq;
diff --git a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
deleted file mode 100644
index dcef2287cb38..000000000000
--- a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/spi-gpio.h
- *
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - SPI Controller platform_device info
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_SPIGPIO_H
-#define __ASM_ARCH_SPIGPIO_H __FILE__
-
-struct s3c2410_spigpio_info {
- unsigned long pin_clk;
- unsigned long pin_mosi;
- unsigned long pin_miso;
-
- int num_chipselect;
- int bus_num;
-
- void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs);
-};
-
-
-#endif /* __ASM_ARCH_SPIGPIO_H */
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index e8f49feef28c..f44f77531b1e 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -32,7 +32,7 @@
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_gpio.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -53,8 +53,6 @@
#include <mach/fb.h>
#include <plat/nand.h>
#include <plat/udc.h>
-#include <mach/spi.h>
-#include <mach/spi-gpio.h>
#include <plat/iic.h>
#include <plat/common-smdk.h>
@@ -216,32 +214,16 @@ static struct platform_device qt2410_led = {
/* SPI */
-static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs)
-{
- switch (cs) {
- case BITBANG_CS_ACTIVE:
- gpio_set_value(S3C2410_GPB(5), 0);
- break;
- case BITBANG_CS_INACTIVE:
- gpio_set_value(S3C2410_GPB(5), 1);
- break;
- }
-}
-
-static struct s3c2410_spigpio_info spi_gpio_cfg = {
- .pin_clk = S3C2410_GPG(7),
- .pin_mosi = S3C2410_GPG(6),
- .pin_miso = S3C2410_GPG(5),
- .chip_select = &spi_gpio_cs,
+static struct spi_gpio_platform_data spi_gpio_cfg = {
+ .sck = S3C2410_GPG(7),
+ .mosi = S3C2410_GPG(6),
+ .miso = S3C2410_GPG(5),
};
-
static struct platform_device qt2410_spi = {
- .name = "s3c24xx-spi-gpio",
- .id = 1,
- .dev = {
- .platform_data = &spi_gpio_cfg,
- },
+ .name = "spi-gpio",
+ .id = 1,
+ .dev.platform_data = &spi_gpio_cfg,
};
/* Board devices */
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 85dcaeb9e62f..5eeb47580b0c 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -25,6 +25,7 @@
#include <video/ili9320.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -38,7 +39,6 @@
#include <mach/regs-gpio.h>
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
-#include <mach/spi-gpio.h>
#include <mach/fb.h>
#include <asm/mach-types.h>
@@ -389,45 +389,30 @@ static struct ili9320_platdata jive_lcm_config = {
/* LCD SPI support */
-static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs)
-{
- gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1);
-}
-
-static struct s3c2410_spigpio_info jive_lcd_spi = {
- .bus_num = 1,
- .pin_clk = S3C2410_GPG(8),
- .pin_mosi = S3C2410_GPB(8),
- .num_chipselect = 1,
- .chip_select = jive_lcd_spi_chipselect,
+static struct spi_gpio_platform_data jive_lcd_spi = {
+ .sck = S3C2410_GPG(8),
+ .mosi = S3C2410_GPB(8),
+ .miso = SPI_GPIO_NO_MISO,
};
static struct platform_device jive_device_lcdspi = {
- .name = "spi_s3c24xx_gpio",
+ .name = "spi-gpio",
.id = 1,
- .num_resources = 0,
.dev.platform_data = &jive_lcd_spi,
};
-/* WM8750 audio code SPI definition */
-static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs)
-{
- gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1);
-}
+/* WM8750 audio codec SPI definition */
-static struct s3c2410_spigpio_info jive_wm8750_spi = {
- .bus_num = 2,
- .pin_clk = S3C2410_GPB(4),
- .pin_mosi = S3C2410_GPB(9),
- .num_chipselect = 1,
- .chip_select = jive_wm8750_chipselect,
+static struct spi_gpio_platform_data jive_wm8750_spi = {
+ .sck = S3C2410_GPB(4),
+ .mosi = S3C2410_GPB(9),
+ .miso = SPI_GPIO_NO_MISO,
};
static struct platform_device jive_device_wm8750 = {
- .name = "spi_s3c24xx_gpio",
+ .name = "spi-gpio",
.id = 2,
- .num_resources = 0,
.dev.platform_data = &jive_wm8750_spi,
};
@@ -441,12 +426,14 @@ static struct spi_board_info __initdata jive_spi_devs[] = {
.mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */
.max_speed_hz = 100000,
.platform_data = &jive_lcm_config,
+ .controller_data = (void *)S3C2410_GPB(7),
}, {
.modalias = "WM8750",
.bus_num = 2,
.chip_select = 0,
.mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */
.max_speed_hz = 100000,
+ .controller_data = (void *)S3C2410_GPH(10),
},
};
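
Both board conversions above follow the same recipe for the generic spi-gpio driver: the clock/data pins go into struct spi_gpio_platform_data, the device name becomes "spi-gpio" with .id selecting the bus number, and each slave's chip-select GPIO travels in spi_board_info.controller_data instead of a custom chip_select() callback. A minimal sketch with made-up pin numbers:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>

static struct spi_gpio_platform_data my_spi_pins = {
        .sck            = 10,                   /* placeholder GPIO numbers */
        .mosi           = 11,
        .miso           = SPI_GPIO_NO_MISO,     /* write-only bus */
        .num_chipselect = 1,
};

static struct platform_device my_spi_master = {
        .name           = "spi-gpio",
        .id             = 1,                    /* SPI bus number 1 */
        .dev.platform_data = &my_spi_pins,
};

static struct spi_board_info my_spi_devs[] __initdata = {
        {
                .modalias        = "some-chip",
                .bus_num         = 1,
                .chip_select     = 0,
                .max_speed_hz    = 100000,
                .controller_data = (void *)12,  /* CS GPIO number */
        },
};
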
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 716662008ce2..c10ddf4ed7f1 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -74,7 +74,6 @@
#include <mach/fb.h>
#include <mach/spi.h>
-#include <mach/spi-gpio.h>
#include <plat/usb-control.h>
#include <mach/regs-mem.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 803bc6edfca4..b473b8efac68 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1408,9 +1408,14 @@ static void __init ap4evb_init(void)
platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
+ sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device);
+ sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+
hdmi_init_pm_clock();
fsi_init_pm_clock();
sh7372_pm_init();
+ pm_clk_add(&fsi_device.dev, "spu2");
}
static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 3802f2afabef..5b36b6c5b448 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1582,8 +1582,13 @@ static void __init mackerel_init(void)
platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
+ sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+
hdmi_init_pm_clock();
sh7372_pm_init();
+ pm_clk_add(&fsi_device.dev, "spu2");
}
static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index c0800d83971e..91f5779abdd3 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
+ CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]),
};
void __init sh7372_clock_init(void)
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index df20d7670172..ce595cee86cd 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -12,6 +12,7 @@
#define __ASM_SH7372_H__
#include <linux/sh_clk.h>
+#include <linux/pm_domain.h>
/*
* Pin Function Controller:
@@ -470,4 +471,32 @@ extern struct clk sh7372_fsibck_clk;
extern struct clk sh7372_fsidiva_clk;
extern struct clk sh7372_fsidivb_clk;
+struct platform_device;
+
+struct sh7372_pm_domain {
+ struct generic_pm_domain genpd;
+ unsigned int bit_shift;
+};
+
+static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct sh7372_pm_domain, genpd);
+}
+
+#ifdef CONFIG_PM
+extern struct sh7372_pm_domain sh7372_a4lc;
+extern struct sh7372_pm_domain sh7372_a4mp;
+extern struct sh7372_pm_domain sh7372_d4;
+extern struct sh7372_pm_domain sh7372_a3rv;
+extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a3sg;
+
+extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
+extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
+ struct platform_device *pdev);
+#else
+#define sh7372_init_pm_domain(pd) do { } while (0)
+#define sh7372_add_device_to_domain(pd, pdev) do { } while (0)
+#endif /* CONFIG_PM */
+
#endif /* __ASM_SH7372_H__ */
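
The to_sh7372_pd() helper above is the standard container_of() accessor: given a pointer to an embedded member, it recovers the enclosing structure. The same idea in a self-contained toy (the names are placeholders):

#include <linux/kernel.h>

struct engine { int rpm; };

struct car {
        const char *name;
        struct engine eng;      /* embedded, like genpd in sh7372_pm_domain */
};

static inline struct car *engine_to_car(struct engine *e)
{
        return container_of(e, struct car, eng);
}
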
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 8e4aadf14c9f..933fb411be0f 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,16 +15,176 @@
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <mach/common.h>
+#include <mach/sh7372.h>
#define SMFRAM 0xe6a70000
#define SYSTBCR 0xe6150024
#define SBAR 0xe6180020
#define APARMBAREA 0xe6f10020
+#define SPDCR 0xe6180008
+#define SWUCR 0xe6180014
+#define PSTR 0xe6180080
+
+#define PSTR_RETRIES 100
+#define PSTR_DELAY_US 10
+
+#ifdef CONFIG_PM
+
+static int pd_power_down(struct generic_pm_domain *genpd)
+{
+ struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
+ unsigned int mask = 1 << sh7372_pd->bit_shift;
+
+ if (__raw_readl(PSTR) & mask) {
+ unsigned int retry_count;
+
+ __raw_writel(mask, SPDCR);
+
+ for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
+ if (!(__raw_readl(SPDCR) & mask))
+ break;
+ cpu_relax();
+ }
+ }
+
+ pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
+ mask, __raw_readl(PSTR));
+
+ return 0;
+}
+
+static int pd_power_up(struct generic_pm_domain *genpd)
+{
+ struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
+ unsigned int mask = 1 << sh7372_pd->bit_shift;
+ unsigned int retry_count;
+ int ret = 0;
+
+ if (__raw_readl(PSTR) & mask)
+ goto out;
+
+ __raw_writel(mask, SWUCR);
+
+ for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
+ if (!(__raw_readl(SWUCR) & mask))
+ goto out;
+ if (retry_count > PSTR_RETRIES)
+ udelay(PSTR_DELAY_US);
+ else
+ cpu_relax();
+ }
+ if (__raw_readl(SWUCR) & mask)
+ ret = -EIO;
+
+ out:
+ pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
+ mask, __raw_readl(PSTR));
+
+ return ret;
+}
+
+static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
+{
+ int ret = pd_power_up(genpd);
+
+ /* force A4LC on after A3RV has been requested on */
+ pm_genpd_poweron(&sh7372_a4lc.genpd);
+
+ return ret;
+}
+
+static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
+{
+ int ret = pd_power_down(genpd);
+
+ /* try to power down A4LC after A3RV is requested off */
+ genpd_queue_power_off_work(&sh7372_a4lc.genpd);
+
+ return ret;
+}
+
+static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
+{
+ /* only power down A4LC if A3RV is off */
+ if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
+ return pd_power_down(genpd);
+
+ return -EBUSY;
+}
+
+static bool pd_active_wakeup(struct device *dev)
+{
+ return true;
+}
+
+void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
+{
+ struct generic_pm_domain *genpd = &sh7372_pd->genpd;
+
+ pm_genpd_init(genpd, NULL, false);
+ genpd->stop_device = pm_clk_suspend;
+ genpd->start_device = pm_clk_resume;
+ genpd->active_wakeup = pd_active_wakeup;
+
+ if (sh7372_pd == &sh7372_a4lc) {
+ genpd->power_off = pd_power_down_a4lc;
+ genpd->power_on = pd_power_up;
+ } else if (sh7372_pd == &sh7372_a3rv) {
+ genpd->power_off = pd_power_down_a3rv;
+ genpd->power_on = pd_power_up_a3rv;
+ } else {
+ genpd->power_off = pd_power_down;
+ genpd->power_on = pd_power_up;
+ }
+ genpd->power_on(&sh7372_pd->genpd);
+}
+
+void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!dev->power.subsys_data) {
+ pm_clk_init(dev);
+ pm_clk_add(dev, NULL);
+ }
+ pm_genpd_add_device(&sh7372_pd->genpd, dev);
+}
+
+struct sh7372_pm_domain sh7372_a4lc = {
+ .bit_shift = 1,
+};
+
+struct sh7372_pm_domain sh7372_a4mp = {
+ .bit_shift = 2,
+};
+
+struct sh7372_pm_domain sh7372_d4 = {
+ .bit_shift = 3,
+};
+
+struct sh7372_pm_domain sh7372_a3rv = {
+ .bit_shift = 6,
+};
+
+struct sh7372_pm_domain sh7372_a3ri = {
+ .bit_shift = 8,
+};
+
+struct sh7372_pm_domain sh7372_a3sg = {
+ .bit_shift = 13,
+};
+
+#endif /* CONFIG_PM */
+
static void sh7372_enter_core_standby(void)
{
void __iomem *smfram = (void __iomem *)SMFRAM;
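
The SPDCR/SWUCR wait loops in pd_power_down()/pd_power_up() above are instances of a bounded poll: spin (or udelay) until a status bit clears, giving up after a fixed number of tries. Factored out as a generic helper it might look like this; a sketch only, not an in-tree API:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/processor.h>      /* cpu_relax() */

static int poll_bit_clear(void __iomem *reg, u32 mask,
                          unsigned int retries, unsigned int delay_us)
{
        while (retries--) {
                if (!(__raw_readl(reg) & mask))
                        return 0;       /* bit cleared: operation done */
                if (delay_us)
                        udelay(delay_us);
                else
                        cpu_relax();
        }
        return -ETIMEDOUT;              /* hardware never acknowledged */
}
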
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 2d1b67a59e4a..6ec454e1e063 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
@@ -28,31 +29,38 @@ static int default_platform_runtime_idle(struct device *dev)
return pm_runtime_suspend(dev);
}
-static struct dev_power_domain default_power_domain = {
+static struct dev_pm_domain default_pm_domain = {
.ops = {
- .runtime_suspend = pm_runtime_clk_suspend,
- .runtime_resume = pm_runtime_clk_resume,
+ .runtime_suspend = pm_clk_suspend,
+ .runtime_resume = pm_clk_resume,
.runtime_idle = default_platform_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
},
};
-#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain)
+#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain)
#else
-#define DEFAULT_PWR_DOMAIN_PTR NULL
+#define DEFAULT_PM_DOMAIN_PTR NULL
#endif /* CONFIG_PM_RUNTIME */
static struct pm_clk_notifier_block platform_bus_notifier = {
- .pwr_domain = DEFAULT_PWR_DOMAIN_PTR,
+ .pm_domain = DEFAULT_PM_DOMAIN_PTR,
.con_ids = { NULL, },
};
static int __init sh_pm_runtime_init(void)
{
- pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+ pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
core_initcall(sh_pm_runtime_init);
+
+static int __init sh_pm_runtime_late_init(void)
+{
+ pm_genpd_poweroff_unused();
+ return 0;
+}
+late_initcall(sh_pm_runtime_late_init);
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index cd807eea69e2..79f0413d8725 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -841,11 +841,22 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
void __init sh7372_add_standard_devices(void)
{
+ sh7372_init_pm_domain(&sh7372_a4lc);
+ sh7372_init_pm_domain(&sh7372_a4mp);
+ sh7372_init_pm_domain(&sh7372_d4);
+ sh7372_init_pm_domain(&sh7372_a3rv);
+ sh7372_init_pm_domain(&sh7372_a3ri);
+ sh7372_init_pm_domain(&sh7372_a3sg);
+
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
platform_add_devices(sh7372_late_devices,
ARRAY_SIZE(sh7372_late_devices));
+
+ sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
}
void __init sh7372_add_early_devices(void)
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 823c703e573c..ed58ef9019b5 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -4,7 +4,6 @@ obj-y += io.o
obj-y += irq.o
obj-y += clock.o
obj-y += timer.o
-obj-y += gpio.o
obj-y += pinmux.o
obj-y += powergate.o
obj-y += fuse.o
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index e028320ab423..f8d41ffc0ca9 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -585,7 +585,7 @@ static const struct file_operations possible_parents_fops = {
static int clk_debugfs_register_one(struct clk *c)
{
- struct dentry *d, *child, *child_tmp;
+ struct dentry *d;
d = debugfs_create_dir(c->name, clk_debugfs_root);
if (!d)
@@ -614,10 +614,7 @@ static int clk_debugfs_register_one(struct clk *c)
return 0;
err_out:
- d = c->dent;
- list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(c->dent);
+ debugfs_remove_recursive(c->dent);
return -ENOMEM;
}
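
This error path (and the near-identical one in mach-ux500 further down) replaces an open-coded walk of dentry->d_subdirs with debugfs_remove_recursive(), which tears down a directory and everything under it in one call. Typical usage shape, as a sketch with placeholder names:

#include <linux/debugfs.h>
#include <linux/errno.h>

static u32 my_rate;
static struct dentry *my_dir;

static int __init my_debugfs_init(void)
{
        my_dir = debugfs_create_dir("my_clk", NULL);
        if (!my_dir)
                return -ENOMEM;

        if (!debugfs_create_u32("rate", S_IRUGO, my_dir, &my_rate)) {
                /* one call removes the dir and all entries inside it */
                debugfs_remove_recursive(my_dir);
                return -ENOMEM;
        }
        return 0;
}
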
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
deleted file mode 100644
index 919d63837736..000000000000
--- a/arch/arm/mach-tegra/gpio.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * arch/arm/mach-tegra/gpio.c
- *
- * Copyright (c) 2010 Google, Inc
- *
- * Author:
- * Erik Gilling <konkers@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/mach/irq.h>
-
-#include <mach/iomap.h>
-#include <mach/suspend.h>
-
-#define GPIO_BANK(x) ((x) >> 5)
-#define GPIO_PORT(x) (((x) >> 3) & 0x3)
-#define GPIO_BIT(x) ((x) & 0x7)
-
-#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \
- GPIO_BANK(x) * 0x80 + \
- GPIO_PORT(x) * 4)
-
-#define GPIO_CNF(x) (GPIO_REG(x) + 0x00)
-#define GPIO_OE(x) (GPIO_REG(x) + 0x10)
-#define GPIO_OUT(x) (GPIO_REG(x) + 0X20)
-#define GPIO_IN(x) (GPIO_REG(x) + 0x30)
-#define GPIO_INT_STA(x) (GPIO_REG(x) + 0x40)
-#define GPIO_INT_ENB(x) (GPIO_REG(x) + 0x50)
-#define GPIO_INT_LVL(x) (GPIO_REG(x) + 0x60)
-#define GPIO_INT_CLR(x) (GPIO_REG(x) + 0x70)
-
-#define GPIO_MSK_CNF(x) (GPIO_REG(x) + 0x800)
-#define GPIO_MSK_OE(x) (GPIO_REG(x) + 0x810)
-#define GPIO_MSK_OUT(x) (GPIO_REG(x) + 0X820)
-#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + 0x840)
-#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + 0x850)
-#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + 0x860)
-
-#define GPIO_INT_LVL_MASK 0x010101
-#define GPIO_INT_LVL_EDGE_RISING 0x000101
-#define GPIO_INT_LVL_EDGE_FALLING 0x000100
-#define GPIO_INT_LVL_EDGE_BOTH 0x010100
-#define GPIO_INT_LVL_LEVEL_HIGH 0x000001
-#define GPIO_INT_LVL_LEVEL_LOW 0x000000
-
-struct tegra_gpio_bank {
- int bank;
- int irq;
- spinlock_t lvl_lock[4];
-#ifdef CONFIG_PM
- u32 cnf[4];
- u32 out[4];
- u32 oe[4];
- u32 int_enb[4];
- u32 int_lvl[4];
-#endif
-};
-
-
-static struct tegra_gpio_bank tegra_gpio_banks[] = {
- {.bank = 0, .irq = INT_GPIO1},
- {.bank = 1, .irq = INT_GPIO2},
- {.bank = 2, .irq = INT_GPIO3},
- {.bank = 3, .irq = INT_GPIO4},
- {.bank = 4, .irq = INT_GPIO5},
- {.bank = 5, .irq = INT_GPIO6},
- {.bank = 6, .irq = INT_GPIO7},
-};
-
-static int tegra_gpio_compose(int bank, int port, int bit)
-{
- return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
-}
-
-static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
-{
- u32 val;
-
- val = 0x100 << GPIO_BIT(gpio);
- if (value)
- val |= 1 << GPIO_BIT(gpio);
- __raw_writel(val, reg);
-}
-
-void tegra_gpio_enable(int gpio)
-{
- tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
-}
-
-void tegra_gpio_disable(int gpio)
-{
- tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0);
-}
-
-static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- tegra_gpio_mask_write(GPIO_MSK_OUT(offset), offset, value);
-}
-
-static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- return (__raw_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
-}
-
-static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 0);
- return 0;
-}
-
-static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- tegra_gpio_set(chip, offset, value);
- tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 1);
- return 0;
-}
-
-
-
-static struct gpio_chip tegra_gpio_chip = {
- .label = "tegra-gpio",
- .direction_input = tegra_gpio_direction_input,
- .get = tegra_gpio_get,
- .direction_output = tegra_gpio_direction_output,
- .set = tegra_gpio_set,
- .base = 0,
- .ngpio = TEGRA_NR_GPIOS,
-};
-
-static void tegra_gpio_irq_ack(struct irq_data *d)
-{
- int gpio = d->irq - INT_GPIO_BASE;
-
- __raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
-}
-
-static void tegra_gpio_irq_mask(struct irq_data *d)
-{
- int gpio = d->irq - INT_GPIO_BASE;
-
- tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
-}
-
-static void tegra_gpio_irq_unmask(struct irq_data *d)
-{
- int gpio = d->irq - INT_GPIO_BASE;
-
- tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
-}
-
-static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
-{
- int gpio = d->irq - INT_GPIO_BASE;
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- int port = GPIO_PORT(gpio);
- int lvl_type;
- int val;
- unsigned long flags;
-
- switch (type & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_RISING:
- lvl_type = GPIO_INT_LVL_EDGE_RISING;
- break;
-
- case IRQ_TYPE_EDGE_FALLING:
- lvl_type = GPIO_INT_LVL_EDGE_FALLING;
- break;
-
- case IRQ_TYPE_EDGE_BOTH:
- lvl_type = GPIO_INT_LVL_EDGE_BOTH;
- break;
-
- case IRQ_TYPE_LEVEL_HIGH:
- lvl_type = GPIO_INT_LVL_LEVEL_HIGH;
- break;
-
- case IRQ_TYPE_LEVEL_LOW:
- lvl_type = GPIO_INT_LVL_LEVEL_LOW;
- break;
-
- default:
- return -EINVAL;
- }
-
- spin_lock_irqsave(&bank->lvl_lock[port], flags);
-
- val = __raw_readl(GPIO_INT_LVL(gpio));
- val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
- val |= lvl_type << GPIO_BIT(gpio);
- __raw_writel(val, GPIO_INT_LVL(gpio));
-
- spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
-
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(d->irq, handle_level_irq);
- else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __irq_set_handler_locked(d->irq, handle_edge_irq);
-
- return 0;
-}
-
-static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- struct tegra_gpio_bank *bank;
- int port;
- int pin;
- int unmasked = 0;
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
-
- bank = irq_get_handler_data(irq);
-
- for (port = 0; port < 4; port++) {
- int gpio = tegra_gpio_compose(bank->bank, port, 0);
- unsigned long sta = __raw_readl(GPIO_INT_STA(gpio)) &
- __raw_readl(GPIO_INT_ENB(gpio));
- u32 lvl = __raw_readl(GPIO_INT_LVL(gpio));
-
- for_each_set_bit(pin, &sta, 8) {
- __raw_writel(1 << pin, GPIO_INT_CLR(gpio));
-
- /* if gpio is edge triggered, clear condition
- * before executing the hander so that we don't
- * miss edges
- */
- if (lvl & (0x100 << pin)) {
- unmasked = 1;
- chained_irq_exit(chip, desc);
- }
-
- generic_handle_irq(gpio_to_irq(gpio + pin));
- }
- }
-
- if (!unmasked)
- chained_irq_exit(chip, desc);
-
-}
-
-#ifdef CONFIG_PM
-void tegra_gpio_resume(void)
-{
- unsigned long flags;
- int b;
- int p;
-
- local_irq_save(flags);
-
- for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
- struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
-
- for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
- unsigned int gpio = (b<<5) | (p<<3);
- __raw_writel(bank->cnf[p], GPIO_CNF(gpio));
- __raw_writel(bank->out[p], GPIO_OUT(gpio));
- __raw_writel(bank->oe[p], GPIO_OE(gpio));
- __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
- __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
- }
- }
-
- local_irq_restore(flags);
-}
-
-void tegra_gpio_suspend(void)
-{
- unsigned long flags;
- int b;
- int p;
-
- local_irq_save(flags);
- for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
- struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
-
- for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
- unsigned int gpio = (b<<5) | (p<<3);
- bank->cnf[p] = __raw_readl(GPIO_CNF(gpio));
- bank->out[p] = __raw_readl(GPIO_OUT(gpio));
- bank->oe[p] = __raw_readl(GPIO_OE(gpio));
- bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio));
- bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio));
- }
- }
- local_irq_restore(flags);
-}
-
-static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
-{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- return irq_set_irq_wake(bank->irq, enable);
-}
-#endif
-
-static struct irq_chip tegra_gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = tegra_gpio_irq_ack,
- .irq_mask = tegra_gpio_irq_mask,
- .irq_unmask = tegra_gpio_irq_unmask,
- .irq_set_type = tegra_gpio_irq_set_type,
-#ifdef CONFIG_PM
- .irq_set_wake = tegra_gpio_wake_enable,
-#endif
-};
-
-
-/* This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-
-static int __init tegra_gpio_init(void)
-{
- struct tegra_gpio_bank *bank;
- int i;
- int j;
-
- for (i = 0; i < 7; i++) {
- for (j = 0; j < 4; j++) {
- int gpio = tegra_gpio_compose(i, j, 0);
- __raw_writel(0x00, GPIO_INT_ENB(gpio));
- }
- }
-
- gpiochip_add(&tegra_gpio_chip);
-
- for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
- bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))];
-
- irq_set_lockdep_class(i, &gpio_lock_class);
- irq_set_chip_data(i, bank);
- irq_set_chip_and_handler(i, &tegra_gpio_irq_chip,
- handle_simple_irq);
- set_irq_flags(i, IRQF_VALID);
- }
-
- for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
- bank = &tegra_gpio_banks[i];
-
- irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler);
- irq_set_handler_data(bank->irq, bank);
-
- for (j = 0; j < 4; j++)
- spin_lock_init(&bank->lvl_lock[j]);
- }
-
- return 0;
-}
-
-postcore_initcall(tegra_gpio_init);
-
-void __init tegra_gpio_config(struct tegra_gpio_table *table, int num)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- int gpio = table[i].gpio;
-
- if (table[i].enable)
- tegra_gpio_enable(gpio);
- else
- tegra_gpio_disable(gpio);
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
- int i;
- int j;
-
- for (i = 0; i < 7; i++) {
- for (j = 0; j < 4; j++) {
- int gpio = tegra_gpio_compose(i, j, 0);
- seq_printf(s,
- "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
- i, j,
- __raw_readl(GPIO_CNF(gpio)),
- __raw_readl(GPIO_OE(gpio)),
- __raw_readl(GPIO_OUT(gpio)),
- __raw_readl(GPIO_IN(gpio)),
- __raw_readl(GPIO_INT_STA(gpio)),
- __raw_readl(GPIO_INT_ENB(gpio)),
- __raw_readl(GPIO_INT_LVL(gpio)));
- }
- }
- return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
- .open = dbg_gpio_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init tegra_gpio_debuginit(void)
-{
- (void) debugfs_create_file("tegra_gpio", S_IRUGO,
- NULL, NULL, &debug_fops);
- return 0;
-}
-late_initcall(tegra_gpio_debuginit);
-#endif
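
A detail worth noting from the deleted driver: Tegra's GPIO_MSK_* registers take the write mask in the upper byte and the data in the lower byte, so a single store updates one bit without a read-modify-write cycle or a lock. Condensed illustration (the register pointer is supplied by the caller):

#include <linux/io.h>
#include <linux/types.h>

static void tegra_style_masked_write(void __iomem *reg, unsigned bit, int value)
{
        u32 val = 0x100 << bit;         /* upper byte: only this bit may change */

        if (value)
                val |= 1 << bit;        /* lower byte: the new value */
        __raw_writel(val, reg);
}
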
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 32ce90840ee1..7d107be63eb4 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -635,16 +635,13 @@ static const struct file_operations set_rate_fops = {
static struct dentry *clk_debugfs_register_dir(struct clk *c,
struct dentry *p_dentry)
{
- struct dentry *d, *clk_d, *child, *child_tmp;
- char s[255];
- char *p = s;
+ struct dentry *d, *clk_d;
+ const char *p = c->name;
- if (c->name == NULL)
- p += sprintf(p, "BUG");
- else
- p += sprintf(p, "%s", c->name);
+ if (!p)
+ p = "BUG";
- clk_d = debugfs_create_dir(s, p_dentry);
+ clk_d = debugfs_create_dir(p, p_dentry);
if (!clk_d)
return NULL;
@@ -666,24 +663,10 @@ static struct dentry *clk_debugfs_register_dir(struct clk *c,
return clk_d;
err_out:
- d = clk_d;
- list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(clk_d);
+ debugfs_remove_recursive(clk_d);
return NULL;
}
-static void clk_debugfs_remove_dir(struct dentry *cdentry)
-{
- struct dentry *d, *child, *child_tmp;
-
- d = cdentry;
- list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(cdentry);
- return ;
-}
-
static int clk_debugfs_register_one(struct clk *c)
{
struct clk *pa = c->parent_periph;
@@ -700,7 +683,7 @@ static int clk_debugfs_register_one(struct clk *c)
c->dent_bus = clk_debugfs_register_dir(c,
bpa->dent_bus ? bpa->dent_bus : bpa->dent);
if ((!c->dent_bus) && (c->dent)) {
- clk_debugfs_remove_dir(c->dent);
+ debugfs_remove_recursive(c->dent);
c->dent = NULL;
return -ENOMEM;
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 55657c222d7c..3b5ea68acbb8 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -322,11 +322,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
fault = __do_page_fault(mm, addr, fsr, tsk);
up_read(&mm->mmap_sem);
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
if (fault & VM_FAULT_MAJOR)
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
else if (fault & VM_FAULT_MINOR)
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index a1387875a491..d53c35fe2ea7 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := clock.o gpio.o time.o devices.o cpu.o system.o irq-common.o
+obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o
# MX51 uses the TZIC interrupt controller, older platforms use AVIC
obj-$(CONFIG_MXC_TZIC) += tzic.o
diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c
index eee1b6096a08..fb166b20f60f 100644
--- a/arch/arm/plat-mxc/devices.c
+++ b/arch/arm/plat-mxc/devices.c
@@ -89,3 +89,14 @@ err:
return pdev;
}
+
+struct device mxc_aips_bus = {
+ .init_name = "mxc_aips",
+ .parent = &platform_bus,
+};
+
+static int __init mxc_device_init(void)
+{
+ return device_register(&mxc_aips_bus);
+}
+core_initcall(mxc_device_init);
diff --git a/arch/arm/plat-mxc/devices/Makefile b/arch/arm/plat-mxc/devices/Makefile
index ad2922acf480..b41bf972b54b 100644
--- a/arch/arm/plat-mxc/devices/Makefile
+++ b/arch/arm/plat-mxc/devices/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_IMX_HAVE_PLATFORM_FEC) += platform-fec.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_FSL_USB2_UDC) += platform-fsl-usb2-udc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_GPIO_KEYS) += platform-gpio_keys.o
+obj-y += platform-gpio-mxc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX21_HCD) += platform-imx21-hcd.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX2_WDT) += platform-imx2-wdt.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMXDI_RTC) += platform-imxdi_rtc.o
diff --git a/arch/arm/plat-mxc/devices/platform-fec.c b/arch/arm/plat-mxc/devices/platform-fec.c
index ccc789e21daa..4fc6ffc2a13e 100644
--- a/arch/arm/plat-mxc/devices/platform-fec.c
+++ b/arch/arm/plat-mxc/devices/platform-fec.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
diff --git a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c
index 59c33f6e401c..23ce08e6ffd2 100644
--- a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c
+++ b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
diff --git a/arch/arm/plat-mxc/devices/platform-gpio-mxc.c b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c
new file mode 100644
index 000000000000..a7919a241032
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2011 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <mach/devices-common.h>
+
+struct platform_device *__init mxc_register_gpio(char *name, int id,
+ resource_size_t iobase, resource_size_t iosize, int irq, int irq_high)
+{
+ struct resource res[] = {
+ {
+ .start = iobase,
+ .end = iobase + iosize - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = irq,
+ .end = irq,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = irq_high,
+ .end = irq_high,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return platform_device_register_resndata(&mxc_aips_bus,
+ name, id, res, ARRAY_SIZE(res), NULL, 0);
+}
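
The new helper hangs three resources (the MMIO window plus the low and high IRQs) off the mxc_aips_bus device added in plat-mxc/devices.c above. A hypothetical call site in the style of the mm-imx*.c port registrations elsewhere in this series; the device name, base addresses and IRQ numbers below are placeholders, not real values:

#include <asm/sizes.h>
#include <mach/devices-common.h>

static void __init my_soc_register_gpios(void)
{
        /* one platform device per 32-pin GPIO port; no high IRQ on this SoC */
        mxc_register_gpio("imx31-gpio", 0, 0x53fcc000, SZ_16K, 52, 0);
        mxc_register_gpio("imx31-gpio", 1, 0x53fd0000, SZ_16K, 53, 0);
        mxc_register_gpio("imx31-gpio", 2, 0x53fd4000, SZ_16K, 54, 0);
}
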
diff --git a/arch/arm/plat-mxc/devices/platform-imx-fb.c b/arch/arm/plat-mxc/devices/platform-imx-fb.c
index 79a1cb18a5b0..2b0b5e0aa998 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-fb.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-fb.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
diff --git a/arch/arm/plat-mxc/devices/platform-ipu-core.c b/arch/arm/plat-mxc/devices/platform-ipu-core.c
index edf65034aea5..79d340ae0af1 100644
--- a/arch/arm/plat-mxc/devices/platform-ipu-core.c
+++ b/arch/arm/plat-mxc/devices/platform-ipu-core.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
diff --git a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c
index cc488f4b6204..e1763e03e7cb 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
diff --git a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c
index 90d762f6f93b..540d3a7d92df 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c
@@ -6,6 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index f97eb3615b2c..9bfae8bd5b8d 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -40,9 +40,10 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
#endif
#ifdef CONFIG_SOC_IMX25
+/* i.mx25 has the i.mx35 type cspi */
const struct imx_spi_imx_data imx25_cspi_data[] __initconst = {
#define imx25_cspi_data_entry(_id, _hwid) \
- imx_spi_imx_data_entry(MX25, CSPI, "imx25-cspi", _id, _hwid, SZ_16K)
+ imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K)
imx25_cspi_data_entry(0, 1),
imx25_cspi_data_entry(1, 2),
imx25_cspi_data_entry(2, 3),
@@ -79,8 +80,9 @@ const struct imx_spi_imx_data imx35_cspi_data[] __initconst = {
#endif /* ifdef CONFIG_SOC_IMX35 */
#ifdef CONFIG_SOC_IMX51
+/* i.mx51 has the i.mx35 type cspi */
const struct imx_spi_imx_data imx51_cspi_data __initconst =
- imx_spi_imx_data_entry_single(MX51, CSPI, "imx51-cspi", 2, , SZ_4K);
+ imx_spi_imx_data_entry_single(MX51, CSPI, "imx35-cspi", 2, , SZ_4K);
const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
#define imx51_ecspi_data_entry(_id, _hwid) \
@@ -91,12 +93,14 @@ const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
#endif /* ifdef CONFIG_SOC_IMX51 */
#ifdef CONFIG_SOC_IMX53
+/* i.mx53 has the i.mx35 type cspi */
const struct imx_spi_imx_data imx53_cspi_data __initconst =
- imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K);
+ imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K);
+/* i.mx53 has the i.mx51 type ecspi */
const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = {
#define imx53_ecspi_data_entry(_id, _hwid) \
- imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K)
+ imx_spi_imx_data_entry(MX53, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K)
imx53_ecspi_data_entry(0, 1),
imx53_ecspi_data_entry(1, 2),
};
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
deleted file mode 100644
index 6cd6d7f686f6..000000000000
--- a/arch/arm/plat-mxc/gpio.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * Based on code from Freescale,
- * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <mach/hardware.h>
-#include <asm-generic/bug.h>
-
-static struct mxc_gpio_port *mxc_gpio_ports;
-static int gpio_table_size;
-
-#define cpu_is_mx1_mx2() (cpu_is_mx1() || cpu_is_mx2())
-
-#define GPIO_DR (cpu_is_mx1_mx2() ? 0x1c : 0x00)
-#define GPIO_GDIR (cpu_is_mx1_mx2() ? 0x00 : 0x04)
-#define GPIO_PSR (cpu_is_mx1_mx2() ? 0x24 : 0x08)
-#define GPIO_ICR1 (cpu_is_mx1_mx2() ? 0x28 : 0x0C)
-#define GPIO_ICR2 (cpu_is_mx1_mx2() ? 0x2C : 0x10)
-#define GPIO_IMR (cpu_is_mx1_mx2() ? 0x30 : 0x14)
-#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18)
-
-#define GPIO_INT_LOW_LEV (cpu_is_mx1_mx2() ? 0x3 : 0x0)
-#define GPIO_INT_HIGH_LEV (cpu_is_mx1_mx2() ? 0x2 : 0x1)
-#define GPIO_INT_RISE_EDGE (cpu_is_mx1_mx2() ? 0x0 : 0x2)
-#define GPIO_INT_FALL_EDGE (cpu_is_mx1_mx2() ? 0x1 : 0x3)
-#define GPIO_INT_NONE 0x4
-
-/* Note: This driver assumes 32 GPIOs are handled in one register */
-
-static void _clear_gpio_irqstatus(struct mxc_gpio_port *port, u32 index)
-{
- __raw_writel(1 << index, port->base + GPIO_ISR);
-}
-
-static void _set_gpio_irqenable(struct mxc_gpio_port *port, u32 index,
- int enable)
-{
- u32 l;
-
- l = __raw_readl(port->base + GPIO_IMR);
- l = (l & (~(1 << index))) | (!!enable << index);
- __raw_writel(l, port->base + GPIO_IMR);
-}
-
-static void gpio_ack_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- _clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f);
-}
-
-static void gpio_mask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0);
-}
-
-static void gpio_unmask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1);
-}
-
-static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset);
-
-static int gpio_set_irq_type(struct irq_data *d, u32 type)
-{
- u32 gpio = irq_to_gpio(d->irq);
- struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
- u32 bit, val;
- int edge;
- void __iomem *reg = port->base;
-
- port->both_edges &= ~(1 << (gpio & 31));
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- edge = GPIO_INT_RISE_EDGE;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- edge = GPIO_INT_FALL_EDGE;
- break;
- case IRQ_TYPE_EDGE_BOTH:
- val = mxc_gpio_get(&port->chip, gpio & 31);
- if (val) {
- edge = GPIO_INT_LOW_LEV;
- pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
- } else {
- edge = GPIO_INT_HIGH_LEV;
- pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
- }
- port->both_edges |= 1 << (gpio & 31);
- break;
- case IRQ_TYPE_LEVEL_LOW:
- edge = GPIO_INT_LOW_LEV;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- edge = GPIO_INT_HIGH_LEV;
- break;
- default:
- return -EINVAL;
- }
-
- reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
- bit = gpio & 0xf;
- val = __raw_readl(reg) & ~(0x3 << (bit << 1));
- __raw_writel(val | (edge << (bit << 1)), reg);
- _clear_gpio_irqstatus(port, gpio & 0x1f);
-
- return 0;
-}
-
-static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
-{
- void __iomem *reg = port->base;
- u32 bit, val;
- int edge;
-
- reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
- bit = gpio & 0xf;
- val = __raw_readl(reg);
- edge = (val >> (bit << 1)) & 3;
- val &= ~(0x3 << (bit << 1));
- if (edge == GPIO_INT_HIGH_LEV) {
- edge = GPIO_INT_LOW_LEV;
- pr_debug("mxc: switch GPIO %d to low trigger\n", gpio);
- } else if (edge == GPIO_INT_LOW_LEV) {
- edge = GPIO_INT_HIGH_LEV;
- pr_debug("mxc: switch GPIO %d to high trigger\n", gpio);
- } else {
- pr_err("mxc: invalid configuration for GPIO %d: %x\n",
- gpio, edge);
- return;
- }
- __raw_writel(val | (edge << (bit << 1)), reg);
-}
-
-/* handle 32 interrupts in one status register */
-static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
-{
- u32 gpio_irq_no_base = port->virtual_irq_start;
-
- while (irq_stat != 0) {
- int irqoffset = fls(irq_stat) - 1;
-
- if (port->both_edges & (1 << irqoffset))
- mxc_flip_edge(port, irqoffset);
-
- generic_handle_irq(gpio_irq_no_base + irqoffset);
-
- irq_stat &= ~(1 << irqoffset);
- }
-}
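
The loop above drains the status word from the highest pending bit downwards; fls() returns the 1-based index of the most significant set bit, so fls(x) - 1 is the bit number. A self-contained user-space sketch of the same walk (dispatch() and the builtin-based fls are illustrative, not from this patch):

#include <stdio.h>

static int fls_u32(unsigned int x)
{
	/* 1-based index of the highest set bit, 0 if none (as fls() does) */
	return x ? 32 - __builtin_clz(x) : 0;
}

static void dispatch(unsigned int status)
{
	while (status != 0) {
		int bit = fls_u32(status) - 1;	/* highest pending source */
		printf("handling irq offset %d\n", bit);
		status &= ~(1u << bit);		/* clear it and continue */
	}
}
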
-
-/* MX1 and MX3 have one interrupt *per* gpio port */
-static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
-{
- u32 irq_stat;
- struct mxc_gpio_port *port = irq_get_handler_data(irq);
-
- irq_stat = __raw_readl(port->base + GPIO_ISR) &
- __raw_readl(port->base + GPIO_IMR);
-
- mxc_gpio_irq_handler(port, irq_stat);
-}
-
-/* MX2 has one interrupt *for all* gpio ports */
-static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
-{
- int i;
- u32 irq_msk, irq_stat;
- struct mxc_gpio_port *port = irq_get_handler_data(irq);
-
- /* walk through all interrupt status registers */
- for (i = 0; i < gpio_table_size; i++) {
- irq_msk = __raw_readl(port[i].base + GPIO_IMR);
- if (!irq_msk)
- continue;
-
- irq_stat = __raw_readl(port[i].base + GPIO_ISR) & irq_msk;
- if (irq_stat)
- mxc_gpio_irq_handler(&port[i], irq_stat);
- }
-}
-
-/*
- * Set interrupt number "irq" in the GPIO as a wake-up source.
- * While the system is running, all registered GPIO interrupts need to have
- * wake-up enabled. When the system is suspended, only the selected GPIO
- * interrupts need to have wake-up enabled.
- * @param irq interrupt source number
- * @param enable enable as wake-up source if non-zero
- * @return This function returns 0 on success.
- */
-static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
-{
- u32 gpio = irq_to_gpio(d->irq);
- u32 gpio_idx = gpio & 0x1F;
- struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
-
- if (enable) {
- if (port->irq_high && (gpio_idx >= 16))
- enable_irq_wake(port->irq_high);
- else
- enable_irq_wake(port->irq);
- } else {
- if (port->irq_high && (gpio_idx >= 16))
- disable_irq_wake(port->irq_high);
- else
- disable_irq_wake(port->irq);
- }
-
- return 0;
-}
-
-static struct irq_chip gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = gpio_ack_irq,
- .irq_mask = gpio_mask_irq,
- .irq_unmask = gpio_unmask_irq,
- .irq_set_type = gpio_set_irq_type,
- .irq_set_wake = gpio_set_wake_irq,
-};
-
-static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
- int dir)
-{
- struct mxc_gpio_port *port =
- container_of(chip, struct mxc_gpio_port, chip);
- u32 l;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- l = __raw_readl(port->base + GPIO_GDIR);
- if (dir)
- l |= 1 << offset;
- else
- l &= ~(1 << offset);
- __raw_writel(l, port->base + GPIO_GDIR);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct mxc_gpio_port *port =
- container_of(chip, struct mxc_gpio_port, chip);
- void __iomem *reg = port->base + GPIO_DR;
- u32 l;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- l = (__raw_readl(reg) & (~(1 << offset))) | (!!value << offset);
- __raw_writel(l, reg);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct mxc_gpio_port *port =
- container_of(chip, struct mxc_gpio_port, chip);
-
- return (__raw_readl(port->base + GPIO_PSR) >> offset) & 1;
-}
-
-static int mxc_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- _set_gpio_direction(chip, offset, 0);
- return 0;
-}
-
-static int mxc_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- mxc_gpio_set(chip, offset, value);
- _set_gpio_direction(chip, offset, 1);
- return 0;
-}
-
-/*
- * This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-
-int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
-{
- int i, j;
-
- /* save for local usage */
- mxc_gpio_ports = port;
- gpio_table_size = cnt;
-
- printk(KERN_INFO "MXC GPIO hardware\n");
-
- for (i = 0; i < cnt; i++) {
- /* disable the interrupt and clear the status */
- __raw_writel(0, port[i].base + GPIO_IMR);
- __raw_writel(~0, port[i].base + GPIO_ISR);
- for (j = port[i].virtual_irq_start;
- j < port[i].virtual_irq_start + 32; j++) {
- irq_set_lockdep_class(j, &gpio_lock_class);
- irq_set_chip_and_handler(j, &gpio_irq_chip,
- handle_level_irq);
- set_irq_flags(j, IRQF_VALID);
- }
-
- /* register gpio chip */
- port[i].chip.direction_input = mxc_gpio_direction_input;
- port[i].chip.direction_output = mxc_gpio_direction_output;
- port[i].chip.get = mxc_gpio_get;
- port[i].chip.set = mxc_gpio_set;
- port[i].chip.base = i * 32;
- port[i].chip.ngpio = 32;
-
- spin_lock_init(&port[i].lock);
-
-		/* it's a serious configuration bug when it fails */
-		BUG_ON(gpiochip_add(&port[i].chip) < 0);
-
- if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
- /* setup one handler for each entry */
- irq_set_chained_handler(port[i].irq,
- mx3_gpio_irq_handler);
- irq_set_handler_data(port[i].irq, &port[i]);
- if (port[i].irq_high) {
- /* setup handler for GPIO 16 to 31 */
- irq_set_chained_handler(port[i].irq_high,
- mx3_gpio_irq_handler);
- irq_set_handler_data(port[i].irq_high,
- &port[i]);
- }
- }
- }
-
- if (cpu_is_mx2()) {
- /* setup one handler for all GPIO interrupts */
- irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler);
- irq_set_handler_data(port[0].irq, port);
- }
-
- return 0;
-}
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index da7991832af6..4e3d97890d69 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -43,6 +43,15 @@ extern void mx35_init_irq(void);
extern void mx50_init_irq(void);
extern void mx51_init_irq(void);
extern void mx53_init_irq(void);
+extern void imx1_soc_init(void);
+extern void imx21_soc_init(void);
+extern void imx25_soc_init(void);
+extern void imx27_soc_init(void);
+extern void imx31_soc_init(void);
+extern void imx35_soc_init(void);
+extern void imx50_soc_init(void);
+extern void imx51_soc_init(void);
+extern void imx53_soc_init(void);
extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
extern int mx1_clocks_init(unsigned long fref);
@@ -55,7 +64,8 @@ extern int mx51_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
extern int mx53_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
-extern int mxc_register_gpios(void);
+extern struct platform_device *mxc_register_gpio(char *name, int id,
+ resource_size_t iobase, resource_size_t iosize, int irq, int irq_high);
extern int mxc_register_device(struct platform_device *pdev, void *data);
extern void mxc_set_cpu_type(unsigned int type);
extern void mxc_arch_reset_init(void __iomem *);
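
The table-driven mxc_gpio_init() gives way to per-port registration through the new helper. A hypothetical caller is sketched below; the device name, base address, size, and IRQ number are placeholders, not values taken from this patch.

/* Register GPIO port 0 of an i.MX31-class SoC (all arguments assumed). */
mxc_register_gpio("imx31-gpio", 0,
		  MX31_GPIO1_BASE_ADDR, SZ_16K,	/* iobase, iosize */
		  MX31_INT_GPIO1,		/* irq */
		  0);				/* no separate irq_high */
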
diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h
index fa8477337f91..03f626645374 100644
--- a/arch/arm/plat-mxc/include/mach/devices-common.h
+++ b/arch/arm/plat-mxc/include/mach/devices-common.h
@@ -10,6 +10,8 @@
#include <linux/platform_device.h>
#include <linux/init.h>
+extern struct device mxc_aips_bus;
+
struct platform_device *imx_add_platform_device_dmamask(
const char *name, int id,
const struct resource *res, unsigned int num_resources,
diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h
index a2747f12813e..31c820c1b796 100644
--- a/arch/arm/plat-mxc/include/mach/gpio.h
+++ b/arch/arm/plat-mxc/include/mach/gpio.h
@@ -36,31 +36,4 @@
#define gpio_to_irq(gpio) (MXC_GPIO_IRQ_START + (gpio))
#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
-struct mxc_gpio_port {
- void __iomem *base;
- int irq;
- int irq_high;
- int virtual_irq_start;
- struct gpio_chip chip;
- u32 both_edges;
- spinlock_t lock;
-};
-
-#define DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, _irq_high) \
- { \
- .chip.label = "gpio-" #_id, \
- .irq = _irq, \
- .irq_high = _irq_high, \
- .base = soc ## _IO_ADDRESS( \
- soc ## _GPIO ## _hwid ## _BASE_ADDR), \
- .virtual_irq_start = MXC_GPIO_IRQ_START + (_id) * 32, \
- }
-
-#define DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, _irq) \
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, 0)
-#define DEFINE_IMX_GPIO_PORT(soc, _id, _hwid) \
- DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, 0)
-
-int mxc_gpio_init(struct mxc_gpio_port*, int);
-
#endif
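
With the port table gone, only the linear irq/gpio translation remains in this header. The mapping is a fixed offset and therefore trivially invertible:

/* Both directions are simple offsets from MXC_GPIO_IRQ_START. */
int irq = gpio_to_irq(5);	/* == MXC_GPIO_IRQ_START + 5 */
int gpio = irq_to_gpio(irq);	/* == 5 again */
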
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index 35c89bcdf758..00e812bbd81d 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -11,6 +11,8 @@
#ifndef __ASM_ARCH_MXC_IRQS_H__
#define __ASM_ARCH_MXC_IRQS_H__
+#include <asm-generic/gpio.h>
+
/*
* SoCs with TZIC interrupt controller have 128 IRQs, those with AVIC have 64
*/
@@ -22,30 +24,13 @@
#define MXC_GPIO_IRQ_START MXC_INTERNAL_IRQS
-/* these are ordered by size to support multi-SoC kernels */
-#if defined CONFIG_SOC_IMX53
-#define MXC_GPIO_IRQS (32 * 7)
-#elif defined CONFIG_ARCH_MX2
-#define MXC_GPIO_IRQS (32 * 6)
-#elif defined CONFIG_SOC_IMX50
-#define MXC_GPIO_IRQS (32 * 6)
-#elif defined CONFIG_ARCH_MX1
-#define MXC_GPIO_IRQS (32 * 4)
-#elif defined CONFIG_ARCH_MX25
-#define MXC_GPIO_IRQS (32 * 4)
-#elif defined CONFIG_SOC_IMX51
-#define MXC_GPIO_IRQS (32 * 4)
-#elif defined CONFIG_ARCH_MX3
-#define MXC_GPIO_IRQS (32 * 3)
-#endif
-
/*
* The next 16 interrupts are for board specific purposes. Since
* the kernel can only run on one machine at a time, we can re-use
* these. If you need more, increase MXC_BOARD_IRQS, but keep it
* within sensible limits.
*/
-#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS)
+#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + ARCH_NR_GPIOS)
#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
#define MXC_BOARD_IRQS 80
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index c44886062f8e..685c78716d95 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -10,6 +10,7 @@
#define STE_DMA40_H
#include <linux/dmaengine.h>
+#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index c9122dd6ee8d..964704f40bbe 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -480,13 +480,10 @@ static struct dentry *clk_debugfs_root;
static int clk_debugfs_register_one(struct clk *c)
{
int err;
- struct dentry *d, *child, *child_tmp;
+ struct dentry *d;
struct clk *pa = c->parent;
- char s[255];
- char *p = s;
- p += sprintf(p, "%s", c->name);
- d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
+ d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
if (!d)
return -ENOMEM;
c->dent = d;
@@ -509,10 +506,7 @@ static int clk_debugfs_register_one(struct clk *c)
return 0;
err_out:
- d = c->dent;
- list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(c->dent);
+ debugfs_remove_recursive(c->dent);
return err;
}
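
The rewritten error path (and the matching ones in plat-samsung and plat-spear below) relies on debugfs_remove_recursive(), which removes a dentry together with everything beneath it, so the manual d_subdirs walk is no longer needed. A minimal sketch of the pattern, with all names invented for illustration:

#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *example_root;
static u32 example_counter;

static int __init example_debugfs_init(void)
{
	struct dentry *d;

	example_root = debugfs_create_dir("example", NULL);
	if (!example_root)
		return -ENOMEM;

	d = debugfs_create_u32("counter", 0600, example_root,
			       &example_counter);
	if (!d)
		goto err_out;

	return 0;

err_out:
	/* one call tears down the dir and any children created so far */
	debugfs_remove_recursive(example_root);
	return -ENOMEM;
}
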
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index ec97e00cb581..91e8de3db085 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -174,12 +174,32 @@ struct omap_gpio_dev_attr {
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
};
+struct omap_gpio_reg_offs {
+ u16 revision;
+ u16 direction;
+ u16 datain;
+ u16 dataout;
+ u16 set_dataout;
+ u16 clr_dataout;
+ u16 irqstatus;
+ u16 irqstatus2;
+ u16 irqenable;
+ u16 set_irqenable;
+ u16 clr_irqenable;
+ u16 debounce;
+ u16 debounce_en;
+
+ bool irqenable_inv;
+};
+
struct omap_gpio_platform_data {
u16 virtual_irq_start;
int bank_type;
int bank_width; /* GPIO bank width */
int bank_stride; /* Only needed for omap1 MPUIO */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
+
+ struct omap_gpio_reg_offs *regs;
};
/* TODO: Analyze removing gpio_bank_count usage from driver code */
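
Routing every access through a per-revision offset table is what lets a single driver serve banks whose register layouts differ (OMAP1 MPUIO vs. OMAP2+ GPIO, for instance). A sketch of how a consumer might use it; example_bank and its fields are assumptions, not part of this patch:

struct example_bank {
	void __iomem *base;
	struct omap_gpio_reg_offs *regs;
};

/* Read the input level of one line through the offset table. */
static int example_gpio_get(struct example_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (__raw_readl(reg) >> offset) & 1;
}
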
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index e4c349ff9fd8..ee405b36df4b 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -44,6 +44,10 @@ extern struct device omap_device_parent;
#define OMAP_DEVICE_STATE_IDLE 2
#define OMAP_DEVICE_STATE_SHUTDOWN 3
+/* omap_device.flags values */
+#define OMAP_DEVICE_SUSPENDED BIT(0)
+#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1)
+
/**
* struct omap_device - omap_device wrapper for platform_devices
* @pdev: platform_device
@@ -73,6 +77,7 @@ struct omap_device {
s8 pm_lat_level;
u8 hwmods_cnt;
u8 _state;
+ u8 flags;
};
/* Device driver interface (call via platform_data fn ptrs) */
@@ -117,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od);
int omap_device_disable_clocks(struct omap_device *od);
int omap_device_enable_clocks(struct omap_device *od);
+static inline void omap_device_disable_idle_on_suspend(struct omap_device *od)
+{
+ od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND;
+}
/*
* Entries should be kept in latency order ascending
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 49fc0df0c21f..2526fa312b8a 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -537,6 +537,7 @@ int omap_early_device_register(struct omap_device *od)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
static int _od_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -563,13 +564,55 @@ static int _od_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
+#endif
-static struct dev_power_domain omap_device_power_domain = {
+#ifdef CONFIG_SUSPEND
+static int _od_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_device *od = to_omap_device(pdev);
+ int ret;
+
+ if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
+ return pm_generic_suspend_noirq(dev);
+
+ ret = pm_generic_suspend_noirq(dev);
+
+ if (!ret && !pm_runtime_status_suspended(dev)) {
+ if (pm_generic_runtime_suspend(dev) == 0) {
+ omap_device_idle(pdev);
+ od->flags |= OMAP_DEVICE_SUSPENDED;
+ }
+ }
+
+ return ret;
+}
+
+static int _od_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_device *od = to_omap_device(pdev);
+
+ if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
+ return pm_generic_resume_noirq(dev);
+
+ if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
+ !pm_runtime_status_suspended(dev)) {
+ od->flags &= ~OMAP_DEVICE_SUSPENDED;
+ omap_device_enable(pdev);
+ pm_generic_runtime_resume(dev);
+ }
+
+ return pm_generic_resume_noirq(dev);
+}
+#endif
+
+static struct dev_pm_domain omap_device_pm_domain = {
.ops = {
- .runtime_suspend = _od_runtime_suspend,
- .runtime_idle = _od_runtime_idle,
- .runtime_resume = _od_runtime_resume,
+ SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
+ _od_runtime_idle)
USE_PLATFORM_PM_SLEEP_OPS
+ SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq)
}
};
@@ -586,7 +629,7 @@ int omap_device_register(struct omap_device *od)
pr_debug("omap_device: %s: registering\n", od->pdev.name);
od->pdev.dev.parent = &omap_device_parent;
- od->pdev.dev.pwr_domain = &omap_device_power_domain;
+ od->pdev.dev.pm_domain = &omap_device_pm_domain;
return platform_device_register(&od->pdev);
}
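
Note how the pm_domain above composes its ops: SET_RUNTIME_PM_OPS() and SET_SYSTEM_SLEEP_PM_OPS() expand to nothing when the corresponding config options are disabled, which is why the callbacks themselves gained matching #ifdef guards. A reduced sketch of the pattern:

#include <linux/pm.h>

#ifdef CONFIG_PM_RUNTIME
static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_resume(struct device *dev) { return 0; }
#endif

static struct dev_pm_domain my_pm_domain = {
	.ops = {
		/* expands to empty initializers when runtime PM is off */
		SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume,
				   NULL)
	},
};
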
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h
deleted file mode 100644
index 1ab332e37d7d..000000000000
--- a/arch/arm/plat-pxa/include/plat/sdhci.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/arch/arm/plat-pxa/include/plat/sdhci.h
- *
- * Copyright 2010 Marvell
- * Zhangfei Gao <zhangfei.gao@marvell.com>
- *
- * PXA Platform - SDHCI platform data definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __PLAT_PXA_SDHCI_H
-#define __PLAT_PXA_SDHCI_H
-
-/* pxa specific flag */
-/* Require clock free running */
-#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
-
-/* Board design supports 8-bit data on SD/SDIO BUS */
-#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
-
-/*
- * struct sdhci_pxa_platdata - Platform device data for PXA SDHCI
- * @max_speed: the maximum speed supported
- * @quirks: quirks of specific device
- * @flags: flags for platform requirement
- */
-struct sdhci_pxa_platdata {
- unsigned int max_speed;
- unsigned int quirks;
- unsigned int flags;
-};
-
-#endif /* __PLAT_PXA_SDHCI_H */
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 772892826ffc..0c9f95d98561 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -458,7 +458,7 @@ static struct dentry *clk_debugfs_root;
static int clk_debugfs_register_one(struct clk *c)
{
int err;
- struct dentry *d, *child, *child_tmp;
+ struct dentry *d;
struct clk *pa = c->parent;
char s[255];
char *p = s;
@@ -488,10 +488,7 @@ static int clk_debugfs_register_one(struct clk *c)
return 0;
err_out:
- d = c->dent;
- list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(c->dent);
+ debugfs_remove_recursive(c->dent);
return err;
}
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
index 6fa474cb398e..67dd00381ea6 100644
--- a/arch/arm/plat-spear/clock.c
+++ b/arch/arm/plat-spear/clock.c
@@ -916,7 +916,7 @@ static struct dentry *clk_debugfs_root;
static int clk_debugfs_register_one(struct clk *c)
{
int err;
- struct dentry *d, *child;
+ struct dentry *d;
struct clk *pa = c->pclk;
char s[255];
char *p = s;
@@ -951,10 +951,7 @@ static int clk_debugfs_register_one(struct clk *c)
return 0;
err_out:
- d = c->dent;
- list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(c->dent);
+ debugfs_remove_recursive(c->dent);
return err;
}
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
index a0ed9a9839a5..9670e127b7b2 100644
--- a/arch/avr32/include/asm/delay.h
+++ b/arch/avr32/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef __ASM_AVR32_DELAY_H
-#define __ASM_AVR32_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/avr32/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
- __udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
-
-#endif /* __ASM_AVR32_DELAY_H */
+#include <asm-generic/delay.h>
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index a727f54d64d6..596f7305d93f 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -19,13 +19,6 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
void module_free(struct module *mod, void *module_region)
{
vfree(mod->arch.syminfo);
@@ -299,15 +292,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
return ret;
}
-int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relindex,
- struct module *module)
-{
- printk(KERN_ERR "module %s: REL relocations are not supported\n",
- module->name);
- return -ENOEXEC;
-}
-
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
@@ -316,7 +300,3 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
return 0;
}
-
-void module_arch_cleanup(struct module *module)
-{
-}
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index d619b17c4413..c7476295de80 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -953,6 +953,16 @@ config BFIN_GPTIMERS
To compile this driver as a module, choose M here: the module
will be called gptimers.
+config HAVE_PWM
+ tristate "Enable PWM API support"
+ depends on BFIN_GPTIMERS
+ help
+ Enable support for the Pulse Width Modulation framework (as
+ found in linux/pwm.h).
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm.
+
choice
prompt "Uncached DMA region"
default DMA_UNCACHED_1M
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 1c0a82a10591..d7ff2aee3fbc 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -58,13 +58,13 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=m
+CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=m
-CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=m
-CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 9e7c5379d3ff..7a075eaf6041 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -1,5 +1,48 @@
include include/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += futex.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local64.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += pgalloc.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += xor.h
+
header-y += bfin_sport.h
header-y += cachectl.h
header-y += fixed_code.h
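
Each generic-y entry asks Kbuild to emit a one-line wrapper header into the build tree, making the checked-in stubs deleted below redundant. For auxvec.h, the generated wrapper is equivalent to the file being removed:

#include <asm-generic/auxvec.h>
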
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index e48508957160..4c707dbe1ff9 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__
@@ -76,11 +76,6 @@ static inline void atomic_set_mask(int mask, atomic_t *v)
__raw_atomic_set_asm(&v->counter, mask);
}
-static inline int atomic_test_mask(int mask, atomic_t *v)
-{
- return __raw_atomic_test_asm(&v->counter, mask);
-}
-
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/arch/blackfin/include/asm/auxvec.h b/arch/blackfin/include/asm/auxvec.h
deleted file mode 100644
index 41fa68b71287..000000000000
--- a/arch/blackfin/include/asm/auxvec.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/auxvec.h>
diff --git a/arch/blackfin/include/asm/bitsperlong.h b/arch/blackfin/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/blackfin/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index eb7c1441d8f9..0928700b6bc4 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -1,9 +1,9 @@
/*
* Common header file for Blackfin family of processors.
*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
*/
#ifndef _BLACKFIN_H_
diff --git a/arch/blackfin/include/asm/bugs.h b/arch/blackfin/include/asm/bugs.h
deleted file mode 100644
index 61791e1ad9f5..000000000000
--- a/arch/blackfin/include/asm/bugs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bugs.h>
diff --git a/arch/blackfin/include/asm/cputime.h b/arch/blackfin/include/asm/cputime.h
deleted file mode 100644
index 6d68ad7e0ea3..000000000000
--- a/arch/blackfin/include/asm/cputime.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>
diff --git a/arch/blackfin/include/asm/current.h b/arch/blackfin/include/asm/current.h
deleted file mode 100644
index 4c51401b5537..000000000000
--- a/arch/blackfin/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/blackfin/include/asm/device.h b/arch/blackfin/include/asm/device.h
deleted file mode 100644
index f0a4c256403b..000000000000
--- a/arch/blackfin/include/asm/device.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/device.h>
diff --git a/arch/blackfin/include/asm/div64.h b/arch/blackfin/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/blackfin/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index edf2a2ad5183..c4ec959dad78 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -117,7 +117,6 @@
#ifndef __ASSEMBLY__
void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
-void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void do_hibernate(int wakeup);
void set_dram_srfs(void);
@@ -134,32 +133,6 @@ struct bfin_dpmc_platform_data {
unsigned short vr_settling_time; /* in us */
};
-#else
-
-#define PM_PUSH(x) \
- R0 = [P0 + (x - SRAM_BASE_ADDRESS)];\
- [--SP] = R0;\
-
-#define PM_POP(x) \
- R0 = [SP++];\
- [P0 + (x - SRAM_BASE_ADDRESS)] = R0;\
-
-#define PM_SYS_PUSH(x) \
- R0 = [P0 + (x - PLL_CTL)];\
- [--SP] = R0;\
-
-#define PM_SYS_POP(x) \
- R0 = [SP++];\
- [P0 + (x - PLL_CTL)] = R0;\
-
-#define PM_SYS_PUSH16(x) \
- R0 = w[P0 + (x - PLL_CTL)];\
- [--SP] = R0;\
-
-#define PM_SYS_POP16(x) \
- R0 = [SP++];\
- w[P0 + (x - PLL_CTL)] = R0;\
-
#endif
#endif /*_BLACKFIN_DPMC_H_*/
diff --git a/arch/blackfin/include/asm/emergency-restart.h b/arch/blackfin/include/asm/emergency-restart.h
deleted file mode 100644
index 3711bd9d50bd..000000000000
--- a/arch/blackfin/include/asm/emergency-restart.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/emergency-restart.h>
diff --git a/arch/blackfin/include/asm/errno.h b/arch/blackfin/include/asm/errno.h
deleted file mode 100644
index 4c82b503d92f..000000000000
--- a/arch/blackfin/include/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/errno.h>
diff --git a/arch/blackfin/include/asm/fb.h b/arch/blackfin/include/asm/fb.h
deleted file mode 100644
index 3a4988e8df45..000000000000
--- a/arch/blackfin/include/asm/fb.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fb.h>
diff --git a/arch/blackfin/include/asm/futex.h b/arch/blackfin/include/asm/futex.h
deleted file mode 100644
index 0b745828f42b..000000000000
--- a/arch/blackfin/include/asm/futex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/futex.h>
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 1ef8417f5d27..5a25856381ff 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -16,58 +16,13 @@
#include <mach/gpio.h>
-#define GPIO_0 0
-#define GPIO_1 1
-#define GPIO_2 2
-#define GPIO_3 3
-#define GPIO_4 4
-#define GPIO_5 5
-#define GPIO_6 6
-#define GPIO_7 7
-#define GPIO_8 8
-#define GPIO_9 9
-#define GPIO_10 10
-#define GPIO_11 11
-#define GPIO_12 12
-#define GPIO_13 13
-#define GPIO_14 14
-#define GPIO_15 15
-#define GPIO_16 16
-#define GPIO_17 17
-#define GPIO_18 18
-#define GPIO_19 19
-#define GPIO_20 20
-#define GPIO_21 21
-#define GPIO_22 22
-#define GPIO_23 23
-#define GPIO_24 24
-#define GPIO_25 25
-#define GPIO_26 26
-#define GPIO_27 27
-#define GPIO_28 28
-#define GPIO_29 29
-#define GPIO_30 30
-#define GPIO_31 31
-#define GPIO_32 32
-#define GPIO_33 33
-#define GPIO_34 34
-#define GPIO_35 35
-#define GPIO_36 36
-#define GPIO_37 37
-#define GPIO_38 38
-#define GPIO_39 39
-#define GPIO_40 40
-#define GPIO_41 41
-#define GPIO_42 42
-#define GPIO_43 43
-#define GPIO_44 44
-#define GPIO_45 45
-#define GPIO_46 46
-#define GPIO_47 47
-
#define PERIPHERAL_USAGE 1
#define GPIO_USAGE 0
+#ifndef BFIN_GPIO_PINT
+# define BFIN_GPIO_PINT 0
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
@@ -89,7 +44,7 @@
* MODIFICATION HISTORY :
**************************************************************/
-#ifndef CONFIG_BF54x
+#if !BFIN_GPIO_PINT
void set_gpio_dir(unsigned, unsigned short);
void set_gpio_inen(unsigned, unsigned short);
void set_gpio_polar(unsigned, unsigned short);
@@ -164,6 +119,10 @@ struct gpio_port_t {
#ifdef BFIN_SPECIAL_GPIO_BANKS
void bfin_special_gpio_free(unsigned gpio);
int bfin_special_gpio_request(unsigned gpio, const char *label);
+# ifdef CONFIG_PM
+void bfin_special_gpio_pm_hibernate_restore(void);
+void bfin_special_gpio_pm_hibernate_suspend(void);
+# endif
#endif
#ifdef CONFIG_PM
@@ -182,7 +141,7 @@ static inline void bfin_pm_standby_restore(void)
void bfin_gpio_pm_hibernate_restore(void);
void bfin_gpio_pm_hibernate_suspend(void);
-#ifndef CONFIG_BF54x
+# if !BFIN_GPIO_PINT
int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl);
struct gpio_port_s {
@@ -199,8 +158,9 @@ struct gpio_port_s {
unsigned short reserved;
unsigned short mux;
};
-#endif /*CONFIG_BF54x*/
+# endif
#endif /*CONFIG_PM*/
+
/***********************************************************
*
* FUNCTIONS: Blackfin GPIO Driver
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index 38657dac1235..38bddcb190c8 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -193,6 +193,16 @@ uint16_t get_enabled_gptimers(void);
uint32_t get_gptimer_status(unsigned int group);
void set_gptimer_status(unsigned int group, uint32_t value);
+static inline void enable_gptimer(unsigned int timer_id)
+{
+ enable_gptimers(1 << timer_id);
+}
+
+static inline void disable_gptimer(unsigned int timer_id)
+{
+ disable_gptimers(1 << timer_id);
+}
+
/*
* All Blackfin system MMRs are padded to 32bits even if the register
* itself is only 16bits. So use a helper macro to streamline this.
@@ -209,6 +219,15 @@ struct bfin_gptimer_regs {
u32 width;
};
+/*
+ * bfin group timer registers layout
+ */
+struct bfin_gptimer_group_regs {
+ __BFP(enable);
+ __BFP(disable);
+ u32 status;
+};
+
#undef __BFP
#endif
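
The new single-timer helpers spare callers from building a one-bit mask by hand. Usage sketch:

/* Before: the caller constructed the mask itself. */
enable_gptimers(1 << 2);	/* timer 2 */

/* After: pass the timer id directly. */
enable_gptimer(2);
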
diff --git a/arch/blackfin/include/asm/hw_irq.h b/arch/blackfin/include/asm/hw_irq.h
deleted file mode 100644
index 1f5ef7da0045..000000000000
--- a/arch/blackfin/include/asm/hw_irq.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/hw_irq.h>
diff --git a/arch/blackfin/include/asm/ioctl.h b/arch/blackfin/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/blackfin/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/blackfin/include/asm/ipcbuf.h b/arch/blackfin/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/blackfin/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/blackfin/include/asm/irq_regs.h b/arch/blackfin/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/blackfin/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index b4bbb75a9e15..43eb4749de3d 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -18,12 +18,12 @@
extern unsigned long bfin_irq_flags;
#endif
-static inline void bfin_sti(unsigned long flags)
+static inline notrace void bfin_sti(unsigned long flags)
{
asm volatile("sti %0;" : : "d" (flags));
}
-static inline unsigned long bfin_cli(void)
+static inline notrace unsigned long bfin_cli(void)
{
unsigned long flags;
asm volatile("cli %0;" : "=d" (flags));
@@ -40,22 +40,22 @@ static inline unsigned long bfin_cli(void)
/*
* Hard, untraced CPU interrupt flag manipulation and access.
*/
-static inline void __hard_local_irq_disable(void)
+static inline notrace void __hard_local_irq_disable(void)
{
bfin_cli();
}
-static inline void __hard_local_irq_enable(void)
+static inline notrace void __hard_local_irq_enable(void)
{
bfin_sti(bfin_irq_flags);
}
-static inline unsigned long hard_local_save_flags(void)
+static inline notrace unsigned long hard_local_save_flags(void)
{
return bfin_read_IMASK();
}
-static inline unsigned long __hard_local_irq_save(void)
+static inline notrace unsigned long __hard_local_irq_save(void)
{
unsigned long flags;
flags = bfin_cli();
@@ -65,18 +65,18 @@ static inline unsigned long __hard_local_irq_save(void)
return flags;
}
-static inline int hard_irqs_disabled_flags(unsigned long flags)
+static inline notrace int hard_irqs_disabled_flags(unsigned long flags)
{
return (flags & ~0x3f) == 0;
}
-static inline int hard_irqs_disabled(void)
+static inline notrace int hard_irqs_disabled(void)
{
unsigned long flags = hard_local_save_flags();
return hard_irqs_disabled_flags(flags);
}
-static inline void __hard_local_irq_restore(unsigned long flags)
+static inline notrace void __hard_local_irq_restore(unsigned long flags)
{
if (!hard_irqs_disabled_flags(flags))
__hard_local_irq_enable();
@@ -113,31 +113,31 @@ void ipipe_check_context(struct ipipe_domain *ipd);
/*
* Interrupt pipe interface to linux/irqflags.h.
*/
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
__check_irqop_context();
__ipipe_stall_root();
barrier();
}
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
{
barrier();
__check_irqop_context();
__ipipe_unstall_root();
}
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
return __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags;
}
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
{
return flags == bfin_no_irqs;
}
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
@@ -148,13 +148,13 @@ static inline unsigned long arch_local_irq_save(void)
return flags;
}
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
{
__check_irqop_context();
__ipipe_restore_root(flags == bfin_no_irqs);
}
-static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
+static inline notrace unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
{
/*
* Merge virtual and real interrupt mask bits into a single
@@ -163,7 +163,7 @@ static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
return (real & ~(1 << 31)) | ((virt != 0) << 31);
}
-static inline int arch_demangle_irq_bits(unsigned long *x)
+static inline notrace int arch_demangle_irq_bits(unsigned long *x)
{
int virt = (*x & (1 << 31)) != 0;
*x &= ~(1L << 31);
@@ -174,7 +174,7 @@ static inline int arch_demangle_irq_bits(unsigned long *x)
* Interface to various arch routines that may be traced.
*/
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
-static inline void hard_local_irq_disable(void)
+static inline notrace void hard_local_irq_disable(void)
{
if (!hard_irqs_disabled()) {
__hard_local_irq_disable();
@@ -182,7 +182,7 @@ static inline void hard_local_irq_disable(void)
}
}
-static inline void hard_local_irq_enable(void)
+static inline notrace void hard_local_irq_enable(void)
{
if (hard_irqs_disabled()) {
ipipe_trace_end(0x80000000);
@@ -190,7 +190,7 @@ static inline void hard_local_irq_enable(void)
}
}
-static inline unsigned long hard_local_irq_save(void)
+static inline notrace unsigned long hard_local_irq_save(void)
{
unsigned long flags = hard_local_save_flags();
if (!hard_irqs_disabled_flags(flags)) {
@@ -200,7 +200,7 @@ static inline unsigned long hard_local_irq_save(void)
return flags;
}
-static inline void hard_local_irq_restore(unsigned long flags)
+static inline notrace void hard_local_irq_restore(unsigned long flags)
{
if (!hard_irqs_disabled_flags(flags)) {
ipipe_trace_end(0x80000001);
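
The notrace additions throughout this header keep the function tracer (and the I-pipe tracer) from instrumenting the very helpers that run while interrupt state is being manipulated, which would otherwise recurse. At the time, <linux/compiler.h> defined the attribute roughly as:

/* shown for reference; the real definition lives in <linux/compiler.h> */
#define notrace __attribute__((no_instrument_function))
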
diff --git a/arch/blackfin/include/asm/kdebug.h b/arch/blackfin/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/blackfin/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/blackfin/include/asm/kmap_types.h b/arch/blackfin/include/asm/kmap_types.h
deleted file mode 100644
index 3575c64af42a..000000000000
--- a/arch/blackfin/include/asm/kmap_types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kmap_types.h>
diff --git a/arch/blackfin/include/asm/local.h b/arch/blackfin/include/asm/local.h
deleted file mode 100644
index c11c530f74d0..000000000000
--- a/arch/blackfin/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/blackfin/include/asm/local64.h b/arch/blackfin/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/blackfin/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/blackfin/include/asm/mman.h b/arch/blackfin/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/blackfin/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/blackfin/include/asm/module.h b/arch/blackfin/include/asm/module.h
index 4282b169ead9..ed5689b82c9f 100644
--- a/arch/blackfin/include/asm/module.h
+++ b/arch/blackfin/include/asm/module.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _ASM_BFIN_MODULE_H
#define _ASM_BFIN_MODULE_H
diff --git a/arch/blackfin/include/asm/msgbuf.h b/arch/blackfin/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a6..000000000000
--- a/arch/blackfin/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
index f726e3a80ad0..ff6101aa2c71 100644
--- a/arch/blackfin/include/asm/mutex.h
+++ b/arch/blackfin/include/asm/mutex.h
@@ -1,76 +1 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- *
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-
-#ifndef CONFIG_SMP
-#include <asm-generic/mutex.h>
-#else
-
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- fail_fn(count);
- else
- smp_mb();
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
-}
-
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- smp_mb();
- if (unlikely(atomic_inc_return(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- /*
- * We have two variants here. The cmpxchg based one is the best one
- * because it never induce a false contention state. It is included
- * here because architectures using the inc/dec algorithms over the
- * xchg ones are much more likely to support cmpxchg natively.
- *
- * If not we fall back to the spinlock based variant - that is
- * just as efficient (and simpler) as a 'destructive' probing of
- * the mutex state would be.
- */
-#ifdef __HAVE_ARCH_CMPXCHG
- if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
- smp_mb();
- return 1;
- }
- return 0;
-#else
- return fail_fn(count);
-#endif
-}
-
-#endif
-
-#endif
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index d0ce975bcd48..7202404966f6 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _BLACKFIN_PAGE_H
#define _BLACKFIN_PAGE_H
diff --git a/arch/blackfin/include/asm/param.h b/arch/blackfin/include/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/blackfin/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
index d49bb261d9b7..28c2498c9c98 100644
--- a/arch/blackfin/include/asm/pda.h
+++ b/arch/blackfin/include/asm/pda.h
@@ -54,6 +54,16 @@ struct blackfin_pda { /* Per-processor Data Area */
#endif
};
+struct blackfin_initial_pda {
+ void *retx;
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ void *dcplb_doublefault_addr;
+ void *icplb_doublefault_addr;
+ void *retx_doublefault;
+ unsigned seqstat_doublefault;
+#endif
+};
+
extern struct blackfin_pda cpu_pda[];
#endif /* __ASSEMBLY__ */
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h
deleted file mode 100644
index 06a959d67234..000000000000
--- a/arch/blackfin/include/asm/percpu.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/percpu.h>
diff --git a/arch/blackfin/include/asm/pgalloc.h b/arch/blackfin/include/asm/pgalloc.h
deleted file mode 100644
index f261cb7dda06..000000000000
--- a/arch/blackfin/include/asm/pgalloc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/pgalloc.h>
diff --git a/arch/blackfin/include/asm/resource.h b/arch/blackfin/include/asm/resource.h
deleted file mode 100644
index 04bc4db8921b..000000000000
--- a/arch/blackfin/include/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h
deleted file mode 100644
index d177a1588958..000000000000
--- a/arch/blackfin/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _BLACKFIN_SCATTERLIST_H
-#define _BLACKFIN_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_BLACKFIN_SCATTERLIST_H) */
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 14a3e66d9167..fbd408475725 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _BLACKFIN_SECTIONS_H
#define _BLACKFIN_SECTIONS_H
diff --git a/arch/blackfin/include/asm/sembuf.h b/arch/blackfin/include/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef7..000000000000
--- a/arch/blackfin/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/blackfin/include/asm/serial.h b/arch/blackfin/include/asm/serial.h
deleted file mode 100644
index a0cb0caff152..000000000000
--- a/arch/blackfin/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/blackfin/include/asm/setup.h b/arch/blackfin/include/asm/setup.h
deleted file mode 100644
index 552df83f1a49..000000000000
--- a/arch/blackfin/include/asm/setup.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/setup.h>
diff --git a/arch/blackfin/include/asm/shmbuf.h b/arch/blackfin/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de38..000000000000
--- a/arch/blackfin/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/blackfin/include/asm/shmparam.h b/arch/blackfin/include/asm/shmparam.h
deleted file mode 100644
index 93f30deb95d0..000000000000
--- a/arch/blackfin/include/asm/shmparam.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmparam.h>
diff --git a/arch/blackfin/include/asm/sigcontext.h b/arch/blackfin/include/asm/sigcontext.h
index ce4081a4d815..906bdc1f5fda 100644
--- a/arch/blackfin/include/asm/sigcontext.h
+++ b/arch/blackfin/include/asm/sigcontext.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _ASM_BLACKFIN_SIGCONTEXT_H
#define _ASM_BLACKFIN_SIGCONTEXT_H
diff --git a/arch/blackfin/include/asm/socket.h b/arch/blackfin/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/blackfin/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/blackfin/include/asm/sockios.h b/arch/blackfin/include/asm/sockios.h
deleted file mode 100644
index def6d4746ee7..000000000000
--- a/arch/blackfin/include/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sockios.h>
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 1f286e71c21f..2336093fca23 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H
diff --git a/arch/blackfin/include/asm/statfs.h b/arch/blackfin/include/asm/statfs.h
deleted file mode 100644
index 0b91fe198c20..000000000000
--- a/arch/blackfin/include/asm/statfs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/statfs.h>
diff --git a/arch/blackfin/include/asm/termbits.h b/arch/blackfin/include/asm/termbits.h
deleted file mode 100644
index 3935b106de79..000000000000
--- a/arch/blackfin/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/blackfin/include/asm/termios.h b/arch/blackfin/include/asm/termios.h
deleted file mode 100644
index 280d78a9d966..000000000000
--- a/arch/blackfin/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/blackfin/include/asm/topology.h b/arch/blackfin/include/asm/topology.h
deleted file mode 100644
index 5428f333a02c..000000000000
--- a/arch/blackfin/include/asm/topology.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/topology.h>
diff --git a/arch/blackfin/include/asm/types.h b/arch/blackfin/include/asm/types.h
deleted file mode 100644
index b9e79bc580dd..000000000000
--- a/arch/blackfin/include/asm/types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/types.h>
diff --git a/arch/blackfin/include/asm/ucontext.h b/arch/blackfin/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30fb..000000000000
--- a/arch/blackfin/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/blackfin/include/asm/unaligned.h b/arch/blackfin/include/asm/unaligned.h
deleted file mode 100644
index 6cecbbb2111f..000000000000
--- a/arch/blackfin/include/asm/unaligned.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/unaligned.h>
diff --git a/arch/blackfin/include/asm/user.h b/arch/blackfin/include/asm/user.h
deleted file mode 100644
index 4792a60831e4..000000000000
--- a/arch/blackfin/include/asm/user.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/user.h>
diff --git a/arch/blackfin/include/asm/xor.h b/arch/blackfin/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/blackfin/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index d550b24d9e9b..b7bdc42fe1a3 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
CFLAGS_REMOVE_ftrace.o = -pg
+obj-$(CONFIG_HAVE_PWM) += pwm.o
obj-$(CONFIG_IPIPE) += ipipe.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index bd32c09b9349..17e35465a416 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -138,6 +138,16 @@ int main(void)
DEFINE(PDA_DF_SEQSTAT, offsetof(struct blackfin_pda, seqstat_doublefault));
DEFINE(PDA_DF_RETX, offsetof(struct blackfin_pda, retx_doublefault));
#endif
+
+ /* PDA initial management */
+ DEFINE(PDA_INIT_RETX, offsetof(struct blackfin_initial_pda, retx));
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ DEFINE(PDA_INIT_DF_DCPLB, offsetof(struct blackfin_initial_pda, dcplb_doublefault_addr));
+ DEFINE(PDA_INIT_DF_ICPLB, offsetof(struct blackfin_initial_pda, icplb_doublefault_addr));
+ DEFINE(PDA_INIT_DF_SEQSTAT, offsetof(struct blackfin_initial_pda, seqstat_doublefault));
+ DEFINE(PDA_INIT_DF_RETX, offsetof(struct blackfin_initial_pda, retx_doublefault));
+#endif
+
#ifdef CONFIG_SMP
/* Inter-core lock (in L2 SRAM) */
DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index bcf8cf6fe412..02796b88443d 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -118,6 +118,9 @@ static struct str_ident {
#if defined(CONFIG_PM)
static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
+# ifdef BF538_FAMILY
+static unsigned short port_fer_saved[3];
+# endif
#endif
static void gpio_error(unsigned gpio)
@@ -604,6 +607,11 @@ void bfin_gpio_pm_hibernate_suspend(void)
{
int i, bank;
+#ifdef BF538_FAMILY
+ for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
+ port_fer_saved[i] = *port_fer[i];
+#endif
+
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
@@ -625,6 +633,10 @@ void bfin_gpio_pm_hibernate_suspend(void)
gpio_bank_saved[bank].maska = gpio_array[bank]->maska;
}
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+ bfin_special_gpio_pm_hibernate_suspend();
+#endif
+
AWA_DUMMY_READ(maska);
}
@@ -632,6 +644,11 @@ void bfin_gpio_pm_hibernate_restore(void)
{
int i, bank;
+#ifdef BF538_FAMILY
+ for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
+ *port_fer[i] = port_fer_saved[i];
+#endif
+
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
@@ -653,6 +670,11 @@ void bfin_gpio_pm_hibernate_restore(void)
gpio_array[bank]->both = gpio_bank_saved[bank].both;
gpio_array[bank]->maska = gpio_bank_saved[bank].maska;
}
+
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+ bfin_special_gpio_pm_hibernate_restore();
+#endif
+
AWA_DUMMY_READ(maska);
}
@@ -691,9 +713,9 @@ void bfin_gpio_pm_hibernate_restore(void)
gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux;
gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer;
gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
- gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
gpio_array[bank]->data_set = gpio_bank_saved[bank].data
- | gpio_bank_saved[bank].dir;
+ & gpio_bank_saved[bank].dir;
+ gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
}
}
#endif
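
The reordered restore in the last hunk writes the saved output data first, masked to output pins only, and re-enables the direction afterwards, so pins cannot glitch by driving stale levels while direction is being restored. Schematically:

/* latch the output values first (outputs only) ... */
data_set = saved_data & saved_dir;
/* ... then start driving the pins */
dir_set = saved_dir;
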
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index fce4807ceef9..92f664826281 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -27,7 +27,7 @@
#define PORT_MUX BFIN_PORT_MUX
#endif
-#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)addr)
+#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)(addr))
#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR)
#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR)
#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR)
@@ -223,7 +223,8 @@ bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdm
__DMA(CURR_DESC_PTR, curr_desc_ptr);
__DMA(CURR_ADDR, curr_addr);
__DMA(IRQ_STATUS, irq_status);
- __DMA(PERIPHERAL_MAP, peripheral_map);
+ if (strcmp(pfx, "IMDMA") != 0)
+ __DMA(PERIPHERAL_MAP, peripheral_map);
__DMA(CURR_X_COUNT, curr_x_count);
__DMA(CURR_Y_COUNT, curr_y_count);
}
@@ -277,6 +278,32 @@ bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
}
#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
+#define GPTIMER_GROUP_OFF(mmr) REGS_OFF(gptimer_group, mmr)
+#define __GPTIMER_GROUP(uname, lname) __REGS(gptimer_group, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_gptimer_group(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf;
+
+ if (num == -1) {
+ _buf = buf + sprintf(buf, "TIMER_");
+ __GPTIMER_GROUP(ENABLE, enable);
+ __GPTIMER_GROUP(DISABLE, disable);
+ __GPTIMER_GROUP(STATUS, status);
+ } else {
+ /* These MMRs are a bit odd as the group # is a suffix */
+ _buf = buf + sprintf(buf, "TIMER_ENABLE%i", num);
+ d(buf, 16, base + GPTIMER_GROUP_OFF(enable));
+
+ _buf = buf + sprintf(buf, "TIMER_DISABLE%i", num);
+ d(buf, 16, base + GPTIMER_GROUP_OFF(disable));
+
+ _buf = buf + sprintf(buf, "TIMER_STATUS%i", num);
+ d(buf, 32, base + GPTIMER_GROUP_OFF(status));
+ }
+}
+#define GPTIMER_GROUP(mmr, num) bfin_debug_mmrs_gptimer_group(parent, mmr, num)
+
/*
* Handshake MDMA
*/
@@ -296,6 +323,29 @@ bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num)
#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num)
/*
+ * Peripheral Interrupts (PINT/GPIO)
+ */
+#ifdef PINT0_MASK_SET
+#define __PINT(uname, lname) __REGS(pint, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_pint(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, PINT, num);
+ __PINT(MASK_SET, mask_set);
+ __PINT(MASK_CLEAR, mask_clear);
+ __PINT(REQUEST, request);
+ __PINT(ASSIGN, assign);
+ __PINT(EDGE_SET, edge_set);
+ __PINT(EDGE_CLEAR, edge_clear);
+ __PINT(INVERT_SET, invert_set);
+ __PINT(INVERT_CLEAR, invert_clear);
+ __PINT(PINSTATE, pinstate);
+ __PINT(LATCH, latch);
+}
+#define PINT(num) bfin_debug_mmrs_pint(parent, PINT##num##_MASK_SET, num)
+#endif
+
+/*
* Port/GPIO
*/
#define bfin_gpio_regs gpio_port_t
@@ -747,7 +797,7 @@ static int __init bfin_debug_mmrs_init(void)
#endif
parent = debugfs_create_dir("dmac", top);
-#ifdef DMA_TC_CNT
+#ifdef DMAC_TC_CNT
D16(DMAC_TC_CNT);
D16(DMAC_TC_PER);
#endif
@@ -1005,29 +1055,19 @@ static int __init bfin_debug_mmrs_init(void)
#endif
parent = debugfs_create_dir("gptimer", top);
-#ifdef TIMER_DISABLE
- D16(TIMER_DISABLE);
- D16(TIMER_ENABLE);
- D32(TIMER_STATUS);
+#ifdef TIMER_ENABLE
+ GPTIMER_GROUP(TIMER_ENABLE, -1);
#endif
-#ifdef TIMER_DISABLE0
- D16(TIMER_DISABLE0);
- D16(TIMER_ENABLE0);
- D32(TIMER_STATUS0);
+#ifdef TIMER_ENABLE0
+ GPTIMER_GROUP(TIMER_ENABLE0, 0);
#endif
-#ifdef TIMER_DISABLE1
- D16(TIMER_DISABLE1);
- D16(TIMER_ENABLE1);
- D32(TIMER_STATUS1);
+#ifdef TIMER_ENABLE1
+ GPTIMER_GROUP(TIMER_ENABLE1, 1);
#endif
/* XXX: Should convert BF561 MMR names */
#ifdef TMRS4_DISABLE
- D16(TMRS4_DISABLE);
- D16(TMRS4_ENABLE);
- D32(TMRS4_STATUS);
- D16(TMRS8_DISABLE);
- D16(TMRS8_ENABLE);
- D32(TMRS8_STATUS);
+ GPTIMER_GROUP(TMRS4_ENABLE, 0);
+ GPTIMER_GROUP(TMRS8_ENABLE, 1);
#endif
GPTIMER(0);
GPTIMER(1);
@@ -1253,6 +1293,14 @@ static int __init bfin_debug_mmrs_init(void)
D32(OTP_DATA3);
#endif
+#ifdef PINT0_MASK_SET
+ parent = debugfs_create_dir("pint", top);
+ PINT(0);
+ PINT(1);
+ PINT(2);
+ PINT(3);
+#endif
+
#ifdef PIXC_CTL
parent = debugfs_create_dir("pixc", top);
D16(PIXC_CTL);
@@ -1816,7 +1864,6 @@ static int __init bfin_debug_mmrs_init(void)
{
int num;
unsigned long base;
- char *_buf, buf[32];
base = PORTA_FER;
for (num = 0; num < 10; ++num) {
@@ -1824,24 +1871,6 @@ static int __init bfin_debug_mmrs_init(void)
base += sizeof(struct bfin_gpio_regs);
}
-#define __PINT(uname, lname) __REGS(pint, #uname, lname)
- parent = debugfs_create_dir("pint", top);
- base = PINT0_MASK_SET;
- for (num = 0; num < 4; ++num) {
- _buf = REGS_STR_PFX(buf, PINT, num);
- __PINT(MASK_SET, mask_set);
- __PINT(MASK_CLEAR, mask_clear);
- __PINT(IRQ, irq);
- __PINT(ASSIGN, assign);
- __PINT(EDGE_SET, edge_set);
- __PINT(EDGE_CLEAR, edge_clear);
- __PINT(INVERT_SET, invert_set);
- __PINT(INVERT_CLEAR, invert_clear);
- __PINT(PINSTATE, pinstate);
- __PINT(LATCH, latch);
- base += sizeof(struct bfin_pint_regs);
- }
-
}
#endif /* BF54x */
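For reference, the debug-mmrs changes above do three things: skip the PERIPHERAL_MAP entry on IMDMA channels (which have no peripheral map), fold the gptimer-group and PINT register lists into dedicated helpers instead of open-coded D16/D32 runs, and fix the DMAC_TC_CNT ifdef typo so the DMA traffic-control MMRs are actually registered. The d()/_d() helpers used throughout boil down to hex debugfs entries backed by the MMR address; a rough sketch, assuming the standard debugfs x16/x32 helpers (mmr_entry is a hypothetical name):

#include <linux/debugfs.h>
#include <linux/stat.h>

/* Illustration of what d(name, bits, addr) amounts to: one hex
 * debugfs file per MMR, reading and writing the register directly. */
static void mmr_entry(struct dentry *parent, const char *name,
		      int bits, unsigned long addr)
{
	if (bits == 16)
		debugfs_create_x16(name, S_IRUSR | S_IWUSR, parent, (u16 *)addr);
	else
		debugfs_create_x32(name, S_IRUSR | S_IWUSR, parent, (u32 *)addr);
}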
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 8b81dc04488a..06459f4bf43a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -25,49 +25,33 @@
#define BFIN_TIMER_NUM_GROUP (BFIN_TIMER_OCTET(MAX_BLACKFIN_GPTIMERS - 1) + 1)
-typedef struct {
- uint16_t config;
- uint16_t __pad;
- uint32_t counter;
- uint32_t period;
- uint32_t width;
-} GPTIMER_timer_regs;
-
-typedef struct {
- uint16_t enable;
- uint16_t __pad0;
- uint16_t disable;
- uint16_t __pad1;
- uint32_t status;
-} GPTIMER_group_regs;
-
-static volatile GPTIMER_timer_regs *const timer_regs[MAX_BLACKFIN_GPTIMERS] =
+static struct bfin_gptimer_regs * const timer_regs[MAX_BLACKFIN_GPTIMERS] =
{
- (GPTIMER_timer_regs *)TIMER0_CONFIG,
- (GPTIMER_timer_regs *)TIMER1_CONFIG,
- (GPTIMER_timer_regs *)TIMER2_CONFIG,
+ (void *)TIMER0_CONFIG,
+ (void *)TIMER1_CONFIG,
+ (void *)TIMER2_CONFIG,
#if (MAX_BLACKFIN_GPTIMERS > 3)
- (GPTIMER_timer_regs *)TIMER3_CONFIG,
- (GPTIMER_timer_regs *)TIMER4_CONFIG,
- (GPTIMER_timer_regs *)TIMER5_CONFIG,
- (GPTIMER_timer_regs *)TIMER6_CONFIG,
- (GPTIMER_timer_regs *)TIMER7_CONFIG,
+ (void *)TIMER3_CONFIG,
+ (void *)TIMER4_CONFIG,
+ (void *)TIMER5_CONFIG,
+ (void *)TIMER6_CONFIG,
+ (void *)TIMER7_CONFIG,
# if (MAX_BLACKFIN_GPTIMERS > 8)
- (GPTIMER_timer_regs *)TIMER8_CONFIG,
- (GPTIMER_timer_regs *)TIMER9_CONFIG,
- (GPTIMER_timer_regs *)TIMER10_CONFIG,
+ (void *)TIMER8_CONFIG,
+ (void *)TIMER9_CONFIG,
+ (void *)TIMER10_CONFIG,
# if (MAX_BLACKFIN_GPTIMERS > 11)
- (GPTIMER_timer_regs *)TIMER11_CONFIG,
+ (void *)TIMER11_CONFIG,
# endif
# endif
#endif
};
-static volatile GPTIMER_group_regs *const group_regs[BFIN_TIMER_NUM_GROUP] =
+static struct bfin_gptimer_group_regs * const group_regs[BFIN_TIMER_NUM_GROUP] =
{
- (GPTIMER_group_regs *)TIMER0_GROUP_REG,
+ (void *)TIMER0_GROUP_REG,
#if (MAX_BLACKFIN_GPTIMERS > 8)
- (GPTIMER_group_regs *)TIMER8_GROUP_REG,
+ (void *)TIMER8_GROUP_REG,
#endif
};
@@ -140,7 +124,7 @@ static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] =
void set_gptimer_pwidth(unsigned int timer_id, uint32_t value)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->width = value;
+ bfin_write(&timer_regs[timer_id]->width, value);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_pwidth);
@@ -148,14 +132,14 @@ EXPORT_SYMBOL(set_gptimer_pwidth);
uint32_t get_gptimer_pwidth(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->width;
+ return bfin_read(&timer_regs[timer_id]->width);
}
EXPORT_SYMBOL(get_gptimer_pwidth);
void set_gptimer_period(unsigned int timer_id, uint32_t period)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->period = period;
+ bfin_write(&timer_regs[timer_id]->period, period);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_period);
@@ -163,71 +147,76 @@ EXPORT_SYMBOL(set_gptimer_period);
uint32_t get_gptimer_period(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->period;
+ return bfin_read(&timer_regs[timer_id]->period);
}
EXPORT_SYMBOL(get_gptimer_period);
uint32_t get_gptimer_count(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->counter;
+ return bfin_read(&timer_regs[timer_id]->counter);
}
EXPORT_SYMBOL(get_gptimer_count);
uint32_t get_gptimer_status(unsigned int group)
{
tassert(group < BFIN_TIMER_NUM_GROUP);
- return group_regs[group]->status;
+ return bfin_read(&group_regs[group]->status);
}
EXPORT_SYMBOL(get_gptimer_status);
void set_gptimer_status(unsigned int group, uint32_t value)
{
tassert(group < BFIN_TIMER_NUM_GROUP);
- group_regs[group]->status = value;
+ bfin_write(&group_regs[group]->status, value);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_status);
+static uint32_t read_gptimer_status(unsigned int timer_id)
+{
+ return bfin_read(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status);
+}
+
int get_gptimer_intr(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]);
+ return !!(read_gptimer_status(timer_id) & timil_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_intr);
void clear_gptimer_intr(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- group_regs[BFIN_TIMER_OCTET(timer_id)]->status = timil_mask[timer_id];
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, timil_mask[timer_id]);
}
EXPORT_SYMBOL(clear_gptimer_intr);
int get_gptimer_over(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]);
+ return !!(read_gptimer_status(timer_id) & tovf_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_over);
void clear_gptimer_over(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- group_regs[BFIN_TIMER_OCTET(timer_id)]->status = tovf_mask[timer_id];
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, tovf_mask[timer_id]);
}
EXPORT_SYMBOL(clear_gptimer_over);
int get_gptimer_run(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]);
+ return !!(read_gptimer_status(timer_id) & trun_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_run);
void set_gptimer_config(unsigned int timer_id, uint16_t config)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->config = config;
+ bfin_write(&timer_regs[timer_id]->config, config);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_config);
@@ -235,7 +224,7 @@ EXPORT_SYMBOL(set_gptimer_config);
uint16_t get_gptimer_config(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->config;
+ return bfin_read(&timer_regs[timer_id]->config);
}
EXPORT_SYMBOL(get_gptimer_config);
@@ -244,7 +233,7 @@ void enable_gptimers(uint16_t mask)
int i;
tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
- group_regs[i]->enable = mask & 0xFF;
+ bfin_write(&group_regs[i]->enable, mask & 0xFF);
mask >>= 8;
}
SSYNC();
@@ -257,7 +246,7 @@ static void _disable_gptimers(uint16_t mask)
uint16_t m = mask;
tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
- group_regs[i]->disable = m & 0xFF;
+ bfin_write(&group_regs[i]->disable, m & 0xFF);
m >>= 8;
}
}
@@ -268,7 +257,7 @@ void disable_gptimers(uint16_t mask)
_disable_gptimers(mask);
for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
if (mask & (1 << i))
- group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(i)]->status, trun_mask[i]);
SSYNC();
}
EXPORT_SYMBOL(disable_gptimers);
@@ -283,7 +272,7 @@ EXPORT_SYMBOL(disable_gptimers_sync);
void set_gptimer_pulse_hi(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->config |= TIMER_PULSE_HI;
+ bfin_write_or(&timer_regs[timer_id]->config, TIMER_PULSE_HI);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_pulse_hi);
@@ -291,7 +280,7 @@ EXPORT_SYMBOL(set_gptimer_pulse_hi);
void clear_gptimer_pulse_hi(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->config &= ~TIMER_PULSE_HI;
+ bfin_write_and(&timer_regs[timer_id]->config, ~TIMER_PULSE_HI);
SSYNC();
}
EXPORT_SYMBOL(clear_gptimer_pulse_hi);
@@ -301,7 +290,7 @@ uint16_t get_enabled_gptimers(void)
int i;
uint16_t result = 0;
for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i)
- result |= (group_regs[i]->enable << (i << 3));
+ result |= (bfin_read(&group_regs[i]->enable) << (i << 3));
return result;
}
EXPORT_SYMBOL(get_enabled_gptimers);
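The gptimers.c conversion above drops the file-local volatile structs in favor of the shared struct bfin_gptimer_regs / bfin_gptimer_group_regs layouts plus the bfin_read()/bfin_write() accessors, which select a 16- or 32-bit MMR access from the field's type. A minimal sketch of that dispatch, assuming the bfin_read16/32 and bfin_write16/32 primitives (the real macros in the Blackfin headers also handle the 8-bit case); my_bfin_* are illustrative stand-ins:

#define my_bfin_read(addr) \
	(sizeof(*(addr)) == 2 ? bfin_read16(addr) : bfin_read32(addr))

#define my_bfin_write(addr, val) \
	do { \
		if (sizeof(*(addr)) == 2) \
			bfin_write16(addr, val); \
		else \
			bfin_write32(addr, val); \
	} while (0)

This is why the 16-bit config/enable/disable fields and the 32-bit width/period/status fields can all go through the same accessor calls in the hunks above.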
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index 35e350cad9d9..4489efc52883 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -16,19 +16,6 @@
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
/* Transfer the section to the L1 memory */
int
module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
@@ -150,14 +137,6 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
return 0;
}
-int
-apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *mod)
-{
- pr_err(".rel unsupported\n");
- return -ENOEXEC;
-}
-
/*************************************************************************/
/* FUNCTION : apply_relocate_add */
/* ABSTRACT : Blackfin specific relocation handling for the loadable */
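Dropping module_alloc(), module_free(), and the .rel apply_relocate() stub is safe because the module core provides weak generic implementations with equivalent behavior, leaving only the genuinely Blackfin-specific pieces (the L1-memory section handling and apply_relocate_add) in this file. A sketch of the generic fallbacks being relied on, paraphrased from kernel/module.c of this era:

#include <linux/moduleloader.h>
#include <linux/vmalloc.h>

void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}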
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6a660fa921b5..6a80a9e9fc4a 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -140,7 +140,6 @@ EXPORT_SYMBOL(kernel_thread);
*/
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
- set_fs(USER_DS);
regs->pc = new_ip;
if (current->mm)
regs->p5 = current->mm->start_data;
diff --git a/arch/blackfin/kernel/pwm.c b/arch/blackfin/kernel/pwm.c
new file mode 100644
index 000000000000..33f5942733bd
--- /dev/null
+++ b/arch/blackfin/kernel/pwm.c
@@ -0,0 +1,100 @@
+/*
+ * Blackfin Pulse Width Modulation (PWM) core
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#include <asm/gptimers.h>
+#include <asm/portmux.h>
+
+struct pwm_device {
+ unsigned id;
+ unsigned short pin;
+};
+
+static const unsigned short pwm_to_gptimer_per[] = {
+ P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5,
+ P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11,
+};
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+ int ret;
+
+ /* XXX: pwm_id really should be unsigned */
+ if (pwm_id < 0)
+ return NULL;
+
+ pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+ return pwm;
+
+ pwm->id = pwm_id;
+ if (pwm->id >= ARRAY_SIZE(pwm_to_gptimer_per))
+ goto err;
+
+ pwm->pin = pwm_to_gptimer_per[pwm->id];
+ ret = peripheral_request(pwm->pin, label);
+ if (ret)
+ goto err;
+
+ return pwm;
+ err:
+ kfree(pwm);
+ return NULL;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ peripheral_free(pwm->pin);
+ kfree(pwm);
+}
+EXPORT_SYMBOL(pwm_free);
+
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ unsigned long period, duty;
+ unsigned long long val;
+
+	if (duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ val = (unsigned long long)get_sclk() * period_ns;
+ do_div(val, NSEC_PER_SEC);
+ period = val;
+
+ val = (unsigned long long)period * duty_ns;
+ do_div(val, period_ns);
+ duty = period - val;
+
+ if (duty >= period)
+ duty = period - 1;
+
+ set_gptimer_config(pwm->id, TIMER_MODE_PWM | TIMER_PERIOD_CNT);
+ set_gptimer_pwidth(pwm->id, duty);
+ set_gptimer_period(pwm->id, period);
+
+ return 0;
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ enable_gptimer(pwm->id);
+ return 0;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ disable_gptimer(pwm->id);
+}
+EXPORT_SYMBOL(pwm_disable);
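The new pwm.c maps the legacy pwm_request()/pwm_config()/pwm_enable() API onto the general-purpose timers. pwm_config() rejects non-positive periods before dividing by period_ns, converts the period into sclk ticks, then programs the width register with period minus the on-time; for the default TIMER_MODE_PWM polarity the width register evidently holds the inactive part of the period, which is why the value is inverted. A minimal, hypothetical consumer (pwm_demo_init and the "pwm-demo" label are illustrative):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pwm.h>

static struct pwm_device *demo_pwm;

static int __init pwm_demo_init(void)
{
	int ret;

	demo_pwm = pwm_request(0, "pwm-demo");	/* gptimer 0 */
	if (!demo_pwm)
		return -ENODEV;

	/* 1 kHz period, 25% duty cycle */
	ret = pwm_config(demo_pwm, 250000, 1000000);
	if (ret) {
		pwm_free(demo_pwm);
		return ret;
	}

	return pwm_enable(demo_pwm);
}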
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 488bdc51aaa5..c4c0081b1996 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -54,7 +54,9 @@ static void bfin_reset(void)
/* The BF526 ROM will crash during reset */
#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
- bfin_read_SWRST();
+	/* Seems to be fixed on newer parts, though ... */
+ if (__SILICON_REVISION__ < 1 && bfin_revid() < 1)
+ bfin_read_SWRST();
#endif
/* Wait for the SWRST write to complete. Cannot rely on SSYNC
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 536bd9d7e0cf..dfa2525a442d 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -54,8 +54,7 @@ EXPORT_SYMBOL(mtd_size);
#endif
char __initdata command_line[COMMAND_LINE_SIZE];
-void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
- *init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;
+struct blackfin_initial_pda __initdata initial_pda;
/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
@@ -957,13 +956,16 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* We assume the crashing kernel, and the current symbol table match */
- printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
- (int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
- printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
- printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
+ printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
+ initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
+ initial_pda.retx_doublefault);
+ printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
+ initial_pda.dcplb_doublefault_addr);
+ printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
+ initial_pda.icplb_doublefault_addr);
#endif
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
- init_retx);
+ initial_pda.retx);
} else if (_bfin_swrst & RESET_WDOG)
printk(KERN_INFO "Recovering from Watchdog event\n");
else if (_bfin_swrst & RESET_SOFTWARE)
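Folding the five scattered __initdata pointers into one struct blackfin_initial_pda keeps the double-fault breadcrumbs together and mirrors the per-CPU PDA they are captured from. A sketch of the shape implied by the fields used above (the real definition belongs in asm/pda.h and may gate the double-fault members on CONFIG_DEBUG_DOUBLEFAULT):

struct blackfin_initial_pda {
	void *retx;
#ifdef CONFIG_DEBUG_DOUBLEFAULT
	void *dcplb_doublefault_addr;
	void *icplb_doublefault_addr;
	void *retx_doublefault;
	unsigned int seqstat_doublefault;
#endif
};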
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 8d73724c0092..ceb2bf63dfe2 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -51,7 +51,7 @@ void __init setup_core_timer(void)
u32 tcount;
/* power up the timer, but don't enable it just yet */
- bfin_write_TCNTL(1);
+ bfin_write_TCNTL(TMPWR);
CSYNC();
/* the TSCALE prescaler counter */
@@ -64,7 +64,7 @@ void __init setup_core_timer(void)
/* now enable the timer */
CSYNC();
- bfin_write_TCNTL(7);
+ bfin_write_TCNTL(TAUTORLD | TMREN | TMPWR);
}
#endif
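Spelling out the TCNTL writes with named bits changes nothing at runtime; assuming the usual core-timer TCNTL layout, the old magic numbers decompose as:

/* Core timer TCNTL bits (illustrative; the kernel headers define these) */
#define TMPWR    0x00000001	/* timer powered up */
#define TMREN    0x00000002	/* timer enabled */
#define TAUTORLD 0x00000004	/* auto-reload from TPERIOD */

/* old: bfin_write_TCNTL(1) == TMPWR                     (0x1) */
/* old: bfin_write_TCNTL(7) == TAUTORLD | TMREN | TMPWR  (0x7) */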
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 3ac5b66d14aa..ba35864b2b74 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -155,6 +155,7 @@ SECTIONS
SECURITY_INITCALL
INIT_RAM_FS
+ . = ALIGN(PAGE_SIZE);
___per_cpu_load = .;
PERCPU_INPUT(32)
diff --git a/arch/blackfin/mach-bf518/Kconfig b/arch/blackfin/mach-bf518/Kconfig
index 1d9f631a7f94..bde92a19970e 100644
--- a/arch/blackfin/mach-bf518/Kconfig
+++ b/arch/blackfin/mach-bf518/Kconfig
@@ -11,55 +11,75 @@ menu "BF518 Specific Configuration"
comment "Alternative Multiplexing Scheme"
choice
- prompt "SPORT0"
- default BF518_SPORT0_PORTG
+ prompt "PWM Channel Pins"
+ default BF518_PWM_ALL_PORTF
help
- Select PORT used for SPORT0. See Hardware Reference Manual
+ Select pins used for the PWM channels:
+ PWM_AH PWM_AL PWM_BH PWM_BL PWM_CH PWM_CL
-config BF518_SPORT0_PORTF
- bool "PORT F"
+ See the Hardware Reference Manual for more details.
+
+config BF518_PWM_ALL_PORTF
+ bool "PF1 - PF6"
help
- PORT F
+ PF{1,2,3,4,5,6} <-> PWM_{AH,AL,BH,BL,CH,CL}
-config BF518_SPORT0_PORTG
- bool "PORT G"
+config BF518_PWM_PORTF_PORTG
+ bool "PF11 - PF14 / PG1 - PG2"
help
- PORT G
+ PF{11,12,13,14} <-> PWM_{AH,AL,BH,BL}
+ PG{1,2} <-> PWM_{CH,CL}
+
endchoice
choice
- prompt "SPORT0 TSCLK Location"
- depends on BF518_SPORT0_PORTG
- default BF518_SPORT0_TSCLK_PG10
+ prompt "PWM Sync Pin"
+ default BF518_PWM_SYNC_PF7
help
- Select PIN used for SPORT0_TSCLK. See Hardware Reference Manual
+ Select the pin used for PWM_SYNC.
-config BF518_SPORT0_TSCLK_PG10
- bool "PORT PG10"
- help
- PORT PG10
+ See the Hardware Reference Manual for more details.
+
+config BF518_PWM_SYNC_PF7
+ bool "PF7"
+config BF518_PWM_SYNC_PF15
+ bool "PF15"
+endchoice
-config BF518_SPORT0_TSCLK_PG14
- bool "PORT PG14"
+choice
+ prompt "PWM Trip B Pin"
+ default BF518_PWM_TRIPB_PG10
help
- PORT PG14
+ Select the pin used for PWM_TRIPB.
+
+ See the Hardware Reference Manual for more details.
+
+config BF518_PWM_TRIPB_PG10
+ bool "PG10"
+config BF518_PWM_TRIPB_PG14
+ bool "PG14"
endchoice
choice
- prompt "UART1"
- default BF518_UART1_PORTF
+ prompt "PPI / Timer Pins"
+ default BF518_PPI_TMR_PG5
help
- Select PORT used for UART1. See Hardware Reference Manual
+ Select pins used for PPI/Timer:
+ PPICLK PPIFS1 PPIFS2
+ TMRCLK TMR0 TMR1
-config BF518_UART1_PORTF
- bool "PORT F"
+ See the Hardware Reference Manual for more details.
+
+config BF518_PPI_TMR_PG5
+ bool "PG5 - PG7"
help
- PORT F
+ PG{5,6,7} <-> {PPICLK/TMRCLK,TMR0/PPIFS1,TMR1/PPIFS2}
-config BF518_UART1_PORTG
- bool "PORT G"
+config BF518_PPI_TMR_PG12
+ bool "PG12 - PG14"
help
- PORT G
+ PG{12,13,14} <-> {PPICLK/TMRCLK,TMR0/PPIFS1,TMR1/PPIFS2}
+
endchoice
comment "Hysteresis/Schmitt Trigger Control"
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index c0ccadcfa44e..d78fc2cc7d16 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -187,43 +187,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
-#if defined(CONFIG_NET_DSA_KSZ8893M) \
- || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
-/* SPI SWITCH CHIP */
-static struct bfin5xx_spi_chip spi_switch_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -239,21 +212,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -269,18 +227,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#if defined(CONFIG_NET_DSA_KSZ8893M) \
|| defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
@@ -290,7 +236,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 1,
.platform_data = NULL,
- .controller_data = &spi_switch_info,
.mode = SPI_MODE_3,
},
#endif
@@ -314,7 +259,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -324,7 +268,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -334,7 +277,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -343,7 +285,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
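This board file, like the ones that follow, loses bits_per_word from its bfin5xx_spi_chip controller_data: word size is now handled by the SPI core, where each protocol driver declares it on the spi_device and applies it with spi_setup(). A generic sketch of where the setting lives now (demo_probe is hypothetical):

#include <linux/spi/spi.h>

static int demo_probe(struct spi_device *spi)
{
	/* word size comes from the SPI core, not controller_data */
	spi->bits_per_word = 16;
	return spi_setup(spi);
}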
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 50fc5c89e379..55c127908815 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -138,32 +138,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -179,21 +163,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -209,18 +178,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
@@ -239,7 +196,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -249,7 +205,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -259,7 +214,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -268,7 +222,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index d2f076fbbc9e..56383f7cbc07 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -11,10 +11,9 @@
*/
/* This file should be up to date with:
- * - Revision E, 01/26/2010; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
+ * - Revision F, 05/23/2011; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
*/
-/* We plan on not supporting 0.0 silicon, but 0.1 isn't out yet - sorry */
#if __SILICON_REVISION__ < 0
# error will not work on BF518 silicon version
#endif
@@ -77,19 +76,29 @@
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
-#define ANOMALY_05000462 (1)
-/* PLL Latches Incorrect Settings During Reset */
-#define ANOMALY_05000469 (1)
+#define ANOMALY_05000462 (__SILICON_REVISION__ < 2)
/* Incorrect Default MSEL Value in PLL_CTL */
-#define ANOMALY_05000472 (1)
+#define ANOMALY_05000472 (__SILICON_REVISION__ < 2)
/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL Latches Incorrect Settings During Reset */
+#define ANOMALY_05000482 (__SILICON_REVISION__ < 2)
+/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
+#define ANOMALY_05000485 (__SILICON_REVISION__ < 2)
+/* SPI Master Boot Can Fail Under Certain Conditions */
+#define ANOMALY_05000490 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
+#define ANOMALY_05000498 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -157,6 +166,5 @@
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#define ANOMALY_05000480 (0)
-#define ANOMALY_05000485 (0)
#endif
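With the list synced to Revision F, several entries are keyed to __SILICON_REVISION__ instead of being hard-coded to 1, so the corresponding workarounds compile away on fixed parts. A generic sketch of how ANOMALY_* macros are typically consumed (sport_tx_start is hypothetical):

#include <mach/anomaly.h>

static void sport_tx_start(void)
{
	if (ANOMALY_05000462) {
		/* rev < 2 silicon: re-align the SPORT transmit
		 * channels before enabling; dead code otherwise. */
	}
	/* ... normal enable sequence ... */
}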
diff --git a/arch/blackfin/mach-bf518/include/mach/portmux.h b/arch/blackfin/mach-bf518/include/mach/portmux.h
index cd84a569b04e..b3b806f468da 100644
--- a/arch/blackfin/mach-bf518/include/mach/portmux.h
+++ b/arch/blackfin/mach-bf518/include/mach/portmux.h
@@ -81,9 +81,15 @@
#define P_PPI0_D14 (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1))
#define P_PPI0_D15 (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1))
+#ifndef CONFIG_BF518_PPI_TMR_PG12
+#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(1))
+#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(1))
+#define P_PPI0_FS2 (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(1))
+#else
#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(1))
#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(1))
#define P_PPI0_FS2 (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(1))
+#endif
#define P_PPI0_FS3 (P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(1))
/* SPI Port Mux */
@@ -139,9 +145,15 @@
#define P_UART1_RX (P_DEFINED | P_IDENT(GPIO_PH7) | P_FUNCT(1))
/* Timer */
+#ifndef CONFIG_BF518_PPI_TMR_PG12
#define P_TMRCLK (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(2))
#define P_TMR0 (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(2))
#define P_TMR1 (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(2))
+#else
+#define P_TMRCLK (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(2))
+#define P_TMR0 (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(2))
+#define P_TMR1 (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2))
+#endif
#define P_TMR2 (P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(2))
#define P_TMR3 (P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(2))
#define P_TMR4 (P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(2))
@@ -158,23 +170,33 @@
#define P_TWI0_SDA (P_DONTCARE)
/* PWM */
-#define P_PWM0_AH (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2))
-#define P_PWM0_AL (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2))
-#define P_PWM0_BH (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2))
-#define P_PWM0_BL (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2))
-#define P_PWM0_CH (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2))
-#define P_PWM0_CL (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2))
-#define P_PWM0_SYNC (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2))
-
-#define P_PWM1_AH (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2))
-#define P_PWM1_AL (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2))
-#define P_PWM1_BH (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2))
-#define P_PWM1_BL (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2))
-#define P_PWM1_CH (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2))
-#define P_PWM1_CL (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2))
-#define P_PWM1_SYNC (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2))
-
+#ifndef CONFIG_BF518_PWM_PORTF_PORTG
+#define P_PWM_AH (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2))
+#define P_PWM_AL (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2))
+#define P_PWM_BH (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2))
+#define P_PWM_BL (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2))
+#define P_PWM_CH (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2))
+#define P_PWM_CL (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2))
+#else
+#define P_PWM_AH (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2))
+#define P_PWM_AL (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2))
+#define P_PWM_BH (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2))
+#define P_PWM_BL (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2))
+#define P_PWM_CH (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2))
+#define P_PWM_CL (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2))
+#endif
+
+#ifndef CONFIG_BF518_PWM_SYNC_PF15
+#define P_PWM_SYNC (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2))
+#else
+#define P_PWM_SYNC (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2))
+#endif
+
+#ifndef CONFIG_BF518_PWM_TRIPB_PG14
+#define P_PWM_TRIPB (P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(2))
+#else
#define P_PWM_TRIPB (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2))
+#endif
/* RSI */
#define P_RSI_DATA0 (P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(1))
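The portmux table now keys the PPI/timer and PWM definitions off the new Kconfig choices, so board code keeps requesting the logical P_PWM_* / P_TMR* names regardless of which package pins were selected. A sketch using a zero-terminated request list, assuming peripheral_request_list() from the Blackfin portmux API (the "bf518-pwm" label is illustrative):

#include <linux/init.h>
#include <asm/portmux.h>

static const unsigned short pwm_pins[] = {
	P_PWM_AH, P_PWM_AL, P_PWM_BH, P_PWM_BL,
	P_PWM_CH, P_PWM_CL, P_PWM_SYNC,
	0,	/* list terminator */
};

static int __init claim_pwm_pins(void)
{
	/* resolves to PF1-PF7 or the PF11../PG.. set per Kconfig */
	return peripheral_request_list(pwm_pins, "bf518-pwm");
}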
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index ccab4c689dc3..c04df43f6391 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -265,29 +265,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -328,7 +311,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -347,7 +329,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index c9d6dc88f0e6..6400341cc230 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -354,40 +354,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -403,21 +379,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -433,18 +394,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
@@ -452,7 +401,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -473,7 +421,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -483,7 +430,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -493,7 +439,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index b7101aa6e3aa..6dbb1b403763 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -253,32 +253,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (sst25wf040) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -311,35 +295,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -355,18 +310,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
@@ -385,7 +328,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -396,7 +338,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -407,7 +348,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -417,7 +357,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -426,7 +365,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index e67ac7720668..4e9dc9cf8241 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -409,6 +409,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -448,40 +451,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -513,20 +492,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
@@ -574,9 +539,25 @@ static struct resource bfin_snd_resources[][4] = {
BFIN_SND_RES(0),
BFIN_SND_RES(1),
};
+#endif
-static struct platform_device bfin_pcm = {
- .name = "bfin-pcm-audio",
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+static struct platform_device bfin_i2s_pcm = {
+ .name = "bfin-i2s-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+static struct platform_device bfin_tdm_pcm = {
+ .name = "bfin-tdm-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+static struct platform_device bfin_ac97_pcm = {
+ .name = "bfin-ac97-pcm-audio",
.id = -1,
};
#endif
@@ -605,13 +586,6 @@ static struct platform_device bfin_tdm = {
};
#endif
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -627,18 +601,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
@@ -647,7 +609,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836",
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -670,7 +631,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -681,7 +641,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 3,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -691,7 +650,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -700,7 +658,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 7,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -1276,9 +1233,16 @@ static struct platform_device *stamp_devices[] __initdata = {
&ezkit_flash_device,
#endif
-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
- defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
- &bfin_pcm,
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+ &bfin_i2s_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+ &bfin_tdm_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+ &bfin_ac97_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 18d303dd5627..ec4bc7429c9f 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -314,29 +314,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 0, /* use dma transfer with this chip*/
-/*
- * tll6527m V1.0 does not support native spi slave selects
- * hence DMA mode will not be useful since the ADC needs
- * CS to toggle for each sample and cs_change_per_word
- * seems to be removed from spi_bfin5xx.c
- */
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -359,21 +342,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) \
- || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s = {
.name = "bfin-i2s",
@@ -382,24 +350,7 @@ static struct platform_device bfin_i2s = {
};
#endif
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE)
-static struct bfin5xx_spi_chip spi_mcp23s08_sys_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-
-static struct bfin5xx_spi_chip spi_mcp23s08_usr_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-
#include <linux/spi/mcp23s08.h>
static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = {
.chip[0].is_present = true,
@@ -429,22 +380,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC)
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc",
- /* Name of spi_driver for this device */
- .max_speed_hz = 10000000,
- /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = EXP_GPIO_SPISEL_BASE + 0x04 + MAX_CTRL_CS,
- /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- .mode = SPI_MODE_0,
- },
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
@@ -470,7 +405,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
/* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x07 + MAX_CTRL_CS,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -482,7 +416,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x03 + MAX_CTRL_CS,
.mode = SPI_CPHA | SPI_CPOL,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -491,7 +424,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000,
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x06 + MAX_CTRL_CS,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -502,7 +434,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x01 + MAX_CTRL_CS,
- .controller_data = &spi_mcp23s08_sys_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
{
@@ -511,7 +442,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x02 + MAX_CTRL_CS,
- .controller_data = &spi_mcp23s08_usr_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index e66a7e89cd3c..688470611e15 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -11,8 +11,8 @@
*/
/* This file should be up to date with:
- * - Revision E, 03/15/2010; ADSP-BF526 Blackfin Processor Anomaly List
- * - Revision H, 04/29/2010; ADSP-BF527 Blackfin Processor Anomaly List
+ * - Revision F, 05/23/2011; ADSP-BF526 Blackfin Processor Anomaly List
+ * - Revision I, 05/23/2011; ADSP-BF527 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -57,7 +57,7 @@
/* Incorrect Access of OTP_STATUS During otp_write() Function */
#define ANOMALY_05000328 (_ANOMALY_BF527(< 2))
/* Host DMA Boot Modes Are Not Functional */
-#define ANOMALY_05000330 (__SILICON_REVISION__ < 2)
+#define ANOMALY_05000330 (_ANOMALY_BF527(< 2))
/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
#define ANOMALY_05000337 (_ANOMALY_BF527(< 2))
/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */
@@ -135,7 +135,7 @@
/* Incorrect Default Internal Voltage Regulator Setting */
#define ANOMALY_05000410 (_ANOMALY_BF527(< 2))
/* bfrom_SysControl() Firmware Function Cannot be Used to Enter Power Saving Modes */
-#define ANOMALY_05000411 (_ANOMALY_BF526_BF527(< 1, < 2))
+#define ANOMALY_05000411 (_ANOMALY_BF526(< 1))
/* OTP_CHECK_FOR_PREV_WRITE Bit is Not Functional in bfrom_OtpWrite() API */
#define ANOMALY_05000414 (_ANOMALY_BF526_BF527(< 1, < 2))
/* DEB2_URGENT Bit Not Functional */
@@ -181,11 +181,11 @@
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
/* The WURESET Bit in the SYSCR Register is not Functional */
-#define ANOMALY_05000445 (1)
-/* USB DMA Mode 1 Short Packet Data Corruption */
+#define ANOMALY_05000445 (_ANOMALY_BF527(>= 0))
+/* USB DMA Short Packet Data Corruption */
#define ANOMALY_05000450 (1)
/* BCODE_QUICKBOOT, BCODE_ALLBOOT, and BCODE_FULLBOOT Settings in SYSCR Register Not Functional */
-#define ANOMALY_05000451 (1)
+#define ANOMALY_05000451 (_ANOMALY_BF527(>= 0))
/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
#define ANOMALY_05000452 (_ANOMALY_BF526_BF527(< 1, >= 0))
/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
@@ -198,19 +198,19 @@
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
#define ANOMALY_05000462 (1)
-/* USB Rx DMA hang */
+/* USB Rx DMA Hang */
#define ANOMALY_05000465 (1)
/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */
#define ANOMALY_05000466 (1)
-/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+/* Possible USB RX Data Corruption When Control & Data EP FIFOs are Accessed via the Core */
#define ANOMALY_05000467 (1)
/* PLL Latches Incorrect Settings During Reset */
#define ANOMALY_05000469 (1)
/* Incorrect Default MSEL Value in PLL_CTL */
#define ANOMALY_05000472 (_ANOMALY_BF526(>= 0))
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
@@ -219,11 +219,19 @@
/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */
#define ANOMALY_05000483 (1)
/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
-#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, < 3))
+#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, >= 0))
/* The CODEC Zero-Cross Detect Feature is not Functional */
#define ANOMALY_05000487 (1)
-/* IFLUSH sucks at life */
+/* SPI Master Boot Can Fail Under Certain Conditions */
+#define ANOMALY_05000490 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
+#define ANOMALY_05000498 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index d4bfcea56828..eb325ed6607e 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -159,22 +159,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -195,24 +179,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 4, /* actual baudrate is SCLK/(2xspeed_hz) */
- .bus_num = 1, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 16,
.bus_num = 1,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 87b5af3693c1..b0ec825fb4ec 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -102,21 +102,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -151,7 +142,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 7,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 4d5604eaa7c2..14f54a31e74c 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -59,29 +59,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-/* SPI ADC chip */
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -99,24 +82,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 2, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index b67b91d82242..ecd2801f050d 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -210,29 +210,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -250,24 +227,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -276,7 +241,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index a377d8afea03..fbee77fa9211 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -110,7 +110,6 @@ static struct platform_device dm9000_device2 = {
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0, /* if 1 - block!!! */
- .bits_per_word = 8,
};
#endif
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 43224ef00b8c..964a8e5f79b4 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -80,6 +80,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
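+		/* IORESOURCE_BUS entry: the net2272 driver appears to read
+		 * .start as the register address shift for this async bus
+		 * (assumption based on its platform probe) */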
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF10,
.end = IRQ_PF10,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -172,29 +175,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -221,7 +201,6 @@ static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
.pio_interrupt = 0,
};
#endif
@@ -240,17 +219,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
@@ -258,7 +226,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -269,7 +236,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -659,6 +625,41 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
};
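+/*
+ * One-time setup for the on-board net2272 USB controller: strap the
+ * /AMS3 decode via PF0/PF1 and pulse the chip's reset line on PF11.
+ */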
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+	/* Set PF0 to 0, PF1 to 1 to make /AMS3 work properly */
+ ret = gpio_request(GPIO_PF0, "net2272");
+ if (ret)
+ return ret;
+
+ ret = gpio_request(GPIO_PF1, "net2272");
+ if (ret) {
+ gpio_free(GPIO_PF0);
+ return ret;
+ }
+
+ ret = gpio_request(GPIO_PF11, "net2272");
+ if (ret) {
+ gpio_free(GPIO_PF0);
+ gpio_free(GPIO_PF1);
+ return ret;
+ }
+
+ gpio_direction_output(GPIO_PF0, 0);
+ gpio_direction_output(GPIO_PF1, 1);
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PF11, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF11, 1);
+#endif
+
+ return 0;
+}
+
static int __init stamp_init(void)
{
int ret;
@@ -685,6 +686,9 @@ static int __init stamp_init(void)
}
#endif
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
return 0;
}
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 72aa59440f82..03f2b40912a3 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision F, 05/25/2010; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
+ * - Revision G, 05/23/2011; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -152,7 +152,7 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 6)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 6)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 6)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 6)
@@ -210,18 +210,25 @@
#define ANOMALY_05000462 (1)
/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
#define ANOMALY_05000471 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
-/* These anomalies have been "phased" out of analog.com anomaly sheets and are
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
* here to show running on older silicon just isn't feasible.
*/
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index d582b810e7a7..44fd8409db10 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -61,29 +61,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -101,24 +84,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
@@ -766,6 +737,24 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PG14, "net2272");
+ if (ret)
+ return ret;
+
+	/* Reset the USB chip via PG14 */
+ gpio_direction_output(GPIO_PG14, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PG14, 1);
+#endif
+
+ return 0;
+}
+
static int __init cm_bf537e_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -777,6 +766,10 @@ static int __init cm_bf537e_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index cbb8098604c5..1b4ac5c64aae 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -62,29 +62,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -102,24 +85,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
@@ -731,6 +702,36 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
#endif
};
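+/*
+ * Board setup for the net2272: hold /AMS2 low via PH15, enable the
+ * buffered CLKIN output (which presumably clocks the chip), then
+ * pulse its reset line on PH13.
+ */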
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+	ret = gpio_request(GPIO_PH15, "net2272");
+ if (ret)
+ return ret;
+
+ ret = gpio_request(GPIO_PH13, "net2272");
+ if (ret) {
+ gpio_free(GPIO_PH15);
+ return ret;
+ }
+
+	/* Set PH15 low to make /AMS2 work properly */
+ gpio_direction_output(GPIO_PH15, 0);
+
+ /* enable CLKBUF output */
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PH13, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PH13, 1);
+#endif
+
+ return 0;
+}
+
static int __init cm_bf537u_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -742,6 +743,10 @@ static int __init cm_bf537u_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 6b4ff4605bff..8bc951de979d 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -130,7 +130,6 @@ static struct platform_device asmb_flash_device = {
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0, /* use no dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
@@ -161,7 +160,6 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
.enable_dma = 0, /* use no dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index bfb3671a78da..c62f9dccd9f7 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -159,14 +159,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 9389f03e3b0a..3b8151d99b9a 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -184,40 +184,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -248,18 +224,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
@@ -267,7 +231,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -288,7 +251,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 76db1d483173..b52e6728f64f 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -367,6 +367,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -533,49 +536,11 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD193X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
-static struct bfin5xx_spi_chip ad1938_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) \
- || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
-static struct bfin5xx_spi_chip adav801_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
#include <linux/input/ad714x.h>
-static struct bfin5xx_spi_chip ad7147_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
{
@@ -685,7 +650,6 @@ static struct ad714x_platform_data ad7142_i2c_platform_data = {
#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -697,7 +661,6 @@ static unsigned short ad2s120x_platform_data[] = {
static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -714,14 +677,12 @@ static unsigned short ad2s1210_platform_data[] = {
static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
static struct bfin5xx_spi_chip ad7314_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -735,7 +696,6 @@ static unsigned short ad7816_platform_data[] = {
static struct bfin5xx_spi_chip ad7816_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -749,7 +709,6 @@ static unsigned long adt7310_platform_data[3] = {
static struct bfin5xx_spi_chip adt7310_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -758,11 +717,6 @@ static unsigned short ad7298_platform_data[] = {
GPIO_PF7, /* busy_pin */
0,
};
-
-static struct bfin5xx_spi_chip ad7298_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
#endif
#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
@@ -773,7 +727,6 @@ static unsigned long adt7316_spi_data[2] = {
static struct bfin5xx_spi_chip adt7316_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -800,18 +753,12 @@ static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
.pio_interrupt = 0,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
#include <linux/spi/ad7877.h>
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -883,39 +830,13 @@ static const struct adxl34x_platform_data adxl34x_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
.enable_dma = 1,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
-static struct bfin5xx_spi_chip adf7021_spi_chip_info = {
- .bits_per_word = 16,
-};
-
#include <linux/spi/adf702x.h>
#define TXREG 0x0160A470
static const u32 adf7021_regs[] = {
@@ -959,10 +880,6 @@ static inline void adf702x_mac_init(void) {}
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
#include <linux/spi/ads7846.h>
-static struct bfin5xx_spi_chip ad7873_spi_chip_info = {
- .bits_per_word = 8,
-};
-
static int ads7873_get_pendown_state(void)
{
return gpio_get_value(GPIO_PF6);
@@ -1009,21 +926,12 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
/* DataFlash chip */
static struct bfin5xx_spi_chip data_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
- .enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE)
static struct bfin5xx_spi_chip spi_ad7476_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
@@ -1053,17 +961,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.mode = SPI_MODE_3,
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
@@ -1073,7 +970,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1084,7 +980,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &ad1938_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1095,7 +990,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &adav801_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1109,7 +1003,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.chip_select = 5,
.mode = SPI_MODE_3,
.platform_data = &ad7147_spi_platform_data,
- .controller_data = &ad7147_spi_chip_info,
},
#endif
@@ -1188,7 +1081,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad7298_platform_data,
- .controller_data = &ad7298_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1225,7 +1117,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -1236,7 +1127,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -1246,7 +1136,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -1255,7 +1144,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -1278,7 +1166,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_adxl34x_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1288,7 +1175,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
- .controller_data = &adf7021_spi_chip_info,
.platform_data = &adf7021_platform_data,
.mode = SPI_MODE_0,
},
@@ -1300,7 +1186,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.irq = IRQ_PF6,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
- .controller_data = &ad7873_spi_chip_info,
.platform_data = &ad7873_pdata,
.mode = SPI_MODE_0,
},
@@ -2632,9 +2517,25 @@ static struct resource bfin_snd_resources[][4] = {
BFIN_SND_RES(0),
BFIN_SND_RES(1),
};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+static struct platform_device bfin_i2s_pcm = {
+ .name = "bfin-i2s-pcm-audio",
+ .id = -1,
+};
+#endif
-static struct platform_device bfin_pcm = {
- .name = "bfin-pcm-audio",
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+static struct platform_device bfin_tdm_pcm = {
+ .name = "bfin-tdm-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+static struct platform_device bfin_ac97_pcm = {
+ .name = "bfin-ac97-pcm-audio",
.id = -1,
};
#endif
@@ -2869,10 +2770,16 @@ static struct platform_device *stamp_devices[] __initdata = {
&stamp_flash_device,
#endif
-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
- defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
- defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
- &bfin_pcm,
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+ &bfin_i2s_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+ &bfin_tdm_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+ &bfin_ac97_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
@@ -2916,6 +2823,24 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PF6, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PF6, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF6, 1);
+#endif
+
+ return 0;
+}
+
static int __init stamp_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -2926,6 +2851,9 @@ static int __init stamp_init(void)
ARRAY_SIZE(bfin_i2c_board_info));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 164a7e02c022..9b7287abdfa1 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -62,29 +62,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -102,24 +85,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
@@ -733,6 +704,24 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PG14, "net2272");
+ if (ret)
+ return ret;
+
+	/* Reset the USB chip via PG14 */
+ gpio_direction_output(GPIO_PG14, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PG14, 1);
+#endif
+
+ return 0;
+}
+
static int __init tcm_bf537_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -744,6 +733,10 @@ static int __init tcm_bf537_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 7f8e5a9f5db6..543cd3fb305e 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision E, 05/25/2010; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
+ * - Revision F, 05/23/2011; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -44,18 +44,12 @@
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
-#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
-/* Instruction Cache Is Not Functional */
-#define ANOMALY_05000237 (__SILICON_REVISION__ < 2)
/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* Buffered CLKIN Output Is Disabled by Default */
-#define ANOMALY_05000247 (1)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ < 3)
/* EMAC TX DMA Error After an Early Frame Abort */
@@ -98,7 +92,7 @@
#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2))
/* SPI Master Boot Mode Does Not Work Well with Atmel Data Flash Devices */
#define ANOMALY_05000280 (1)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 3)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 3)
@@ -162,9 +156,9 @@
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
#define ANOMALY_05000462 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
@@ -172,8 +166,26 @@
#define ANOMALY_05000480 (__SILICON_REVISION__ < 3)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * here to show running on older silicon just isn't feasible.
+ */
+
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
+#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
+/* Instruction Cache Is Not Functional */
+#define ANOMALY_05000237 (__SILICON_REVISION__ < 2)
+/* Buffered CLKIN Output Is Disabled by Default */
+#define ANOMALY_05000247 (__SILICON_REVISION__ < 2)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index e61424ef35eb..629f3c333415 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -502,7 +502,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
@@ -523,13 +522,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
@@ -559,20 +551,6 @@ static struct platform_device bfin_lq035q1_device = {
};
#endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bf538_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -595,7 +573,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -605,7 +582,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -615,7 +591,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf538/ext-gpio.c b/arch/blackfin/mach-bf538/ext-gpio.c
index 180b1252679f..471a9b184d5b 100644
--- a/arch/blackfin/mach-bf538/ext-gpio.c
+++ b/arch/blackfin/mach-bf538/ext-gpio.c
@@ -1,7 +1,7 @@
/*
* GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -121,3 +121,38 @@ static int __init bf538_extgpio_setup(void)
gpiochip_add(&bf538_porte_chip);
}
arch_initcall(bf538_extgpio_setup);
+
+#ifdef CONFIG_PM
+static struct {
+ u16 data, dir, inen;
+} gpio_bank_saved[3];
+
+static void __iomem * const port_bases[3] = {
+ (void *)PORTCIO,
+ (void *)PORTDIO,
+ (void *)PORTEIO,
+};
+
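+/*
+ * Hibernate powers the core down, so the PORT C/D/E pin state is saved
+ * here and replayed by hand on resume.
+ */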
+void bfin_special_gpio_pm_hibernate_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_bases); ++i) {
+ gpio_bank_saved[i].data = read_PORTIO(port_bases[i]);
+ gpio_bank_saved[i].inen = read_PORTIO_INEN(port_bases[i]);
+ gpio_bank_saved[i].dir = read_PORTIO_DIR(port_bases[i]);
+ }
+}
+
+void bfin_special_gpio_pm_hibernate_restore(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_bases); ++i) {
+ write_PORTIO_INEN(port_bases[i], gpio_bank_saved[i].inen);
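+		/*
+		 * Drive the saved levels only on pins that were outputs,
+		 * then restore the direction so they come up already driven.
+		 */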
+ write_PORTIO_SET(port_bases[i],
+ gpio_bank_saved[i].data & gpio_bank_saved[i].dir);
+ write_PORTIO_DIR(port_bases[i], gpio_bank_saved[i].dir);
+ }
+}
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 55e7d0712a94..b6ca99788710 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -11,8 +11,8 @@
*/
/* This file should be up to date with:
- * - Revision I, 05/25/2010; ADSP-BF538/BF538F Blackfin Processor Anomaly List
- * - Revision N, 05/25/2010; ADSP-BF539/BF539F Blackfin Processor Anomaly List
+ * - Revision J, 05/23/2011; ADSP-BF538/BF538F Blackfin Processor Anomaly List
+ * - Revision O, 05/23/2011; ADSP-BF539/BF539F Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -56,25 +56,21 @@
#define ANOMALY_05000229 (1)
/* PPI_FS3 Is Not Driven in 2 or 3 Internal Frame Sync Transmit Modes */
#define ANOMALY_05000233 (1)
-/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
-#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
/* Maximum External Clock Speed for Timers */
#define ANOMALY_05000253 (1)
-/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
-#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
#define ANOMALY_05000270 (__SILICON_REVISION__ < 4)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
-#define ANOMALY_05000272 (1)
+#define ANOMALY_05000272 (ANOMALY_BF538)
/* Writes to Synchronous SDRAM Memory May Be Lost */
#define ANOMALY_05000273 (__SILICON_REVISION__ < 4)
/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
@@ -102,8 +98,10 @@
#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
+/* PFx Glitch on Write to PORTFIO or PORTFIO_TOGGLE */
+#define ANOMALY_05000317 (__SILICON_REVISION__ < 4) /* XXX: Same as 05000318 */
/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
-#define ANOMALY_05000318 (ANOMALY_BF539 && __SILICON_REVISION__ < 4)
+#define ANOMALY_05000318 (__SILICON_REVISION__ < 4) /* XXX: Same as 05000317 */
/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
#define ANOMALY_05000355 (__SILICON_REVISION__ < 5)
/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
@@ -134,16 +132,32 @@
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
#define ANOMALY_05000462 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * here to show running on older silicon just isn't feasible.
+ */
+
+/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
+#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
+/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
+#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index 8a5beeece996..3561c7d8935b 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -8,7 +8,10 @@
#define _MACH_GPIO_H_
#define MAX_BLACKFIN_GPIOS 16
+#ifdef CONFIG_GPIOLIB
+/* We only use the special logic with GPIOLIB devices */
#define BFIN_SPECIAL_GPIO_BANKS 3
+#endif
#define GPIO_PF0 0 /* PF */
#define GPIO_PF1 1
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index d11502ac5623..212b9e0a08c8 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -861,16 +861,10 @@ static struct flash_platform_data bfin_spi_flash_data = {
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -886,13 +880,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bf54x_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -915,7 +902,6 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -924,7 +910,6 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 311bf9970fe7..cd9cbb68de69 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -1018,24 +1018,10 @@ static struct flash_platform_data bfin_spi_flash_data = {
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -1051,20 +1037,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
- .enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -1086,7 +1058,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 1,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
@@ -1097,7 +1068,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -1106,7 +1076,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
@@ -1117,7 +1086,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 1,
.chip_select = 2,
- .controller_data = &spi_adxl34x_chip_info,
.mode = SPI_MODE_3,
},
#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 9e70785bdde3..ac96ee83b00e 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision J, 06/03/2010; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
+ * - Revision K, 05/23/2011; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -29,117 +29,37 @@
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
/* Data Corruption/Core Hang with L2/L3 Configured in Writeback Cache Mode */
-#define ANOMALY_05000220 (1)
+#define ANOMALY_05000220 (__SILICON_REVISION__ < 4)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
-#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
-/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
-#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
-#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
-/* TWI Slave Boot Mode Is Not Functional */
-#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
/* FIFO Boot Mode Not Functional */
#define ANOMALY_05000325 (__SILICON_REVISION__ < 2)
-/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
-#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
-/* Incorrect Access of OTP_STATUS During otp_write() Function */
-#define ANOMALY_05000328 (__SILICON_REVISION__ < 1)
-/* Synchronous Burst Flash Boot Mode Is Not Functional */
-#define ANOMALY_05000329 (__SILICON_REVISION__ < 1)
-/* Host DMA Boot Modes Are Not Functional */
-#define ANOMALY_05000330 (__SILICON_REVISION__ < 1)
-/* Inadequate Timing Margins on DDR DQS to DQ and DQM Skew */
-#define ANOMALY_05000334 (__SILICON_REVISION__ < 1)
-/* Inadequate Rotary Debounce Logic Duration */
-#define ANOMALY_05000335 (__SILICON_REVISION__ < 1)
-/* Phantom Interrupt Occurs After First Configuration of Host DMA Port */
-#define ANOMALY_05000336 (__SILICON_REVISION__ < 1)
-/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
-#define ANOMALY_05000337 (__SILICON_REVISION__ < 1)
-/* Slave-Mode SPI0 MISO Failure With CPHA = 0 */
-#define ANOMALY_05000338 (__SILICON_REVISION__ < 1)
-/* If Memory Reads Are Enabled on SDH or HOSTDP, Other DMAC1 Peripherals Cannot Read */
-#define ANOMALY_05000340 (__SILICON_REVISION__ < 1)
-/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */
-#define ANOMALY_05000344 (__SILICON_REVISION__ < 1)
-/* USB Calibration Value Is Not Initialized */
-#define ANOMALY_05000346 (__SILICON_REVISION__ < 1)
-/* USB Calibration Value to use */
-#define ANOMALY_05000346_value 0x5411
-/* Preboot Routine Incorrectly Alters Reset Value of USB Register */
-#define ANOMALY_05000347 (__SILICON_REVISION__ < 1)
-/* Data Lost when Core Reads SDH Data FIFO */
-#define ANOMALY_05000349 (__SILICON_REVISION__ < 1)
-/* PLL Status Register Is Inaccurate */
-#define ANOMALY_05000351 (__SILICON_REVISION__ < 1)
/* bfrom_SysControl() Firmware Function Performs Improper System Reset */
/*
* Note: anomaly sheet says this is fixed with bf54x-0.2+, but testing
* shows that the fix itself does not cover all cases.
*/
#define ANOMALY_05000353 (1)
-/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
-#define ANOMALY_05000355 (__SILICON_REVISION__ < 1)
-/* System Stalled During A Core Access To AMC While A Core Access To NFC FIFO Is Required */
-#define ANOMALY_05000356 (__SILICON_REVISION__ < 1)
/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
#define ANOMALY_05000357 (1)
/* External Memory Read Access Hangs Core With PLL Bypass */
#define ANOMALY_05000360 (1)
/* DMAs that Go Urgent during Tight Core Writes to External Memory Are Blocked */
#define ANOMALY_05000365 (1)
-/* WURESET Bit In SYSCR Register Does Not Properly Indicate Hibernate Wake-Up */
-#define ANOMALY_05000367 (__SILICON_REVISION__ < 1)
/* Addressing Conflict between Boot ROM and Asynchronous Memory */
#define ANOMALY_05000369 (1)
-/* Default PLL MSEL and SSEL Settings Can Cause 400MHz Product To Violate Specifications */
-#define ANOMALY_05000370 (__SILICON_REVISION__ < 1)
/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
#define ANOMALY_05000371 (__SILICON_REVISION__ < 2)
-/* USB DP/DM Data Pins May Lose State When Entering Hibernate */
-#define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
/* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */
#define ANOMALY_05000378 (__SILICON_REVISION__ < 2)
/* 16-Bit NAND FLASH Boot Mode Is Not Functional */
#define ANOMALY_05000379 (1)
-/* 8-Bit NAND Flash Boot Mode Not Functional */
-#define ANOMALY_05000382 (__SILICON_REVISION__ < 1)
-/* Some ATAPI Modes Are Not Functional */
-#define ANOMALY_05000383 (1)
-/* Boot from OTP Memory Not Functional */
-#define ANOMALY_05000385 (__SILICON_REVISION__ < 1)
-/* bfrom_SysControl() Firmware Routine Not Functional */
-#define ANOMALY_05000386 (__SILICON_REVISION__ < 1)
-/* Programmable Preboot Settings Not Functional */
-#define ANOMALY_05000387 (__SILICON_REVISION__ < 1)
-/* CRC32 Checksum Support Not Functional */
-#define ANOMALY_05000388 (__SILICON_REVISION__ < 1)
-/* Reset Vector Must Not Be in SDRAM Memory Space */
-#define ANOMALY_05000389 (__SILICON_REVISION__ < 1)
-/* Changed Meaning of BCODE Field in SYSCR Register */
-#define ANOMALY_05000390 (__SILICON_REVISION__ < 1)
-/* Repeated Boot from Page-Mode or Burst-Mode Flash Memory May Fail */
-#define ANOMALY_05000391 (__SILICON_REVISION__ < 1)
-/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */
-#define ANOMALY_05000392 (__SILICON_REVISION__ < 1)
-/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */
-#define ANOMALY_05000393 (__SILICON_REVISION__ < 1)
-/* Log Buffer Not Functional */
-#define ANOMALY_05000394 (__SILICON_REVISION__ < 1)
-/* Hook Routine Not Functional */
-#define ANOMALY_05000395 (__SILICON_REVISION__ < 1)
-/* Header Indirect Bit Not Functional */
-#define ANOMALY_05000396 (__SILICON_REVISION__ < 1)
-/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */
-#define ANOMALY_05000397 (__SILICON_REVISION__ < 1)
/* Lockbox SESR Disallows Certain User Interrupts */
#define ANOMALY_05000404 (__SILICON_REVISION__ < 2)
/* Lockbox SESR Firmware Does Not Save/Restore Full Context */
@@ -161,7 +81,7 @@
/* Speculative Fetches Can Cause Undesired External FIFO Operations */
#define ANOMALY_05000416 (1)
/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
-#define ANOMALY_05000425 (1)
+#define ANOMALY_05000425 (__SILICON_REVISION__ < 4)
/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
#define ANOMALY_05000426 (1)
/* CORE_EPPI_PRIO bit and SYS_EPPI_PRIO bit in the HMDMA1_CONTROL register are not functional */
@@ -174,8 +94,6 @@
#define ANOMALY_05000431 (__SILICON_REVISION__ < 3)
/* SW Breakpoints Ignored Upon Return From Lockbox Authentication */
#define ANOMALY_05000434 (1)
-/* OTP Write Accesses Not Supported */
-#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
/* CDMAPRIO and L2DMAPRIO Bits in the SYSCR Register Are Not Functional */
@@ -186,34 +104,32 @@
#define ANOMALY_05000448 (__SILICON_REVISION__ == 1)
/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */
#define ANOMALY_05000449 (__SILICON_REVISION__ == 1)
-/* USB DMA Mode 1 Short Packet Data Corruption */
+/* USB DMA Short Packet Data Corruption */
#define ANOMALY_05000450 (1)
-/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
-#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
#define ANOMALY_05000456 (1)
/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */
#define ANOMALY_05000457 (1)
/* USB DMA Mode 1 Failure When Multiple USB DMA Channels Are Concurrently Enabled */
-#define ANOMALY_05000460 (1)
+#define ANOMALY_05000460 (__SILICON_REVISION__ < 4)
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
-#define ANOMALY_05000462 (1)
+#define ANOMALY_05000462 (__SILICON_REVISION__ < 4)
/* USB DMA RX Data Corruption */
-#define ANOMALY_05000463 (1)
+#define ANOMALY_05000463 (__SILICON_REVISION__ < 4)
/* USB TX DMA Hang */
-#define ANOMALY_05000464 (1)
-/* USB Rx DMA hang */
+#define ANOMALY_05000464 (__SILICON_REVISION__ < 4)
+/* USB Rx DMA Hang */
#define ANOMALY_05000465 (1)
/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */
-#define ANOMALY_05000466 (1)
-/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
-#define ANOMALY_05000467 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000466 (__SILICON_REVISION__ < 4)
+/* Possible USB RX Data Corruption When Control & Data EP FIFOs are Accessed via the Core */
+#define ANOMALY_05000467 (__SILICON_REVISION__ < 4)
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Access to DDR-SDRAM causes system hang under certain PLL/VR settings */
-#define ANOMALY_05000474 (1)
+/* Access to DDR SDRAM Causes System Hang with Certain PLL Settings */
+#define ANOMALY_05000474 (__SILICON_REVISION__ < 4)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
@@ -223,9 +139,111 @@
/* DDR Trim May Not Be Performed for Certain VLEV Values in OTP Page PBS00L */
#define ANOMALY_05000484 (__SILICON_REVISION__ < 3)
/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
-#define ANOMALY_05000485 (__SILICON_REVISION__ >= 2)
-/* IFLUSH sucks at life */
+#define ANOMALY_05000485 (__SILICON_REVISION__ > 1 && __SILICON_REVISION__ < 4)
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* SPI Master Boot Can Fail Under Certain Conditions */
+#define ANOMALY_05000490 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
+#define ANOMALY_05000498 (1)
+/* NAND Flash Controller Hangs When the AMC Requests the Async Pins During the Last 16 Bytes of a Page Write Operation */
+#define ANOMALY_05000500 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+/* Async Memory Writes May Be Skipped When Using Odd Clock Ratios */
+#define ANOMALY_05000502 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * kept here to show that running on older silicon just isn't feasible.
+ */
+
+/* False Hardware Error when ISR Context Is Not Restored */
+#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
+/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
+#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
+/* TWI Slave Boot Mode Is Not Functional */
+#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
+/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
+#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
+/* Incorrect Access of OTP_STATUS During otp_write() Function */
+#define ANOMALY_05000328 (__SILICON_REVISION__ < 1)
+/* Synchronous Burst Flash Boot Mode Is Not Functional */
+#define ANOMALY_05000329 (__SILICON_REVISION__ < 1)
+/* Host DMA Boot Modes Are Not Functional */
+#define ANOMALY_05000330 (__SILICON_REVISION__ < 1)
+/* Inadequate Timing Margins on DDR DQS to DQ and DQM Skew */
+#define ANOMALY_05000334 (__SILICON_REVISION__ < 1)
+/* Inadequate Rotary Debounce Logic Duration */
+#define ANOMALY_05000335 (__SILICON_REVISION__ < 1)
+/* Phantom Interrupt Occurs After First Configuration of Host DMA Port */
+#define ANOMALY_05000336 (__SILICON_REVISION__ < 1)
+/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
+#define ANOMALY_05000337 (__SILICON_REVISION__ < 1)
+/* Slave-Mode SPI0 MISO Failure With CPHA = 0 */
+#define ANOMALY_05000338 (__SILICON_REVISION__ < 1)
+/* If Memory Reads Are Enabled on SDH or HOSTDP, Other DMAC1 Peripherals Cannot Read */
+#define ANOMALY_05000340 (__SILICON_REVISION__ < 1)
+/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */
+#define ANOMALY_05000344 (__SILICON_REVISION__ < 1)
+/* USB Calibration Value Is Not Initialized */
+#define ANOMALY_05000346 (__SILICON_REVISION__ < 1)
+/* USB Calibration Value to use */
+#define ANOMALY_05000346_value 0x5411
+/* Preboot Routine Incorrectly Alters Reset Value of USB Register */
+#define ANOMALY_05000347 (__SILICON_REVISION__ < 1)
+/* Data Lost when Core Reads SDH Data FIFO */
+#define ANOMALY_05000349 (__SILICON_REVISION__ < 1)
+/* PLL Status Register Is Inaccurate */
+#define ANOMALY_05000351 (__SILICON_REVISION__ < 1)
+/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
+#define ANOMALY_05000355 (__SILICON_REVISION__ < 1)
+/* System Stalled During A Core Access To AMC While A Core Access To NFC FIFO Is Required */
+#define ANOMALY_05000356 (__SILICON_REVISION__ < 1)
+/* WURESET Bit In SYSCR Register Does Not Properly Indicate Hibernate Wake-Up */
+#define ANOMALY_05000367 (__SILICON_REVISION__ < 1)
+/* Default PLL MSEL and SSEL Settings Can Cause 400MHz Product To Violate Specifications */
+#define ANOMALY_05000370 (__SILICON_REVISION__ < 1)
+/* USB DP/DM Data Pins May Lose State When Entering Hibernate */
+#define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
+/* 8-Bit NAND Flash Boot Mode Not Functional */
+#define ANOMALY_05000382 (__SILICON_REVISION__ < 1)
+/* Boot from OTP Memory Not Functional */
+#define ANOMALY_05000385 (__SILICON_REVISION__ < 1)
+/* bfrom_SysControl() Firmware Routine Not Functional */
+#define ANOMALY_05000386 (__SILICON_REVISION__ < 1)
+/* Programmable Preboot Settings Not Functional */
+#define ANOMALY_05000387 (__SILICON_REVISION__ < 1)
+/* CRC32 Checksum Support Not Functional */
+#define ANOMALY_05000388 (__SILICON_REVISION__ < 1)
+/* Reset Vector Must Not Be in SDRAM Memory Space */
+#define ANOMALY_05000389 (__SILICON_REVISION__ < 1)
+/* Changed Meaning of BCODE Field in SYSCR Register */
+#define ANOMALY_05000390 (__SILICON_REVISION__ < 1)
+/* Repeated Boot from Page-Mode or Burst-Mode Flash Memory May Fail */
+#define ANOMALY_05000391 (__SILICON_REVISION__ < 1)
+/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */
+#define ANOMALY_05000392 (__SILICON_REVISION__ < 1)
+/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */
+#define ANOMALY_05000393 (__SILICON_REVISION__ < 1)
+/* Log Buffer Not Functional */
+#define ANOMALY_05000394 (__SILICON_REVISION__ < 1)
+/* Hook Routine Not Functional */
+#define ANOMALY_05000395 (__SILICON_REVISION__ < 1)
+/* Header Indirect Bit Not Functional */
+#define ANOMALY_05000396 (__SILICON_REVISION__ < 1)
+/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */
+#define ANOMALY_05000397 (__SILICON_REVISION__ < 1)
+/* OTP Write Accesses Not Supported */
+#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
+/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
+#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
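
Since every ANOMALY_ macro above folds to a compile-time constant, workaround code vanishes entirely on silicon where the erratum is fixed. A minimal consumer sketch (the function and its policy are hypothetical; only the macro comes from this header):

	#include <mach/anomaly.h>

	static int usb_multi_dma_ok(void)	/* hypothetical consumer */
	{
		/*
		 * ANOMALY_05000460 is now (__SILICON_REVISION__ < 4), so on
		 * rev 0.4+ parts this reads "if (0)" and the compiler drops
		 * the workaround; older revisions keep it.
		 */
		if (ANOMALY_05000460)
			return 0;	/* avoid concurrent USB DMA channels */
		return 1;
	}
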
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 7db433514e3f..35c8ced46158 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -170,6 +170,8 @@
#define MAX_BLACKFIN_GPIOS 160
+#define BFIN_GPIO_PINT 1
+
#ifndef __ASSEMBLY__
struct gpio_port_t {
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 533b8095b540..10dc142c518d 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -438,7 +438,7 @@
struct bfin_pint_regs {
u32 mask_set;
u32 mask_clear;
- u32 irq;
+ u32 request;
u32 assign;
u32 edge_set;
u32 edge_clear;
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 9231a942892b..972e1347c6bc 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -364,14 +364,6 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
/* DataFlash chip */
static struct bfin5xx_spi_chip data_flash_chip_info = {
.enable_dma = 0, /* don't use dma transfer with this chip */
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -420,7 +412,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 3,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 87595cd38afe..e4f397d1d65b 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -60,29 +60,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* don't use dma transfer with this chip */
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
-static struct bfin5xx_spi_chip mmc_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -100,24 +77,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -126,7 +91,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -532,6 +496,24 @@ static struct platform_device *cm_bf561_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PF46, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset USB Chip, PF46 */
+ gpio_direction_output(GPIO_PF46, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF46, 1);
+#endif
+
+ return 0;
+}
+
static int __init cm_bf561_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -543,6 +525,10 @@ static int __init cm_bf561_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
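
The same request/assert/delay/release sequence is added for the BF561-EZKIT below, so the reset pulse could live in one shared helper; a sketch under that assumption (net2272_reset() is hypothetical and not part of this patch):

	#include <linux/init.h>
	#include <linux/gpio.h>
	#include <linux/delay.h>

	static int __init net2272_reset(unsigned int gpio, const char *label)
	{
		int ret = gpio_request(gpio, label);
		if (ret)
			return ret;
		gpio_direction_output(gpio, 0);	/* assert reset */
		mdelay(2);			/* hold it for ~2ms */
		gpio_set_value(gpio, 1);	/* release reset */
		return 0;
	}
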
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 5067984a62e7..9490dc800ca5 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -108,6 +108,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x2C000000 + 0x7F,
.flags = IORESOURCE_MEM,
}, {
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF10,
.end = IRQ_PF10,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
@@ -283,21 +286,6 @@ static struct platform_device ezkit_flash_device = {
};
#endif
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
@@ -345,7 +333,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -355,7 +342,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
@@ -516,6 +502,24 @@ static struct platform_device *ezkit_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PF11, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PF11, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF11, 1);
+#endif
+
+ return 0;
+}
+
static int __init ezkit_init(void)
{
int ret;
@@ -542,6 +546,9 @@ static int __init ezkit_init(void)
udelay(400);
#endif
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
return 0;
}
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 22b5ab773027..836baeed303a 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision R, 05/25/2010; ADSP-BF561 Blackfin Processor Anomaly List
+ * - Revision S, 05/23/2011; ADSP-BF561 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -26,62 +26,16 @@
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
-/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
-#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
/* TESTSET Instructions Restricted to 32-Bit Aligned Memory Locations */
#define ANOMALY_05000120 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Erroneous Exception when Enabling Cache */
-#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
/* SIGNBITS Instruction Not Functional under Certain Conditions */
#define ANOMALY_05000127 (1)
-/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
-#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
-/* Enable wires from the Data Watchpoint Address Control Register (WPDACTL) are swapped */
-#define ANOMALY_05000135 (__SILICON_REVISION__ < 3)
-/* Stall in multi-unit DMA operations */
-#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
-/* Allowing the SPORT RX FIFO to fill will cause an overflow */
-#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
-#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
-/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
-#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
-/* DMA and TESTSET conflict when both are accessing external memory */
-#define ANOMALY_05000144 (__SILICON_REVISION__ < 3)
-/* In PWM_OUT mode, you must enable the PPI block to generate a waveform from PPI_CLK */
-#define ANOMALY_05000145 (__SILICON_REVISION__ < 3)
-/* MDMA may lose the first few words of a descriptor chain */
-#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
-/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
-#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
/* IMDMA S1/D1 Channel May Stall */
#define ANOMALY_05000149 (1)
-/* DMA engine may lose data due to incorrect handshaking */
-#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
-/* DMA stalls when all three controllers read data from the same source */
-#define ANOMALY_05000151 (__SILICON_REVISION__ < 3)
-/* Execution stall when executing in L2 and doing external accesses */
-#define ANOMALY_05000152 (__SILICON_REVISION__ < 3)
-/* Frame Delay in SPORT Multichannel Mode */
-#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
-/* SPORT TFS signal stays active in multichannel mode outside of valid channels */
-#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timers in PWM-Out Mode with PPI GP Receive (Input) Mode with 0 Frame Syncs */
#define ANOMALY_05000156 (__SILICON_REVISION__ < 4)
-/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
-#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
-/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
-#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
-/* A read from external memory may return a wrong value with data cache enabled */
-#define ANOMALY_05000160 (__SILICON_REVISION__ < 3)
-/* Data Cache Fill data can be corrupted after/during Instruction DMA if certain core stalls exist */
-#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
-/* DMEM_CONTROL<12> is not set on Reset */
-#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
-/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
-#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
@@ -92,10 +46,6 @@
#define ANOMALY_05000169 (__SILICON_REVISION__ < 5)
/* Boot-ROM Modifies SICA_IWRx Wakeup Registers */
#define ANOMALY_05000171 (__SILICON_REVISION__ < 5)
-/* DSPID register values incorrect */
-#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
-/* DMA vs Core accesses to external memory */
-#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
/* Cache Fill Buffer Data lost */
#define ANOMALY_05000174 (__SILICON_REVISION__ < 5)
/* Overlapping Sequencer and Memory Stalls */
@@ -124,8 +74,6 @@
#define ANOMALY_05000189 (__SILICON_REVISION__ < 5)
/* PPI Not Functional at Core Voltage < 1Volt */
#define ANOMALY_05000190 (1)
-/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
-#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */
#define ANOMALY_05000193 (__SILICON_REVISION__ < 5)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
@@ -217,10 +165,10 @@
/* Timing Requirements Change for External Frame Sync PPI Modes with Non-Zero PPI_DELAY */
#define ANOMALY_05000276 (__SILICON_REVISION__ < 5)
/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
-#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
+#define ANOMALY_05000277 (__SILICON_REVISION__ < 5)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
/* Temporary workaround for bug 5423 until this issue is confirmed by
 * the official anomaly document. It looks like 05000281 still exists
 * on bf561 v0.5.
@@ -274,8 +222,6 @@
#define ANOMALY_05000366 (1)
/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
#define ANOMALY_05000371 (1)
-/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
-#define ANOMALY_05000402 (__SILICON_REVISION__ == 4)
/* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */
#define ANOMALY_05000403 (1)
/* TESTSET Instruction Causes Data Corruption with Writeback Data Cache Enabled */
@@ -298,16 +244,82 @@
#define ANOMALY_05000462 (1)
/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
#define ANOMALY_05000471 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * kept here to show that running on older silicon just isn't feasible.
+ */
+
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
+#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
+/* Erroneous Exception when Enabling Cache */
+#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
+/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
+#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
+/* Enable wires from the Data Watchpoint Address Control Register (WPDACTL) are swapped */
+#define ANOMALY_05000135 (__SILICON_REVISION__ < 3)
+/* Stall in multi-unit DMA operations */
+#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
+/* Allowing the SPORT RX FIFO to fill will cause an overflow */
+#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
+#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
+/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
+#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
+/* DMA and TESTSET conflict when both are accessing external memory */
+#define ANOMALY_05000144 (__SILICON_REVISION__ < 3)
+/* In PWM_OUT mode, you must enable the PPI block to generate a waveform from PPI_CLK */
+#define ANOMALY_05000145 (__SILICON_REVISION__ < 3)
+/* MDMA may lose the first few words of a descriptor chain */
+#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
+/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
+#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
+/* DMA engine may lose data due to incorrect handshaking */
+#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
+/* DMA stalls when all three controllers read data from the same source */
+#define ANOMALY_05000151 (__SILICON_REVISION__ < 3)
+/* Execution stall when executing in L2 and doing external accesses */
+#define ANOMALY_05000152 (__SILICON_REVISION__ < 3)
+/* Frame Delay in SPORT Multichannel Mode */
+#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
+/* SPORT TFS signal stays active in multichannel mode outside of valid channels */
+#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
+#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
+/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
+#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
+/* A read from external memory may return a wrong value with data cache enabled */
+#define ANOMALY_05000160 (__SILICON_REVISION__ < 3)
+/* Data Cache Fill data can be corrupted after/during Instruction DMA if certain core stalls exist */
+#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
+/* DMEM_CONTROL<12> is not set on Reset */
+#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
+#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
+/* DSPID register values incorrect */
+#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
+/* DMA vs Core accesses to external memory */
+#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
+/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
+#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
+/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
+#define ANOMALY_05000402 (__SILICON_REVISION__ == 4)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000119 (0)
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
index 57d5eab59faf..f9f8b2adf4ba 100644
--- a/arch/blackfin/mach-bf561/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -58,9 +58,9 @@
#define GPIO_PF46 46
#define GPIO_PF47 47
-#define PORT_FIO0 GPIO_0
-#define PORT_FIO1 GPIO_16
-#define PORT_FIO2 GPIO_32
+#define PORT_FIO0 GPIO_PF0
+#define PORT_FIO1 GPIO_PF16
+#define PORT_FIO2 GPIO_PF32
#include <mach-common/ports-f.h>
diff --git a/arch/blackfin/mach-bf561/secondary.S b/arch/blackfin/mach-bf561/secondary.S
index 4c462838f4e1..01e5408620ac 100644
--- a/arch/blackfin/mach-bf561/secondary.S
+++ b/arch/blackfin/mach-bf561/secondary.S
@@ -23,108 +23,78 @@
#define INITIAL_STACK (COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
ENTRY(_coreb_trampoline_start)
- /* Set the SYSCFG register */
- R0 = 0x36;
- SYSCFG = R0; /*Enable Cycle Counter and Nesting Of Interrupts(3rd Bit)*/
- R0 = 0;
-
- /*Clear Out All the data and pointer Registers*/
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
- R7 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers*/
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
+ /* Enable Cycle Counter and Nesting Of Interrupts */
+#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
+ R0 = SYSCFG_SNEN;
+#else
+ R0 = SYSCFG_SNEN | SYSCFG_CCEN;
+#endif
+ SYSCFG = R0;
- trace_buffer_init(p0,r0);
+ /* Optimization register tricks: keep a base value in the
+ * reserved P registers so we can use the load/store with an
+ * offset syntax. R0 = [P5 + <constant>];
+ * P5 - core MMR base
+ * R6 - 0
+ */
+ r6 = 0;
+ p5.l = 0;
+ p5.h = hi(COREMMR_BASE);
- /* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
+ /* Zero out registers required by Blackfin ABI */
- /* Disabling of CPLBs should be proceeded by a CSYNC */
+ /* Disable circular buffers */
+ L0 = r6;
+ L1 = r6;
+ L2 = r6;
+ L3 = r6;
+
+ /* Disable hardware loops in case we were started by 'go' */
+ LC0 = r6;
+ LC1 = r6;
+
+ /*
+ * Clear the ITEST_COMMAND and DTEST_COMMAND registers;
+ * leaving these as non-zero can confuse the emulator.
+ */
+ [p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
+ [p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
CSYNC;
- [p0] = R0;
+
+ trace_buffer_init(p0,r0);
+
+ /* Turn off the icache */
+ r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
+ BITCLR (r1, ENICPLB_P);
+ [p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
-
- /* Disabling of CPLBs should be proceeded by a CSYNC */
- CSYNC;
- [p0] = R0;
+ r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
+ BITCLR (r1, ENDCPLB_P);
+ [p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* in case of double faults, save a few things */
- p0.l = _init_retx_coreb;
- p0.h = _init_retx_coreb;
- R0 = RETX;
- [P0] = R0;
-
+ p1.l = _initial_pda_coreb;
+ p1.h = _initial_pda_coreb;
+ r4 = RETX;
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* Only save these if we are storing them;
 * this happens here, since L1 gets clobbered
* below
*/
GET_PDA(p0, r0);
- r7 = [p0 + PDA_DF_RETX];
- p1.l = _init_saved_retx_coreb;
- p1.h = _init_saved_retx_coreb;
- [p1] = r7;
-
- r7 = [p0 + PDA_DF_DCPLB];
- p1.l = _init_saved_dcplb_fault_addr_coreb;
- p1.h = _init_saved_dcplb_fault_addr_coreb;
- [p1] = r7;
-
- r7 = [p0 + PDA_DF_ICPLB];
- p1.l = _init_saved_icplb_fault_addr_coreb;
- p1.h = _init_saved_icplb_fault_addr_coreb;
- [p1] = r7;
-
- r7 = [p0 + PDA_DF_SEQSTAT];
- p1.l = _init_saved_seqstat_coreb;
- p1.h = _init_saved_seqstat_coreb;
- [p1] = r7;
+ r0 = [p0 + PDA_DF_RETX];
+ r1 = [p0 + PDA_DF_DCPLB];
+ r2 = [p0 + PDA_DF_ICPLB];
+ r3 = [p0 + PDA_DF_SEQSTAT];
+ [p1 + PDA_INIT_DF_RETX] = r0;
+ [p1 + PDA_INIT_DF_DCPLB] = r1;
+ [p1 + PDA_INIT_DF_ICPLB] = r2;
+ [p1 + PDA_INIT_DF_SEQSTAT] = r3;
#endif
+ [p1 + PDA_INIT_RETX] = r4;
/* Initialize stack pointer */
sp.l = lo(INITIAL_STACK);
@@ -138,19 +108,13 @@ ENTRY(_coreb_trampoline_start)
/* EVT15 = _real_start */
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
p1.l = _coreb_start;
p1.h = _coreb_start;
- [p0] = p1;
+ [p5 + (EVT15 - COREMMR_BASE)] = p1;
csync;
- p0.l = lo(IMASK);
- p0.h = hi(IMASK);
- p1.l = IMASK_IVG15;
- p1.h = 0x0;
- [p0] = p1;
- csync;
+ r0 = EVT_IVG15 (z);
+ sti r0;
raise 15;
p0.l = .LWAIT_HERE;
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 9cfdd49a3127..1c534d298de4 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -12,8 +12,8 @@
.section .l1.text
ENTRY(_sleep_mode)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
+ [--SP] = (R7:4, P5:3);
+ [--SP] = RETS;
call _set_sic_iwr;
@@ -46,15 +46,25 @@ ENTRY(_sleep_mode)
call _test_pll_locked;
RETS = [SP++];
- ( R7:0, P5:0 ) = [SP++];
+ (R7:4, P5:3) = [SP++];
RTS;
ENDPROC(_sleep_mode)
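
An annotation on the trimmed save sets above, in the file's own comment style:

	/*
	 * Why (R7:4, P5:3) is enough: the Blackfin C ABI treats R0-R3 and
	 * P0-P2 as caller-saved scratch, so functions entered from C need
	 * only preserve R7:4 and P5:3 (plus RETS) across the call.
	 */
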
+/*
+ * This func never returns as it puts the part into hibernate, and
+ * is only called from do_hibernate, so we don't bother saving or
+ * restoring any of the normal C runtime state. When we wake up,
+ * the entry point will be in do_hibernate and not here.
+ *
+ * We accept just one argument -- the value to write to VR_CTL.
+ */
ENTRY(_hibernate_mode)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
+ /* Save/set up the regs we need early for minor pipeline optimization */
+ R4 = R0;
+ P3.H = hi(VR_CTL);
+ P3.L = lo(VR_CTL);
- R3 = R0;
+ /* Disable all wakeup sources */
R0 = IWR_DISABLE_ALL;
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
@@ -62,10 +72,8 @@ ENTRY(_hibernate_mode)
call _set_dram_srfs;
SSYNC;
- P0.H = hi(VR_CTL);
- P0.L = lo(VR_CTL);
-
- W[P0] = R3.L;
+ /* Finally, we climb into our cave to hibernate */
+ W[P3] = R4.L;
CLI R2;
IDLE;
.Lforever:
@@ -73,8 +81,8 @@ ENTRY(_hibernate_mode)
ENDPROC(_hibernate_mode)
ENTRY(_sleep_deeper)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
+ [--SP] = (R7:4, P5:3);
+ [--SP] = RETS;
CLI R4;
@@ -167,7 +175,7 @@ ENTRY(_sleep_deeper)
STI R4;
RETS = [SP++];
- ( R7:0, P5:0 ) = [SP++];
+ (R7:4, P5:3) = [SP++];
RTS;
ENDPROC(_sleep_deeper)
@@ -188,21 +196,20 @@ ENTRY(_set_dram_srfs)
#else /* SDRAM */
P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
+ P1.L = lo(EBIU_SDSTAT);
+ P1.H = hi(EBIU_SDSTAT);
+
R2 = [P0];
BITSET(R2, 24); /* SRFS enter self-refresh mode */
[P0] = R2;
SSYNC;
- P0.L = lo(EBIU_SDSTAT);
- P0.H = hi(EBIU_SDSTAT);
1:
- R2 = w[P0];
+ R2 = w[P1];
SSYNC;
cc = BITTST(R2, 1); /* SDSRA poll self-refresh status */
if !cc jump 1b;
- P0.L = lo(EBIU_SDGCTL);
- P0.H = hi(EBIU_SDGCTL);
R2 = [P0];
BITCLR(R2, 0); /* SCTLE disable CLKOUT */
[P0] = R2;
@@ -212,6 +219,7 @@ ENDPROC(_set_dram_srfs)
ENTRY(_unset_dram_srfs)
/* set the dram out of self refresh mode */
+
#if defined(EBIU_RSTCTL) /* DDR */
P0.H = hi(EBIU_RSTCTL);
P0.L = lo(EBIU_RSTCTL);
@@ -219,42 +227,39 @@ ENTRY(_unset_dram_srfs)
BITCLR(R2, 3); /* clear SRREQ bit */
[P0] = R2;
#elif defined(EBIU_SDGCTL) /* SDRAM */
-
- P0.L = lo(EBIU_SDGCTL); /* release CLKOUT from self-refresh */
+ /* release CLKOUT from self-refresh */
+ P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
+
R2 = [P0];
BITSET(R2, 0); /* SCTLE enable CLKOUT */
[P0] = R2
SSYNC;
- P0.L = lo(EBIU_SDGCTL); /* release SDRAM from self-refresh */
- P0.H = hi(EBIU_SDGCTL);
+ /* release SDRAM from self-refresh */
R2 = [P0];
BITCLR(R2, 24); /* clear SRFS bit */
[P0] = R2
#endif
+
SSYNC;
RTS;
ENDPROC(_unset_dram_srfs)
ENTRY(_set_sic_iwr)
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \
- defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
- P0.H = hi(SIC_IWR0);
- P0.L = lo(SIC_IWR0);
- P1.H = hi(SIC_IWR1);
- P1.L = lo(SIC_IWR1);
- [P1] = R1;
-#if defined(CONFIG_BF54x)
- P1.H = hi(SIC_IWR2);
- P1.L = lo(SIC_IWR2);
- [P1] = R2;
-#endif
+#ifdef SIC_IWR0
+ P0.H = hi(SYSMMR_BASE);
+ P0.L = lo(SYSMMR_BASE);
+ [P0 + (SIC_IWR0 - SYSMMR_BASE)] = R0;
+ [P0 + (SIC_IWR1 - SYSMMR_BASE)] = R1;
+# ifdef SIC_IWR2
+ [P0 + (SIC_IWR2 - SYSMMR_BASE)] = R2;
+# endif
#else
P0.H = hi(SIC_IWR);
P0.L = lo(SIC_IWR);
-#endif
[P0] = R0;
+#endif
SSYNC;
RTS;
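
In C terms the rewritten _set_sic_iwr is equivalent to the following (an illustrative translation, assuming the usual bfin_write32() accessor and SSYNC() macro; the real code stays in assembly for the low-power path):

	static void set_sic_iwr_c(u32 iwr0, u32 iwr1, u32 iwr2)
	{
	#ifdef SIC_IWR0
		bfin_write32(SIC_IWR0, iwr0);
		bfin_write32(SIC_IWR1, iwr1);
	# ifdef SIC_IWR2
		bfin_write32(SIC_IWR2, iwr2);
	# endif
	#else
		bfin_write32(SIC_IWR, iwr0);
	#endif
		SSYNC();
	}
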
@@ -272,206 +277,55 @@ ENDPROC(_test_pll_locked)
.section .text
-ENTRY(_do_hibernate)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
- /* Save System MMRs */
- R2 = R0;
- P0.H = hi(PLL_CTL);
- P0.L = lo(PLL_CTL);
-
-#ifdef SIC_IMASK0
- PM_SYS_PUSH(SIC_IMASK0)
-#endif
-#ifdef SIC_IMASK1
- PM_SYS_PUSH(SIC_IMASK1)
-#endif
-#ifdef SIC_IMASK2
- PM_SYS_PUSH(SIC_IMASK2)
-#endif
-#ifdef SIC_IMASK
- PM_SYS_PUSH(SIC_IMASK)
-#endif
-#ifdef SIC_IAR0
- PM_SYS_PUSH(SIC_IAR0)
- PM_SYS_PUSH(SIC_IAR1)
- PM_SYS_PUSH(SIC_IAR2)
-#endif
-#ifdef SIC_IAR3
- PM_SYS_PUSH(SIC_IAR3)
-#endif
-#ifdef SIC_IAR4
- PM_SYS_PUSH(SIC_IAR4)
- PM_SYS_PUSH(SIC_IAR5)
- PM_SYS_PUSH(SIC_IAR6)
-#endif
-#ifdef SIC_IAR7
- PM_SYS_PUSH(SIC_IAR7)
-#endif
-#ifdef SIC_IAR8
- PM_SYS_PUSH(SIC_IAR8)
- PM_SYS_PUSH(SIC_IAR9)
- PM_SYS_PUSH(SIC_IAR10)
- PM_SYS_PUSH(SIC_IAR11)
-#endif
+#define PM_REG0 R7
+#define PM_REG1 R6
+#define PM_REG2 R5
+#define PM_REG3 R4
+#define PM_REG4 R3
+#define PM_REG5 R2
+#define PM_REG6 R1
+#define PM_REG7 R0
+#define PM_REG8 P5
+#define PM_REG9 P4
+#define PM_REG10 P3
+#define PM_REG11 P2
+#define PM_REG12 P1
+#define PM_REG13 P0
+
+#define PM_REGSET0 R7:7
+#define PM_REGSET1 R7:6
+#define PM_REGSET2 R7:5
+#define PM_REGSET3 R7:4
+#define PM_REGSET4 R7:3
+#define PM_REGSET5 R7:2
+#define PM_REGSET6 R7:1
+#define PM_REGSET7 R7:0
+#define PM_REGSET8 R7:0, P5:5
+#define PM_REGSET9 R7:0, P5:4
+#define PM_REGSET10 R7:0, P5:3
+#define PM_REGSET11 R7:0, P5:2
+#define PM_REGSET12 R7:0, P5:1
+#define PM_REGSET13 R7:0, P5:0
+
+#define _PM_PUSH(n, x, w, base) PM_REG##n = w[FP + ((x) - (base))];
+#define _PM_POP(n, x, w, base) w[FP + ((x) - (base))] = PM_REG##n;
+#define PM_PUSH_SYNC(n) [--sp] = (PM_REGSET##n);
+#define PM_POP_SYNC(n) (PM_REGSET##n) = [sp++];
+#define PM_PUSH(n, x) PM_REG##n = [FP++];
+#define PM_POP(n, x) [FP--] = PM_REG##n;
+#define PM_CORE_PUSH(n, x) _PM_PUSH(n, x, , COREMMR_BASE)
+#define PM_CORE_POP(n, x) _PM_POP(n, x, , COREMMR_BASE)
+#define PM_SYS_PUSH(n, x) _PM_PUSH(n, x, , SYSMMR_BASE)
+#define PM_SYS_POP(n, x) _PM_POP(n, x, , SYSMMR_BASE)
+#define PM_SYS_PUSH16(n, x) _PM_PUSH(n, x, w, SYSMMR_BASE)
+#define PM_SYS_POP16(n, x) _PM_POP(n, x, w, SYSMMR_BASE)
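
Expanding one pair of the macros above makes the batching scheme concrete (derived mechanically from the definitions):

	/*
	 * PM_SYS_PUSH(0, SIC_IMASK0) expands to:
	 *	R7 = [FP + ((SIC_IMASK0) - (SYSMMR_BASE))];
	 * and PM_PUSH_SYNC(13) expands to:
	 *	[--sp] = (R7:0, P5:0);
	 * so up to 14 MMRs are gathered into R7..R0/P5..P0 and spilled with
	 * one multi-register store instead of one push per MMR.
	 */
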
-#ifdef SIC_IWR
- PM_SYS_PUSH(SIC_IWR)
-#endif
-#ifdef SIC_IWR0
- PM_SYS_PUSH(SIC_IWR0)
-#endif
-#ifdef SIC_IWR1
- PM_SYS_PUSH(SIC_IWR1)
-#endif
-#ifdef SIC_IWR2
- PM_SYS_PUSH(SIC_IWR2)
-#endif
-
-#ifdef PINT0_ASSIGN
- PM_SYS_PUSH(PINT0_MASK_SET)
- PM_SYS_PUSH(PINT1_MASK_SET)
- PM_SYS_PUSH(PINT2_MASK_SET)
- PM_SYS_PUSH(PINT3_MASK_SET)
- PM_SYS_PUSH(PINT0_ASSIGN)
- PM_SYS_PUSH(PINT1_ASSIGN)
- PM_SYS_PUSH(PINT2_ASSIGN)
- PM_SYS_PUSH(PINT3_ASSIGN)
- PM_SYS_PUSH(PINT0_INVERT_SET)
- PM_SYS_PUSH(PINT1_INVERT_SET)
- PM_SYS_PUSH(PINT2_INVERT_SET)
- PM_SYS_PUSH(PINT3_INVERT_SET)
- PM_SYS_PUSH(PINT0_EDGE_SET)
- PM_SYS_PUSH(PINT1_EDGE_SET)
- PM_SYS_PUSH(PINT2_EDGE_SET)
- PM_SYS_PUSH(PINT3_EDGE_SET)
-#endif
-
- PM_SYS_PUSH(EBIU_AMBCTL0)
- PM_SYS_PUSH(EBIU_AMBCTL1)
- PM_SYS_PUSH16(EBIU_AMGCTL)
-
-#ifdef EBIU_FCTL
- PM_SYS_PUSH(EBIU_MBSCTL)
- PM_SYS_PUSH(EBIU_MODE)
- PM_SYS_PUSH(EBIU_FCTL)
-#endif
-
-#ifdef PORTCIO_FER
- PM_SYS_PUSH16(PORTCIO_DIR)
- PM_SYS_PUSH16(PORTCIO_INEN)
- PM_SYS_PUSH16(PORTCIO)
- PM_SYS_PUSH16(PORTCIO_FER)
- PM_SYS_PUSH16(PORTDIO_DIR)
- PM_SYS_PUSH16(PORTDIO_INEN)
- PM_SYS_PUSH16(PORTDIO)
- PM_SYS_PUSH16(PORTDIO_FER)
- PM_SYS_PUSH16(PORTEIO_DIR)
- PM_SYS_PUSH16(PORTEIO_INEN)
- PM_SYS_PUSH16(PORTEIO)
- PM_SYS_PUSH16(PORTEIO_FER)
-#endif
-
- PM_SYS_PUSH16(SYSCR)
-
- /* Save Core MMRs */
- P0.H = hi(SRAM_BASE_ADDRESS);
- P0.L = lo(SRAM_BASE_ADDRESS);
-
- PM_PUSH(DMEM_CONTROL)
- PM_PUSH(DCPLB_ADDR0)
- PM_PUSH(DCPLB_ADDR1)
- PM_PUSH(DCPLB_ADDR2)
- PM_PUSH(DCPLB_ADDR3)
- PM_PUSH(DCPLB_ADDR4)
- PM_PUSH(DCPLB_ADDR5)
- PM_PUSH(DCPLB_ADDR6)
- PM_PUSH(DCPLB_ADDR7)
- PM_PUSH(DCPLB_ADDR8)
- PM_PUSH(DCPLB_ADDR9)
- PM_PUSH(DCPLB_ADDR10)
- PM_PUSH(DCPLB_ADDR11)
- PM_PUSH(DCPLB_ADDR12)
- PM_PUSH(DCPLB_ADDR13)
- PM_PUSH(DCPLB_ADDR14)
- PM_PUSH(DCPLB_ADDR15)
- PM_PUSH(DCPLB_DATA0)
- PM_PUSH(DCPLB_DATA1)
- PM_PUSH(DCPLB_DATA2)
- PM_PUSH(DCPLB_DATA3)
- PM_PUSH(DCPLB_DATA4)
- PM_PUSH(DCPLB_DATA5)
- PM_PUSH(DCPLB_DATA6)
- PM_PUSH(DCPLB_DATA7)
- PM_PUSH(DCPLB_DATA8)
- PM_PUSH(DCPLB_DATA9)
- PM_PUSH(DCPLB_DATA10)
- PM_PUSH(DCPLB_DATA11)
- PM_PUSH(DCPLB_DATA12)
- PM_PUSH(DCPLB_DATA13)
- PM_PUSH(DCPLB_DATA14)
- PM_PUSH(DCPLB_DATA15)
- PM_PUSH(IMEM_CONTROL)
- PM_PUSH(ICPLB_ADDR0)
- PM_PUSH(ICPLB_ADDR1)
- PM_PUSH(ICPLB_ADDR2)
- PM_PUSH(ICPLB_ADDR3)
- PM_PUSH(ICPLB_ADDR4)
- PM_PUSH(ICPLB_ADDR5)
- PM_PUSH(ICPLB_ADDR6)
- PM_PUSH(ICPLB_ADDR7)
- PM_PUSH(ICPLB_ADDR8)
- PM_PUSH(ICPLB_ADDR9)
- PM_PUSH(ICPLB_ADDR10)
- PM_PUSH(ICPLB_ADDR11)
- PM_PUSH(ICPLB_ADDR12)
- PM_PUSH(ICPLB_ADDR13)
- PM_PUSH(ICPLB_ADDR14)
- PM_PUSH(ICPLB_ADDR15)
- PM_PUSH(ICPLB_DATA0)
- PM_PUSH(ICPLB_DATA1)
- PM_PUSH(ICPLB_DATA2)
- PM_PUSH(ICPLB_DATA3)
- PM_PUSH(ICPLB_DATA4)
- PM_PUSH(ICPLB_DATA5)
- PM_PUSH(ICPLB_DATA6)
- PM_PUSH(ICPLB_DATA7)
- PM_PUSH(ICPLB_DATA8)
- PM_PUSH(ICPLB_DATA9)
- PM_PUSH(ICPLB_DATA10)
- PM_PUSH(ICPLB_DATA11)
- PM_PUSH(ICPLB_DATA12)
- PM_PUSH(ICPLB_DATA13)
- PM_PUSH(ICPLB_DATA14)
- PM_PUSH(ICPLB_DATA15)
- PM_PUSH(EVT0)
- PM_PUSH(EVT1)
- PM_PUSH(EVT2)
- PM_PUSH(EVT3)
- PM_PUSH(EVT4)
- PM_PUSH(EVT5)
- PM_PUSH(EVT6)
- PM_PUSH(EVT7)
- PM_PUSH(EVT8)
- PM_PUSH(EVT9)
- PM_PUSH(EVT10)
- PM_PUSH(EVT11)
- PM_PUSH(EVT12)
- PM_PUSH(EVT13)
- PM_PUSH(EVT14)
- PM_PUSH(EVT15)
- PM_PUSH(IMASK)
- PM_PUSH(ILAT)
- PM_PUSH(IPRIO)
- PM_PUSH(TCNTL)
- PM_PUSH(TPERIOD)
- PM_PUSH(TSCALE)
- PM_PUSH(TCOUNT)
- PM_PUSH(TBUFCTL)
-
- /* Save Core Registers */
- [--sp] = SYSCFG;
- [--sp] = ( R7:0, P5:0 );
+ENTRY(_do_hibernate)
+ /*
+ * Save the core regs early so we can blow them away when
+ * saving/restoring MMR states
+ */
+ [--sp] = (R7:0, P5:0);
[--sp] = fp;
[--sp] = usp;
@@ -506,47 +360,497 @@ ENTRY(_do_hibernate)
[--sp] = LB0;
[--sp] = LB1;
+ /* We can't push RETI directly as that'll change IPEND[4] */
+ r7 = RETI;
+ [--sp] = RETS;
[--sp] = ASTAT;
[--sp] = CYCLES;
[--sp] = CYCLES2;
-
- [--sp] = RETS;
- r0 = RETI;
- [--sp] = r0;
+ [--sp] = SYSCFG;
[--sp] = RETX;
- [--sp] = RETN;
- [--sp] = RETE;
[--sp] = SEQSTAT;
+ [--sp] = r7;
+
+ /* Save first func arg in M3 */
+ M3 = R0;
+
+ /* Save system MMRs */
+ FP.H = hi(SYSMMR_BASE);
+ FP.L = lo(SYSMMR_BASE);
+
+#ifdef SIC_IMASK0
+ PM_SYS_PUSH(0, SIC_IMASK0)
+ PM_SYS_PUSH(1, SIC_IMASK1)
+# ifdef SIC_IMASK2
+ PM_SYS_PUSH(2, SIC_IMASK2)
+# endif
+#else
+ PM_SYS_PUSH(0, SIC_IMASK)
+#endif
+#ifdef SIC_IAR0
+ PM_SYS_PUSH(3, SIC_IAR0)
+ PM_SYS_PUSH(4, SIC_IAR1)
+ PM_SYS_PUSH(5, SIC_IAR2)
+#endif
+#ifdef SIC_IAR3
+ PM_SYS_PUSH(6, SIC_IAR3)
+#endif
+#ifdef SIC_IAR4
+ PM_SYS_PUSH(7, SIC_IAR4)
+ PM_SYS_PUSH(8, SIC_IAR5)
+ PM_SYS_PUSH(9, SIC_IAR6)
+#endif
+#ifdef SIC_IAR7
+ PM_SYS_PUSH(10, SIC_IAR7)
+#endif
+#ifdef SIC_IAR8
+ PM_SYS_PUSH(11, SIC_IAR8)
+ PM_SYS_PUSH(12, SIC_IAR9)
+ PM_SYS_PUSH(13, SIC_IAR10)
+#endif
+ PM_PUSH_SYNC(13)
+#ifdef SIC_IAR11
+ PM_SYS_PUSH(0, SIC_IAR11)
+#endif
+
+#ifdef SIC_IWR
+ PM_SYS_PUSH(1, SIC_IWR)
+#endif
+#ifdef SIC_IWR0
+ PM_SYS_PUSH(1, SIC_IWR0)
+#endif
+#ifdef SIC_IWR1
+ PM_SYS_PUSH(2, SIC_IWR1)
+#endif
+#ifdef SIC_IWR2
+ PM_SYS_PUSH(3, SIC_IWR2)
+#endif
+
+#ifdef PINT0_ASSIGN
+ PM_SYS_PUSH(4, PINT0_MASK_SET)
+ PM_SYS_PUSH(5, PINT1_MASK_SET)
+ PM_SYS_PUSH(6, PINT2_MASK_SET)
+ PM_SYS_PUSH(7, PINT3_MASK_SET)
+ PM_SYS_PUSH(8, PINT0_ASSIGN)
+ PM_SYS_PUSH(9, PINT1_ASSIGN)
+ PM_SYS_PUSH(10, PINT2_ASSIGN)
+ PM_SYS_PUSH(11, PINT3_ASSIGN)
+ PM_SYS_PUSH(12, PINT0_INVERT_SET)
+ PM_SYS_PUSH(13, PINT1_INVERT_SET)
+ PM_PUSH_SYNC(13)
+ PM_SYS_PUSH(0, PINT2_INVERT_SET)
+ PM_SYS_PUSH(1, PINT3_INVERT_SET)
+ PM_SYS_PUSH(2, PINT0_EDGE_SET)
+ PM_SYS_PUSH(3, PINT1_EDGE_SET)
+ PM_SYS_PUSH(4, PINT2_EDGE_SET)
+ PM_SYS_PUSH(5, PINT3_EDGE_SET)
+#endif
+
+ PM_SYS_PUSH16(6, SYSCR)
+
+ PM_SYS_PUSH16(7, EBIU_AMGCTL)
+ PM_SYS_PUSH(8, EBIU_AMBCTL0)
+ PM_SYS_PUSH(9, EBIU_AMBCTL1)
+#ifdef EBIU_FCTL
+ PM_SYS_PUSH(10, EBIU_MBSCTL)
+ PM_SYS_PUSH(11, EBIU_MODE)
+ PM_SYS_PUSH(12, EBIU_FCTL)
+ PM_PUSH_SYNC(12)
+#else
+ PM_PUSH_SYNC(9)
+#endif
+
+ /* Save Core MMRs */
+ I0.H = hi(COREMMR_BASE);
+ I0.L = lo(COREMMR_BASE);
+ I1 = I0;
+ I2 = I0;
+ I3 = I0;
+ B0 = I0;
+ B1 = I0;
+ B2 = I0;
+ B3 = I0;
+ I1.L = lo(DCPLB_ADDR0);
+ I2.L = lo(DCPLB_DATA0);
+ I3.L = lo(ICPLB_ADDR0);
+ B0.L = lo(ICPLB_DATA0);
+ B1.L = lo(EVT2);
+ B2.L = lo(IMASK);
+ B3.L = lo(TCNTL);
+
+ /* DCPLB Addr */
+ FP = I1;
+ PM_PUSH(0, DCPLB_ADDR0)
+ PM_PUSH(1, DCPLB_ADDR1)
+ PM_PUSH(2, DCPLB_ADDR2)
+ PM_PUSH(3, DCPLB_ADDR3)
+ PM_PUSH(4, DCPLB_ADDR4)
+ PM_PUSH(5, DCPLB_ADDR5)
+ PM_PUSH(6, DCPLB_ADDR6)
+ PM_PUSH(7, DCPLB_ADDR7)
+ PM_PUSH(8, DCPLB_ADDR8)
+ PM_PUSH(9, DCPLB_ADDR9)
+ PM_PUSH(10, DCPLB_ADDR10)
+ PM_PUSH(11, DCPLB_ADDR11)
+ PM_PUSH(12, DCPLB_ADDR12)
+ PM_PUSH(13, DCPLB_ADDR13)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, DCPLB_ADDR14)
+ PM_PUSH(1, DCPLB_ADDR15)
+
+ /* DCPLB Data */
+ FP = I2;
+ PM_PUSH(2, DCPLB_DATA0)
+ PM_PUSH(3, DCPLB_DATA1)
+ PM_PUSH(4, DCPLB_DATA2)
+ PM_PUSH(5, DCPLB_DATA3)
+ PM_PUSH(6, DCPLB_DATA4)
+ PM_PUSH(7, DCPLB_DATA5)
+ PM_PUSH(8, DCPLB_DATA6)
+ PM_PUSH(9, DCPLB_DATA7)
+ PM_PUSH(10, DCPLB_DATA8)
+ PM_PUSH(11, DCPLB_DATA9)
+ PM_PUSH(12, DCPLB_DATA10)
+ PM_PUSH(13, DCPLB_DATA11)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, DCPLB_DATA12)
+ PM_PUSH(1, DCPLB_DATA13)
+ PM_PUSH(2, DCPLB_DATA14)
+ PM_PUSH(3, DCPLB_DATA15)
+
+ /* ICPLB Addr */
+ FP = I3;
+ PM_PUSH(4, ICPLB_ADDR0)
+ PM_PUSH(5, ICPLB_ADDR1)
+ PM_PUSH(6, ICPLB_ADDR2)
+ PM_PUSH(7, ICPLB_ADDR3)
+ PM_PUSH(8, ICPLB_ADDR4)
+ PM_PUSH(9, ICPLB_ADDR5)
+ PM_PUSH(10, ICPLB_ADDR6)
+ PM_PUSH(11, ICPLB_ADDR7)
+ PM_PUSH(12, ICPLB_ADDR8)
+ PM_PUSH(13, ICPLB_ADDR9)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, ICPLB_ADDR10)
+ PM_PUSH(1, ICPLB_ADDR11)
+ PM_PUSH(2, ICPLB_ADDR12)
+ PM_PUSH(3, ICPLB_ADDR13)
+ PM_PUSH(4, ICPLB_ADDR14)
+ PM_PUSH(5, ICPLB_ADDR15)
+
+ /* ICPLB Data */
+ FP = B0;
+ PM_PUSH(6, ICPLB_DATA0)
+ PM_PUSH(7, ICPLB_DATA1)
+ PM_PUSH(8, ICPLB_DATA2)
+ PM_PUSH(9, ICPLB_DATA3)
+ PM_PUSH(10, ICPLB_DATA4)
+ PM_PUSH(11, ICPLB_DATA5)
+ PM_PUSH(12, ICPLB_DATA6)
+ PM_PUSH(13, ICPLB_DATA7)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, ICPLB_DATA8)
+ PM_PUSH(1, ICPLB_DATA9)
+ PM_PUSH(2, ICPLB_DATA10)
+ PM_PUSH(3, ICPLB_DATA11)
+ PM_PUSH(4, ICPLB_DATA12)
+ PM_PUSH(5, ICPLB_DATA13)
+ PM_PUSH(6, ICPLB_DATA14)
+ PM_PUSH(7, ICPLB_DATA15)
+
+ /* Event Vectors */
+ FP = B1;
+ PM_PUSH(8, EVT2)
+ PM_PUSH(9, EVT3)
+ FP += 4; /* EVT4 */
+ PM_PUSH(10, EVT5)
+ PM_PUSH(11, EVT6)
+ PM_PUSH(12, EVT7)
+ PM_PUSH(13, EVT8)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, EVT9)
+ PM_PUSH(1, EVT10)
+ PM_PUSH(2, EVT11)
+ PM_PUSH(3, EVT12)
+ PM_PUSH(4, EVT13)
+ PM_PUSH(5, EVT14)
+ PM_PUSH(6, EVT15)
+
+ /* CEC */
+ FP = B2;
+ PM_PUSH(7, IMASK)
+ FP += 4; /* IPEND */
+ PM_PUSH(8, ILAT)
+ PM_PUSH(9, IPRIO)
+
+ /* Core Timer */
+ FP = B3;
+ PM_PUSH(10, TCNTL)
+ PM_PUSH(11, TPERIOD)
+ PM_PUSH(12, TSCALE)
+ PM_PUSH(13, TCOUNT)
+ PM_PUSH_SYNC(13)
+
+ /* Misc non-contiguous registers */
+ FP = I0;
+ PM_CORE_PUSH(0, DMEM_CONTROL);
+ PM_CORE_PUSH(1, IMEM_CONTROL);
+ PM_CORE_PUSH(2, TBUFCTL);
+ PM_PUSH_SYNC(2)
+
+ /* Set up args to hibernate mode early for pipeline optimization */
+ R0 = M3;
+ P1.H = _hibernate_mode;
+ P1.L = _hibernate_mode;
/* Save Magic, return address and Stack Pointer */
- P0.H = 0;
- P0.L = 0;
- R0.H = 0xDEAD; /* Hibernate Magic */
- R0.L = 0xBEEF;
- [P0++] = R0; /* Store Hibernate Magic */
- R0.H = .Lpm_resume_here;
- R0.L = .Lpm_resume_here;
- [P0++] = R0; /* Save Return Address */
+ P0 = 0;
+ R1.H = 0xDEAD; /* Hibernate Magic */
+ R1.L = 0xBEEF;
+ R2.H = .Lpm_resume_here;
+ R2.L = .Lpm_resume_here;
+ [P0++] = R1; /* Store Hibernate Magic */
+ [P0++] = R2; /* Save Return Address */
[P0++] = SP; /* Save Stack Pointer */
- P0.H = _hibernate_mode;
- P0.L = _hibernate_mode;
- R0 = R2;
- call (P0); /* Goodbye */
+
+ /* Must use an indirect call as we need to jump to L1 */
+ call (P1); /* Goodbye */
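+
+	/*
+	 * Resume-side contract (a sketch of the other half, not in this
+	 * hunk): on wakeup the boot code is expected to find 0xDEADBEEF at
+	 * address 0, reload SP from the third saved word, and jump to the
+	 * saved return address, landing at .Lpm_resume_here below.
+	 */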
.Lpm_resume_here:
+ /* Restore Core MMRs */
+ I0.H = hi(COREMMR_BASE);
+ I0.L = lo(COREMMR_BASE);
+ I1 = I0;
+ I2 = I0;
+ I3 = I0;
+ B0 = I0;
+ B1 = I0;
+ B2 = I0;
+ B3 = I0;
+ I1.L = lo(DCPLB_ADDR15);
+ I2.L = lo(DCPLB_DATA15);
+ I3.L = lo(ICPLB_ADDR15);
+ B0.L = lo(ICPLB_DATA15);
+ B1.L = lo(EVT15);
+ B2.L = lo(IPRIO);
+ B3.L = lo(TCOUNT);
+
+ /* Misc non-contiguous registers */
+ FP = I0;
+ PM_POP_SYNC(2)
+ PM_CORE_POP(2, TBUFCTL)
+ PM_CORE_POP(1, IMEM_CONTROL)
+ PM_CORE_POP(0, DMEM_CONTROL)
+
+ /* Core Timer */
+ PM_POP_SYNC(13)
+ FP = B3;
+ PM_POP(13, TCOUNT)
+ PM_POP(12, TSCALE)
+ PM_POP(11, TPERIOD)
+ PM_POP(10, TCNTL)
+
+ /* CEC */
+ FP = B2;
+ PM_POP(9, IPRIO)
+ PM_POP(8, ILAT)
+ FP += -4; /* IPEND */
+ PM_POP(7, IMASK)
+
+ /* Event Vectors */
+ FP = B1;
+ PM_POP(6, EVT15)
+ PM_POP(5, EVT14)
+ PM_POP(4, EVT13)
+ PM_POP(3, EVT12)
+ PM_POP(2, EVT11)
+ PM_POP(1, EVT10)
+ PM_POP(0, EVT9)
+ PM_POP_SYNC(13)
+ PM_POP(13, EVT8)
+ PM_POP(12, EVT7)
+ PM_POP(11, EVT6)
+ PM_POP(10, EVT5)
+ FP += -4; /* EVT4 */
+ PM_POP(9, EVT3)
+ PM_POP(8, EVT2)
+
+ /* ICPLB Data */
+ FP = B0;
+ PM_POP(7, ICPLB_DATA15)
+ PM_POP(6, ICPLB_DATA14)
+ PM_POP(5, ICPLB_DATA13)
+ PM_POP(4, ICPLB_DATA12)
+ PM_POP(3, ICPLB_DATA11)
+ PM_POP(2, ICPLB_DATA10)
+ PM_POP(1, ICPLB_DATA9)
+ PM_POP(0, ICPLB_DATA8)
+ PM_POP_SYNC(13)
+ PM_POP(13, ICPLB_DATA7)
+ PM_POP(12, ICPLB_DATA6)
+ PM_POP(11, ICPLB_DATA5)
+ PM_POP(10, ICPLB_DATA4)
+ PM_POP(9, ICPLB_DATA3)
+ PM_POP(8, ICPLB_DATA2)
+ PM_POP(7, ICPLB_DATA1)
+ PM_POP(6, ICPLB_DATA0)
+
+ /* ICPLB Addr */
+ FP = I3;
+ PM_POP(5, ICPLB_ADDR15)
+ PM_POP(4, ICPLB_ADDR14)
+ PM_POP(3, ICPLB_ADDR13)
+ PM_POP(2, ICPLB_ADDR12)
+ PM_POP(1, ICPLB_ADDR11)
+ PM_POP(0, ICPLB_ADDR10)
+ PM_POP_SYNC(13)
+ PM_POP(13, ICPLB_ADDR9)
+ PM_POP(12, ICPLB_ADDR8)
+ PM_POP(11, ICPLB_ADDR7)
+ PM_POP(10, ICPLB_ADDR6)
+ PM_POP(9, ICPLB_ADDR5)
+ PM_POP(8, ICPLB_ADDR4)
+ PM_POP(7, ICPLB_ADDR3)
+ PM_POP(6, ICPLB_ADDR2)
+ PM_POP(5, ICPLB_ADDR1)
+ PM_POP(4, ICPLB_ADDR0)
+
+ /* DCPLB Data */
+ FP = I2;
+ PM_POP(3, DCPLB_DATA15)
+ PM_POP(2, DCPLB_DATA14)
+ PM_POP(1, DCPLB_DATA13)
+ PM_POP(0, DCPLB_DATA12)
+ PM_POP_SYNC(13)
+ PM_POP(13, DCPLB_DATA11)
+ PM_POP(12, DCPLB_DATA10)
+ PM_POP(11, DCPLB_DATA9)
+ PM_POP(10, DCPLB_DATA8)
+ PM_POP(9, DCPLB_DATA7)
+ PM_POP(8, DCPLB_DATA6)
+ PM_POP(7, DCPLB_DATA5)
+ PM_POP(6, DCPLB_DATA4)
+ PM_POP(5, DCPLB_DATA3)
+ PM_POP(4, DCPLB_DATA2)
+ PM_POP(3, DCPLB_DATA1)
+ PM_POP(2, DCPLB_DATA0)
+
+ /* DCPLB Addr */
+ FP = I1;
+ PM_POP(1, DCPLB_ADDR15)
+ PM_POP(0, DCPLB_ADDR14)
+ PM_POP_SYNC(13)
+ PM_POP(13, DCPLB_ADDR13)
+ PM_POP(12, DCPLB_ADDR12)
+ PM_POP(11, DCPLB_ADDR11)
+ PM_POP(10, DCPLB_ADDR10)
+ PM_POP(9, DCPLB_ADDR9)
+ PM_POP(8, DCPLB_ADDR8)
+ PM_POP(7, DCPLB_ADDR7)
+ PM_POP(6, DCPLB_ADDR6)
+ PM_POP(5, DCPLB_ADDR5)
+ PM_POP(4, DCPLB_ADDR4)
+ PM_POP(3, DCPLB_ADDR3)
+ PM_POP(2, DCPLB_ADDR2)
+ PM_POP(1, DCPLB_ADDR1)
+ PM_POP(0, DCPLB_ADDR0)
+
+ /* Restore System MMRs */
+ FP.H = hi(SYSMMR_BASE);
+ FP.L = lo(SYSMMR_BASE);
+
+#ifdef EBIU_FCTL
+ PM_POP_SYNC(12)
+ PM_SYS_POP(12, EBIU_FCTL)
+ PM_SYS_POP(11, EBIU_MODE)
+ PM_SYS_POP(10, EBIU_MBSCTL)
+#else
+ PM_POP_SYNC(9)
+#endif
+ PM_SYS_POP(9, EBIU_AMBCTL1)
+ PM_SYS_POP(8, EBIU_AMBCTL0)
+ PM_SYS_POP16(7, EBIU_AMGCTL)
+
+ PM_SYS_POP16(6, SYSCR)
+
+#ifdef PINT0_ASSIGN
+ PM_SYS_POP(5, PINT3_EDGE_SET)
+ PM_SYS_POP(4, PINT2_EDGE_SET)
+ PM_SYS_POP(3, PINT1_EDGE_SET)
+ PM_SYS_POP(2, PINT0_EDGE_SET)
+ PM_SYS_POP(1, PINT3_INVERT_SET)
+ PM_SYS_POP(0, PINT2_INVERT_SET)
+ PM_POP_SYNC(13)
+ PM_SYS_POP(13, PINT1_INVERT_SET)
+ PM_SYS_POP(12, PINT0_INVERT_SET)
+ PM_SYS_POP(11, PINT3_ASSIGN)
+ PM_SYS_POP(10, PINT2_ASSIGN)
+ PM_SYS_POP(9, PINT1_ASSIGN)
+ PM_SYS_POP(8, PINT0_ASSIGN)
+ PM_SYS_POP(7, PINT3_MASK_SET)
+ PM_SYS_POP(6, PINT2_MASK_SET)
+ PM_SYS_POP(5, PINT1_MASK_SET)
+ PM_SYS_POP(4, PINT0_MASK_SET)
+#endif
+
+#ifdef SIC_IWR2
+ PM_SYS_POP(3, SIC_IWR2)
+#endif
+#ifdef SIC_IWR1
+ PM_SYS_POP(2, SIC_IWR1)
+#endif
+#ifdef SIC_IWR0
+ PM_SYS_POP(1, SIC_IWR0)
+#endif
+#ifdef SIC_IWR
+ PM_SYS_POP(1, SIC_IWR)
+#endif
+
+#ifdef SIC_IAR11
+ PM_SYS_POP(0, SIC_IAR11)
+#endif
+ PM_POP_SYNC(13)
+#ifdef SIC_IAR8
+ PM_SYS_POP(13, SIC_IAR10)
+ PM_SYS_POP(12, SIC_IAR9)
+ PM_SYS_POP(11, SIC_IAR8)
+#endif
+#ifdef SIC_IAR7
+ PM_SYS_POP(10, SIC_IAR7)
+#endif
+#ifdef SIC_IAR6
+ PM_SYS_POP(9, SIC_IAR6)
+ PM_SYS_POP(8, SIC_IAR5)
+ PM_SYS_POP(7, SIC_IAR4)
+#endif
+#ifdef SIC_IAR3
+ PM_SYS_POP(6, SIC_IAR3)
+#endif
+#ifdef SIC_IAR0
+ PM_SYS_POP(5, SIC_IAR2)
+ PM_SYS_POP(4, SIC_IAR1)
+ PM_SYS_POP(3, SIC_IAR0)
+#endif
+#ifdef SIC_IMASK0
+# ifdef SIC_IMASK2
+ PM_SYS_POP(2, SIC_IMASK2)
+# endif
+ PM_SYS_POP(1, SIC_IMASK1)
+ PM_SYS_POP(0, SIC_IMASK0)
+#else
+ PM_SYS_POP(0, SIC_IMASK)
+#endif
+
/* Restore Core Registers */
+ RETI = [sp++];
SEQSTAT = [sp++];
- RETE = [sp++];
- RETN = [sp++];
RETX = [sp++];
- r0 = [sp++];
- RETI = r0;
- RETS = [sp++];
-
+ SYSCFG = [sp++];
CYCLES2 = [sp++];
CYCLES = [sp++];
ASTAT = [sp++];
+ RETS = [sp++];
LB1 = [sp++];
LB0 = [sp++];
@@ -581,204 +885,10 @@ ENTRY(_do_hibernate)
usp = [sp++];
fp = [sp++];
-
- ( R7 : 0, P5 : 0) = [ SP ++ ];
- SYSCFG = [sp++];
-
- /* Restore Core MMRs */
-
- PM_POP(TBUFCTL)
- PM_POP(TCOUNT)
- PM_POP(TSCALE)
- PM_POP(TPERIOD)
- PM_POP(TCNTL)
- PM_POP(IPRIO)
- PM_POP(ILAT)
- PM_POP(IMASK)
- PM_POP(EVT15)
- PM_POP(EVT14)
- PM_POP(EVT13)
- PM_POP(EVT12)
- PM_POP(EVT11)
- PM_POP(EVT10)
- PM_POP(EVT9)
- PM_POP(EVT8)
- PM_POP(EVT7)
- PM_POP(EVT6)
- PM_POP(EVT5)
- PM_POP(EVT4)
- PM_POP(EVT3)
- PM_POP(EVT2)
- PM_POP(EVT1)
- PM_POP(EVT0)
- PM_POP(ICPLB_DATA15)
- PM_POP(ICPLB_DATA14)
- PM_POP(ICPLB_DATA13)
- PM_POP(ICPLB_DATA12)
- PM_POP(ICPLB_DATA11)
- PM_POP(ICPLB_DATA10)
- PM_POP(ICPLB_DATA9)
- PM_POP(ICPLB_DATA8)
- PM_POP(ICPLB_DATA7)
- PM_POP(ICPLB_DATA6)
- PM_POP(ICPLB_DATA5)
- PM_POP(ICPLB_DATA4)
- PM_POP(ICPLB_DATA3)
- PM_POP(ICPLB_DATA2)
- PM_POP(ICPLB_DATA1)
- PM_POP(ICPLB_DATA0)
- PM_POP(ICPLB_ADDR15)
- PM_POP(ICPLB_ADDR14)
- PM_POP(ICPLB_ADDR13)
- PM_POP(ICPLB_ADDR12)
- PM_POP(ICPLB_ADDR11)
- PM_POP(ICPLB_ADDR10)
- PM_POP(ICPLB_ADDR9)
- PM_POP(ICPLB_ADDR8)
- PM_POP(ICPLB_ADDR7)
- PM_POP(ICPLB_ADDR6)
- PM_POP(ICPLB_ADDR5)
- PM_POP(ICPLB_ADDR4)
- PM_POP(ICPLB_ADDR3)
- PM_POP(ICPLB_ADDR2)
- PM_POP(ICPLB_ADDR1)
- PM_POP(ICPLB_ADDR0)
- PM_POP(IMEM_CONTROL)
- PM_POP(DCPLB_DATA15)
- PM_POP(DCPLB_DATA14)
- PM_POP(DCPLB_DATA13)
- PM_POP(DCPLB_DATA12)
- PM_POP(DCPLB_DATA11)
- PM_POP(DCPLB_DATA10)
- PM_POP(DCPLB_DATA9)
- PM_POP(DCPLB_DATA8)
- PM_POP(DCPLB_DATA7)
- PM_POP(DCPLB_DATA6)
- PM_POP(DCPLB_DATA5)
- PM_POP(DCPLB_DATA4)
- PM_POP(DCPLB_DATA3)
- PM_POP(DCPLB_DATA2)
- PM_POP(DCPLB_DATA1)
- PM_POP(DCPLB_DATA0)
- PM_POP(DCPLB_ADDR15)
- PM_POP(DCPLB_ADDR14)
- PM_POP(DCPLB_ADDR13)
- PM_POP(DCPLB_ADDR12)
- PM_POP(DCPLB_ADDR11)
- PM_POP(DCPLB_ADDR10)
- PM_POP(DCPLB_ADDR9)
- PM_POP(DCPLB_ADDR8)
- PM_POP(DCPLB_ADDR7)
- PM_POP(DCPLB_ADDR6)
- PM_POP(DCPLB_ADDR5)
- PM_POP(DCPLB_ADDR4)
- PM_POP(DCPLB_ADDR3)
- PM_POP(DCPLB_ADDR2)
- PM_POP(DCPLB_ADDR1)
- PM_POP(DCPLB_ADDR0)
- PM_POP(DMEM_CONTROL)
-
- /* Restore System MMRs */
-
- P0.H = hi(PLL_CTL);
- P0.L = lo(PLL_CTL);
- PM_SYS_POP16(SYSCR)
-
-#ifdef PORTCIO_FER
- PM_SYS_POP16(PORTEIO_FER)
- PM_SYS_POP16(PORTEIO)
- PM_SYS_POP16(PORTEIO_INEN)
- PM_SYS_POP16(PORTEIO_DIR)
- PM_SYS_POP16(PORTDIO_FER)
- PM_SYS_POP16(PORTDIO)
- PM_SYS_POP16(PORTDIO_INEN)
- PM_SYS_POP16(PORTDIO_DIR)
- PM_SYS_POP16(PORTCIO_FER)
- PM_SYS_POP16(PORTCIO)
- PM_SYS_POP16(PORTCIO_INEN)
- PM_SYS_POP16(PORTCIO_DIR)
-#endif
-
-#ifdef EBIU_FCTL
- PM_SYS_POP(EBIU_FCTL)
- PM_SYS_POP(EBIU_MODE)
- PM_SYS_POP(EBIU_MBSCTL)
-#endif
- PM_SYS_POP16(EBIU_AMGCTL)
- PM_SYS_POP(EBIU_AMBCTL1)
- PM_SYS_POP(EBIU_AMBCTL0)
-
-#ifdef PINT0_ASSIGN
- PM_SYS_POP(PINT3_EDGE_SET)
- PM_SYS_POP(PINT2_EDGE_SET)
- PM_SYS_POP(PINT1_EDGE_SET)
- PM_SYS_POP(PINT0_EDGE_SET)
- PM_SYS_POP(PINT3_INVERT_SET)
- PM_SYS_POP(PINT2_INVERT_SET)
- PM_SYS_POP(PINT1_INVERT_SET)
- PM_SYS_POP(PINT0_INVERT_SET)
- PM_SYS_POP(PINT3_ASSIGN)
- PM_SYS_POP(PINT2_ASSIGN)
- PM_SYS_POP(PINT1_ASSIGN)
- PM_SYS_POP(PINT0_ASSIGN)
- PM_SYS_POP(PINT3_MASK_SET)
- PM_SYS_POP(PINT2_MASK_SET)
- PM_SYS_POP(PINT1_MASK_SET)
- PM_SYS_POP(PINT0_MASK_SET)
-#endif
-
-#ifdef SIC_IWR2
- PM_SYS_POP(SIC_IWR2)
-#endif
-#ifdef SIC_IWR1
- PM_SYS_POP(SIC_IWR1)
-#endif
-#ifdef SIC_IWR0
- PM_SYS_POP(SIC_IWR0)
-#endif
-#ifdef SIC_IWR
- PM_SYS_POP(SIC_IWR)
-#endif
-
-#ifdef SIC_IAR8
- PM_SYS_POP(SIC_IAR11)
- PM_SYS_POP(SIC_IAR10)
- PM_SYS_POP(SIC_IAR9)
- PM_SYS_POP(SIC_IAR8)
-#endif
-#ifdef SIC_IAR7
- PM_SYS_POP(SIC_IAR7)
-#endif
-#ifdef SIC_IAR6
- PM_SYS_POP(SIC_IAR6)
- PM_SYS_POP(SIC_IAR5)
- PM_SYS_POP(SIC_IAR4)
-#endif
-#ifdef SIC_IAR3
- PM_SYS_POP(SIC_IAR3)
-#endif
-#ifdef SIC_IAR0
- PM_SYS_POP(SIC_IAR2)
- PM_SYS_POP(SIC_IAR1)
- PM_SYS_POP(SIC_IAR0)
-#endif
-#ifdef SIC_IMASK
- PM_SYS_POP(SIC_IMASK)
-#endif
-#ifdef SIC_IMASK2
- PM_SYS_POP(SIC_IMASK2)
-#endif
-#ifdef SIC_IMASK1
- PM_SYS_POP(SIC_IMASK1)
-#endif
-#ifdef SIC_IMASK0
- PM_SYS_POP(SIC_IMASK0)
-#endif
+ (R7:0, P5:0) = [sp++];
[--sp] = RETI; /* Clear Global Interrupt Disable */
SP += 4;
- RETS = [SP++];
- ( R7:0, P5:0 ) = [SP++];
RTS;
ENDPROC(_do_hibernate)
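
The rewritten restore path above trades the old one-MMR-at-a-time PM_POP sequence for batched pops: I1-I3 and B0-B3 are preloaded with the top register of each block, PM_POP_SYNC(n) bounds each SSYNC batch, and PM_POP(n, reg) walks FP downward through the block (FP += -4 skips read-only slots such as IPEND and EVT4). A hedged C analogue of the access pattern — not the kernel macros themselves:

	#include <stdint.h>

	static uint32_t *pm_stack;		/* saved-context stack cursor */

	static inline uint32_t pm_pop(void)
	{
		return *pm_stack++;		/* consume one saved word */
	}

	/* Registers were pushed highest-offset first, so restore by
	 * walking down from the top of the block, one word per MMR. */
	static void restore_mmr_block(volatile uint32_t *block_top, int count)
	{
		for (int i = 0; i < count; i++)
			block_top[-i] = pm_pop();
	}
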
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 76de5724c1e3..8b4d98854403 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -85,37 +85,25 @@ ENTRY(__start)
SSYNC;
/* in case of double faults, save a few things */
- p0.l = _init_retx;
- p0.h = _init_retx;
- R0 = RETX;
- [P0] = R0;
-
+ p1.l = _initial_pda;
+ p1.h = _initial_pda;
+ r4 = RETX;
#ifdef CONFIG_DEBUG_DOUBLEFAULT
 /* Only save these if we are storing them;
 * this happens here, since L1 gets clobbered
 * below.
 */
GET_PDA(p0, r0);
- r5 = [p0 + PDA_DF_RETX];
- p1.l = _init_saved_retx;
- p1.h = _init_saved_retx;
- [p1] = r5;
-
- r5 = [p0 + PDA_DF_DCPLB];
- p1.l = _init_saved_dcplb_fault_addr;
- p1.h = _init_saved_dcplb_fault_addr;
- [p1] = r5;
-
- r5 = [p0 + PDA_DF_ICPLB];
- p1.l = _init_saved_icplb_fault_addr;
- p1.h = _init_saved_icplb_fault_addr;
- [p1] = r5;
-
- r5 = [p0 + PDA_DF_SEQSTAT];
- p1.l = _init_saved_seqstat;
- p1.h = _init_saved_seqstat;
- [p1] = r5;
+ r0 = [p0 + PDA_DF_RETX];
+ r1 = [p0 + PDA_DF_DCPLB];
+ r2 = [p0 + PDA_DF_ICPLB];
+ r3 = [p0 + PDA_DF_SEQSTAT];
+ [p1 + PDA_INIT_DF_RETX] = r0;
+ [p1 + PDA_INIT_DF_DCPLB] = r1;
+ [p1 + PDA_INIT_DF_ICPLB] = r2;
+ [p1 + PDA_INIT_DF_SEQSTAT] = r3;
#endif
+ [p1 + PDA_INIT_RETX] = r4;
/* Initialize stack pointer */
sp.l = _init_thread_union + THREAD_SIZE;
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1177369f9922..332dace6af34 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -444,7 +444,7 @@ static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
extern void bfin_gpio_irq_prepare(unsigned gpio);
-#if !defined(CONFIG_BF54x)
+#if !BFIN_GPIO_PINT
static void bfin_gpio_ack_irq(struct irq_data *d)
{
@@ -633,7 +633,7 @@ void bfin_demux_gpio_irq(unsigned int inta_irq,
bfin_demux_gpio_block(irq);
}
-#else /* CONFIG_BF54x */
+#else
#define NR_PINT_SYS_IRQS 4
#define NR_PINT_BITS 32
@@ -647,24 +647,11 @@ void bfin_demux_gpio_irq(unsigned int inta_irq,
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
-struct pin_int_t {
- unsigned int mask_set;
- unsigned int mask_clear;
- unsigned int request;
- unsigned int assign;
- unsigned int edge_set;
- unsigned int edge_clear;
- unsigned int invert_set;
- unsigned int invert_clear;
- unsigned int pinstate;
- unsigned int latch;
-};
-
-static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
- (struct pin_int_t *)PINT0_MASK_SET,
- (struct pin_int_t *)PINT1_MASK_SET,
- (struct pin_int_t *)PINT2_MASK_SET,
- (struct pin_int_t *)PINT3_MASK_SET,
+static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
+ (struct bfin_pint_regs *)PINT0_MASK_SET,
+ (struct bfin_pint_regs *)PINT1_MASK_SET,
+ (struct bfin_pint_regs *)PINT2_MASK_SET,
+ (struct bfin_pint_regs *)PINT3_MASK_SET,
};
inline unsigned int get_irq_base(u32 bank, u8 bmap)
@@ -981,7 +968,7 @@ int __init init_arch_irq(void)
local_irq_disable();
-#ifdef CONFIG_BF54x
+#if BFIN_GPIO_PINT
# ifdef CONFIG_PINTx_REASSIGN
pint[0]->assign = CONFIG_PINT0_ASSIGN;
pint[1]->assign = CONFIG_PINT1_ASSIGN;
@@ -999,16 +986,16 @@ int __init init_arch_irq(void)
irq_set_chip(irq, &bfin_internal_irqchip);
switch (irq) {
-#if defined(BF537_FAMILY)
- case IRQ_PH_INTA_MAC_RX:
- case IRQ_PF_INTA_PG_INTA:
-#elif defined(BF533_FAMILY)
- case IRQ_PROG_INTA:
-#elif defined(CONFIG_BF54x)
+#if BFIN_GPIO_PINT
case IRQ_PINT0:
case IRQ_PINT1:
case IRQ_PINT2:
case IRQ_PINT3:
+#elif defined(BF537_FAMILY)
+ case IRQ_PH_INTA_MAC_RX:
+ case IRQ_PF_INTA_PG_INTA:
+#elif defined(BF533_FAMILY)
+ case IRQ_PROG_INTA:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
case IRQ_PORTF_INTA:
case IRQ_PORTG_INTA:
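
The BF54x-specific #ifdefs above give way to a BFIN_GPIO_PINT feature test, and the file-local pin_int_t is replaced by a shared MMR layout. A hedged reconstruction of that layout, with the field order copied from the deleted struct (the u32 spelling is an assumption; the old struct used unsigned int):

	struct bfin_pint_regs {
		u32 mask_set;
		u32 mask_clear;
		u32 request;
		u32 assign;
		u32 edge_set;
		u32 edge_clear;
		u32 invert_set;
		u32 invert_clear;
		u32 pinstate;
		u32 latch;
	};
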
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 35e7e1eb0188..1c143a4de5f5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -45,9 +45,7 @@ struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif
-void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
- *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
- *init_saved_dcplb_fault_addr_coreb;
+struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
#define BFIN_IPI_RESCHEDULE 0
#define BFIN_IPI_CALL_FUNC 1
@@ -369,13 +367,16 @@ void __cpuinit secondary_start_kernel(void)
if (_bfin_swrst & SWRST_DBL_FAULT_B) {
printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
- printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
- (int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
- printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
- printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
+ printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
+ initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
+ initial_pda_coreb.retx_doublefault);
+ printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
+ initial_pda_coreb.dcplb_doublefault_addr);
+ printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
+ initial_pda_coreb.icplb_doublefault_addr);
#endif
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
- init_retx_coreb);
+ initial_pda_coreb.retx);
}
/*
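
The five loose per-core pointers become one structure, which head.S can fill with plain offset stores. A hedged reconstruction of struct blackfin_initial_pda from the fields referenced in this hunk (the real definition lives in the Blackfin headers):

	struct blackfin_initial_pda {
		void *retx;			/* RETX captured at boot */
	#ifdef CONFIG_DEBUG_DOUBLEFAULT
		void *retx_doublefault;		/* state saved by the fault path */
		unsigned int seqstat_doublefault;
		void *dcplb_doublefault_addr;
		void *icplb_doublefault_addr;
	#endif
	};
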
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index bcd502f74cda..37400f5869e6 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -30,45 +30,19 @@
#endif
#ifdef CONFIG_ETRAX_KMALLOCED_MODULES
-#define MALLOC_MODULE(size) kmalloc(size, GFP_KERNEL)
-#define FREE_MODULE(region) kfree(region)
-#else
-#define MALLOC_MODULE(size) vmalloc_exec(size)
-#define FREE_MODULE(region) vfree(region)
-#endif
-
void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
- return MALLOC_MODULE(size);
+ return kmalloc(size, GFP_KERNEL);
}
-
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
- FREE_MODULE(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
- return -ENOEXEC;
+ kfree(module_region);
}
+#endif
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
@@ -108,14 +82,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
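
These deletions — and the matching ones in the frv, h8300, ia64, m32r, m68k, microblaze and mips hunks below — are possible because kernel/module.c now supplies weak defaults for the loader hooks, so an arch only overrides what it must. A hedged sketch of what such defaults look like; the exact bodies in the tree may differ (module_alloc in particular may need an executable mapping):

	#include <linux/moduleloader.h>
	#include <linux/vmalloc.h>

	void * __weak module_alloc(unsigned long size)
	{
		/* arch overrides this if it needs exec or range control */
		return vmalloc(size);
	}

	void __weak module_free(struct module *mod, void *module_region)
	{
		vfree(module_region);
	}

	int __weak module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
					     char *secstrings, struct module *mod)
	{
		return 0;	/* nothing arch-specific by default */
	}
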
diff --git a/arch/frv/kernel/module.c b/arch/frv/kernel/module.c
index 711763c8a6f3..9d9835f1fe2b 100644
--- a/arch/frv/kernel/module.c
+++ b/arch/frv/kernel/module.c
@@ -22,57 +22,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-
- return vmalloc_exec(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", me->name);
- return -ENOEXEC;
-}
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", me->name);
- return -ENOEXEC;
-}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
+/* TODO: At least one of apply_relocate or apply_relocate_add must be
+ * implemented in order to get working module support.
+ */
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index d236ab4232ca..15c22286ae79 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -162,9 +162,7 @@ config H8300_TPU_CH
int "TPU channel"
depends on H8300_TPU
-config PREEMPT
- bool "Preemptible Kernel"
- default n
+source "kernel/Kconfig.preempt"
source "mm/Kconfig"
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
index db4953dc4e1b..1d526e05db19 100644
--- a/arch/h8300/kernel/module.c
+++ b/arch/h8300/kernel/module.c
@@ -11,40 +11,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -107,14 +73,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
me->name, rela[i].r_offset);
return -ENOEXEC;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 38280ef4a2af..137b277f7e56 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -101,6 +101,9 @@ config GENERIC_IOMAP
bool
default y
+config ARCH_CLOCKSOURCE_DATA
+ def_bool y
+
config SCHED_OMIT_FRAME_POINTER
bool
default y
@@ -627,27 +630,6 @@ source "drivers/pci/hotplug/Kconfig"
source "drivers/pcmcia/Kconfig"
-config DMAR
- bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
- depends on IA64_GENERIC && ACPI && EXPERIMENTAL
- help
- DMA remapping (DMAR) devices support enables independent address
- translations for Direct Memory Access (DMA) from devices.
- These DMA remapping devices are reported via ACPI tables
- and include PCI device scope covered by these DMA
- remapping devices.
-
-config DMAR_DEFAULT_ON
- def_bool y
- prompt "Enable DMA Remapping Devices by default"
- depends on DMAR
- help
- Selecting this option will enable a DMAR device at boot time if
- one is found. If this option is not selected, DMAR support can
- be enabled by passing intel_iommu=on to the kernel. It is
- recommended you say N here while the DMAR code remains
- experimental.
-
endmenu
endif
@@ -681,6 +663,3 @@ source "lib/Kconfig"
config IOMMU_HELPER
def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
-
-config IOMMU_API
- def_bool (DMAR)
diff --git a/arch/ia64/include/asm/clocksource.h b/arch/ia64/include/asm/clocksource.h
new file mode 100644
index 000000000000..5c8596e4cb02
--- /dev/null
+++ b/arch/ia64/include/asm/clocksource.h
@@ -0,0 +1,10 @@
+/* IA64-specific clocksource additions */
+
+#ifndef _ASM_IA64_CLOCKSOURCE_H
+#define _ASM_IA64_CLOCKSOURCE_H
+
+struct arch_clocksource_data {
+ void *fsys_mmio; /* used by fsyscall asm code */
+};
+
+#endif /* _ASM_IA64_CLOCKSOURCE_H */
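
ARCH_CLOCKSOURCE_DATA lets struct clocksource carry an arch-owned member instead of ia64's fsys_mmio pointer living in the generic struct. A hedged sketch of the generic side this header plugs into; the field placement follows the archdata.fsys_mmio usage in the cyclone and sn2 hunks below:

	/* in <linux/clocksource.h>, abridged */
	struct clocksource {
		/* ... generic fields: read, mask, mult, shift, ... */
	#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
		struct arch_clocksource_data archdata;	/* defined per-arch */
	#endif
		/* ... */
	};
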
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 2eb0a981a09a..32551d304cd7 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -281,6 +281,10 @@ paravirt_init_missing_ticks_accounting(int cpu)
pv_time_ops.init_missing_ticks_accounting(cpu);
}
+struct jump_label_key;
+extern struct jump_label_key paravirt_steal_enabled;
+extern struct jump_label_key paravirt_steal_rq_enabled;
+
static inline int
paravirt_do_steal_accounting(unsigned long *new_itm)
{
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index f64097b5118a..4826ff957a3d 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -115,7 +115,7 @@ int __init init_cyclone_clock(void)
}
/* initialize last tick */
cyclone_mc = cyclone_timer;
- clocksource_cyclone.fsys_mmio = cyclone_timer;
+ clocksource_cyclone.archdata.fsys_mmio = cyclone_timer;
clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ);
return 0;
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 1481b0a28ca0..24603be24c14 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -304,14 +304,6 @@ plt_target (struct plt_entry *plt)
#endif /* !USE_BRL */
-void *
-module_alloc (unsigned long size)
-{
- if (!size)
- return NULL;
- return vmalloc(size);
-}
-
void
module_free (struct module *mod, void *module_region)
{
@@ -853,14 +845,6 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
return 0;
}
-int
-apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- unsigned int relsec, struct module *mod)
-{
- printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
- return -ENOEXEC;
-}
-
/*
* Modules contain a single unwind table which covers both the core and the init text
* sections but since the two are not contiguous, we need to split this table up such that
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index a21d7bb9c69c..100868216c55 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -634,6 +634,8 @@ struct pv_irq_ops pv_irq_ops = {
* pv_time_ops
* time operations
*/
+struct jump_label_key paravirt_steal_enabled;
+struct jump_label_key paravirt_steal_rq_enabled;
static int
ia64_native_do_steal_accounting(unsigned long *new_itm)
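
paravirt_steal_enabled and paravirt_steal_rq_enabled are jump-label keys: the consuming branch is patched to a no-op until a paravirt backend increments the key, so bare-metal boots pay nothing for steal-time accounting. A hedged sketch of the consumer side using this era's jump-label API (the accounting body is elided):

	#include <linux/jump_label.h>

	extern struct jump_label_key paravirt_steal_enabled;

	static void steal_accounting_sketch(void)
	{
		if (static_branch(&paravirt_steal_enabled)) {
			/* fold stolen time into CPU time accounting */
		}
	}
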
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 85118dfe9bb5..43920de425f1 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -468,7 +468,7 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
fsyscall_gtod_data.clk_mask = c->mask;
fsyscall_gtod_data.clk_mult = mult;
fsyscall_gtod_data.clk_shift = c->shift;
- fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
+ fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
/* copy kernel time structures */
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index fa4d1e59deb0..9806e55f91be 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -49,6 +49,5 @@ config KVM_INTEL
extensions.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 81a1f4e6bcd8..485c42d97e83 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -112,8 +112,6 @@ static void sn_ack_irq(struct irq_data *data)
irq_move_irq(data);
}
-static void sn_irq_info_free(struct rcu_head *head);
-
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
nasid_t nasid, int slice)
{
@@ -177,7 +175,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
spin_lock(&sn_irq_info_lock);
list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
spin_unlock(&sn_irq_info_lock);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ kfree_rcu(sn_irq_info, rcu);
finish_up:
@@ -338,14 +336,6 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
rcu_read_unlock();
}
-static void sn_irq_info_free(struct rcu_head *head)
-{
- struct sn_irq_info *sn_irq_info;
-
- sn_irq_info = container_of(head, struct sn_irq_info, rcu);
- kfree(sn_irq_info);
-}
-
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
nasid_t nasid = sn_irq_info->irq_nasid;
@@ -399,7 +389,7 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
spin_unlock(&sn_irq_info_lock);
if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
free_irq_vector(sn_irq_info->irq_irq);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ kfree_rcu(sn_irq_info, rcu);
pci_dev_put(pci_dev);
}
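
Both call_rcu() users in this file only kfreed the embedding object, which is exactly the pattern kfree_rcu() encodes: pass the object and the name of its rcu_head member, and the grace-period callback plus container_of() boilerplate (the deleted sn_irq_info_free()) disappears. A hedged, generic illustration:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;	/* embedded callback head */
	};

	static void release_foo(struct foo *f)
	{
		/* frees f with kfree() after a grace period elapses */
		kfree_rcu(f, rcu);
	}
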
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index c34efda122e1..0f8844e49363 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -54,7 +54,7 @@ ia64_sn_udelay (unsigned long usecs)
void __init sn_timer_init(void)
{
- clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
+ clocksource_sn2.archdata.fsys_mmio = RTC_COUNTER_ADDR;
clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
ia64_udelay = &ia64_sn_udelay;
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 85b44e858225..b92b9445255d 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -268,17 +268,7 @@ config SCHED_OMIT_FRAME_POINTER
bool
default y
-config PREEMPT
- bool "Preemptible Kernel"
- help
- This option reduces the latency of the kernel when reacting to
- real-time or interactive events by allowing a low priority process to
- be preempted even if it is in kernel mode executing a system call.
- This allows applications to run more reliably even when the system is
- under load.
-
- Say Y here if you are building a kernel for a desktop, embedded
- or real-time system. Say N if you are unsure.
+source "kernel/Kconfig.preempt"
config SMP
bool "Symmetric multi-processing support"
diff --git a/arch/m32r/include/asm/delay.h b/arch/m32r/include/asm/delay.h
index 9dd9e999ea69..9670e127b7b2 100644
--- a/arch/m32r/include/asm/delay.h
+++ b/arch/m32r/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef _ASM_M32R_DELAY_H
-#define _ASM_M32R_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/m32r/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
- __udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
-
-#endif /* _ASM_M32R_DELAY_H */
+#include <asm-generic/delay.h>
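
The deleted macros survive in asm-generic form: the __builtin_constant_p() guard turns a constant delay that is too large into a link-time error by referencing a deliberately undefined function. The trick, restated from the removed lines:

	extern void __bad_udelay(void);		/* intentionally never defined */

	#define udelay(n)						\
		(__builtin_constant_p(n) ?				\
			((n) > 20000 ? __bad_udelay()			\
				     : __const_udelay((n) * 0x10c7ul)) :\
			__udelay(n))
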
diff --git a/arch/m32r/kernel/module.c b/arch/m32r/kernel/module.c
index cb5f37d78d49..3071fe83ffc8 100644
--- a/arch/m32r/kernel/module.c
+++ b/arch/m32r/kernel/module.c
@@ -28,33 +28,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-#ifdef CONFIG_MMU
- return vmalloc_exec(size);
-#else
- return vmalloc(size);
-#endif
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
#define COPY_UNALIGNED_WORD(sw, tw, align) \
{ \
void *__s = &(sw), *__t = &(tw); \
@@ -243,14 +216,3 @@ int apply_relocate(Elf32_Shdr *sechdrs,
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index 8b6e201b2c20..c5748bb4ea71 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/natfeat.h>
#include <asm/virtconvert.h>
@@ -204,7 +205,6 @@ static struct net_device * __init nfeth_probe(int unit)
dev->irq = nfEtherIRQ;
dev->netdev_ops = &nfeth_netdev_ops;
- dev->flags |= NETIF_F_NO_CSUM;
memcpy(dev->dev_addr, mac, ETH_ALEN);
priv = netdev_priv(dev);
diff --git a/arch/m68k/kernel/module_mm.c b/arch/m68k/kernel/module_mm.c
index cd6bcb1c957e..ceafc47c96d5 100644
--- a/arch/m68k/kernel/module_mm.c
+++ b/arch/m68k/kernel/module_mm.c
@@ -19,29 +19,6 @@
#ifdef CONFIG_MODULES
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -131,10 +108,6 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-void module_arch_cleanup(struct module *mod)
-{
-}
-
#endif /* CONFIG_MODULES */
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
diff --git a/arch/m68k/kernel/module_no.c b/arch/m68k/kernel/module_no.c
index d11ffae7956a..5a097c6063fa 100644
--- a/arch/m68k/kernel/module_no.c
+++ b/arch/m68k/kernel/module_no.c
@@ -11,29 +11,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -113,14 +90,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 746df91e5796..242be57a319c 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -19,9 +19,6 @@ enum {
*/
PCI_REASSIGN_ALL_RSRC = 0x00000001,
- /* Re-assign all bus numbers */
- PCI_REASSIGN_ALL_BUS = 0x00000002,
-
/* Do not try to assign, just use existing setup */
PCI_PROBE_ONLY = 0x00000004,
@@ -110,16 +107,6 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
return bus->sysdata;
}
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- struct pci_controller *host;
-
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- host = pci_bus_to_host(bus);
- return host ? host->dn : NULL;
-}
-
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
/* No specific ISA handling on ppc32 at this stage, it
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index ba65cf472544..1dd9d6b1e275 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -40,8 +40,7 @@ struct pci_dev;
* Set this to 1 if you want the kernel to re-assign all PCI
* bus numbers (don't do that on ppc64 yet !)
*/
-#define pcibios_assign_all_busses() \
- (pci_has_flag(PCI_REASSIGN_ALL_BUS))
+#define pcibios_assign_all_busses() 0
static inline void pcibios_set_master(struct pci_dev *dev)
{
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index d0890d36ef61..9bd01ecb00d6 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -29,21 +29,6 @@
extern int early_uartlite_console(void);
extern int early_uart16550_console(void);
-#ifdef CONFIG_PCI
-/*
- * PCI <-> OF matching functions
- * (XXX should these be here?)
- */
-struct pci_bus;
-struct pci_dev;
-extern int pci_device_from_OF_node(struct device_node *node,
- u8 *bus, u8 *devfn);
-extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
- int devfn);
-extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
-extern void pci_create_OF_bus_map(void);
-#endif
-
/*
 * OF address retrieval & translation
*/
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 0e73f6606547..142426f631bb 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -18,37 +18,6 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
-void *module_alloc(unsigned long size)
-{
- void *ret;
- ret = (size == 0) ? NULL : vmalloc(size);
- pr_debug("module_alloc (%08lx@%08lx)\n", size, (unsigned long int)ret);
- return ret;
-}
-
-void module_free(struct module *module, void *region)
-{
- pr_debug("module_free(%s,%08lx)\n", module->name,
- (unsigned long)region);
- vfree(region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *module)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec, struct module *module)
{
@@ -155,7 +124,3 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
flush_dcache();
return 0;
}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile
index 9889cc2e1294..d1114fbd4780 100644
--- a/arch/microblaze/pci/Makefile
+++ b/arch/microblaze/pci/Makefile
@@ -2,5 +2,5 @@
# Makefile
#
-obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o
+obj-$(CONFIG_PCI) += pci-common.o indirect_pci.o iomap.o
obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 53599067d2f9..041b1d86d75b 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -50,6 +50,11 @@ unsigned int pci_flags;
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+unsigned long isa_io_base;
+unsigned long pci_dram_offset;
+static int pci_bus_count;
+
+
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
pci_dma_ops = dma_ops;
@@ -1558,6 +1563,112 @@ void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
(unsigned long)hose->io_base_virt - _IO_BASE);
}
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->dn);
+}
+
+static void __devinit pcibios_scan_phb(struct pci_controller *hose)
+{
+ struct pci_bus *bus;
+ struct device_node *node = hose->dn;
+ unsigned long io_offset;
+ struct resource *res = &hose->io_resource;
+
+ pr_debug("PCI: Scanning PHB %s\n",
+ node ? node->full_name : "<NO NAME>");
+
+ /* Create an empty bus for the toplevel */
+ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
+ if (bus == NULL) {
+ printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+ hose->global_number);
+ return;
+ }
+ bus->secondary = hose->first_busno;
+ hose->bus = bus;
+
+ /* Fixup IO space offset */
+ io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
+ res->start = (res->start + io_offset) & 0xffffffffu;
+ res->end = (res->end + io_offset) & 0xffffffffu;
+
+ /* Wire up PHB bus resources */
+ pcibios_setup_phb_resources(hose);
+
+ /* Scan children */
+ hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+}
+
+static int __init pcibios_init(void)
+{
+ struct pci_controller *hose, *tmp;
+ int next_busno = 0;
+
+ printk(KERN_INFO "PCI: Probing PCI hardware\n");
+
+ /* Scan all of the recorded PCI controllers. */
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ hose->last_busno = 0xff;
+ pcibios_scan_phb(hose);
+ printk(KERN_INFO "calling pci_bus_add_devices()\n");
+ pci_bus_add_devices(hose->bus);
+ if (next_busno <= hose->last_busno)
+ next_busno = hose->last_busno + 1;
+ }
+ pci_bus_count = next_busno;
+
+ /* Call common code to handle resource allocation */
+ pcibios_resource_survey();
+
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+static struct pci_controller *pci_bus_to_hose(int bus)
+{
+ struct pci_controller *hose, *tmp;
+
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+ if (bus >= hose->first_busno && bus <= hose->last_busno)
+ return hose;
+ return NULL;
+}
+
+/* Provide information on locations of various I/O regions in physical
+ * memory. Do this on a per-card basis so that we choose the right
+ * root bridge.
+ * Note that the returned IO or memory base is a physical address
+ */
+
+long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
+{
+ struct pci_controller *hose;
+ long result = -EOPNOTSUPP;
+
+ hose = pci_bus_to_hose(bus);
+ if (!hose)
+ return -ENODEV;
+
+ switch (which) {
+ case IOBASE_BRIDGE_NUMBER:
+ return (long)hose->first_busno;
+ case IOBASE_MEMORY:
+ return (long)hose->pci_mem_offset;
+ case IOBASE_IO:
+ return (long)hose->io_base_phys;
+ case IOBASE_ISA_IO:
+ return (long)isa_io_base;
+ case IOBASE_ISA_MEM:
+ return (long)isa_mem_base;
+ }
+
+ return result;
+}
+
/*
* Null PCI config access functions, for the case when we can't
* find a hose.
@@ -1626,3 +1737,4 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
{
return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
+
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
deleted file mode 100644
index 92728a6cfd80..000000000000
--- a/arch/microblaze/pci/pci_32.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Common pmac/prep/chrp pci routines. -- Cort
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-#include <linux/list.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/sections.h>
-#include <asm/pci-bridge.h>
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-
-#undef DEBUG
-
-unsigned long isa_io_base;
-unsigned long pci_dram_offset;
-int pcibios_assign_bus_offset = 1;
-
-static u8 *pci_to_OF_bus_map;
-
-/* By default, we don't re-assign bus numbers. We do this only on
- * some pmacs
- */
-static int pci_assign_all_buses;
-
-static int pci_bus_count;
-
-/*
- * Functions below are used on OpenFirmware machines.
- */
-static void
-make_one_node_map(struct device_node *node, u8 pci_bus)
-{
- const int *bus_range;
- int len;
-
- if (pci_bus >= pci_bus_count)
- return;
- bus_range = of_get_property(node, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, "
- "assuming it starts at 0\n", node->full_name);
- pci_to_OF_bus_map[pci_bus] = 0;
- } else
- pci_to_OF_bus_map[pci_bus] = bus_range[0];
-
- for_each_child_of_node(node, node) {
- struct pci_dev *dev;
- const unsigned int *class_code, *reg;
-
- class_code = of_get_property(node, "class-code", NULL);
- if (!class_code ||
- ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
- continue;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
- continue;
- dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
- if (!dev || !dev->subordinate) {
- pci_dev_put(dev);
- continue;
- }
- make_one_node_map(node, dev->subordinate->number);
- pci_dev_put(dev);
- }
-}
-
-void
-pcibios_make_OF_bus_map(void)
-{
- int i;
- struct pci_controller *hose, *tmp;
- struct property *map_prop;
- struct device_node *dn;
-
- pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
- if (!pci_to_OF_bus_map) {
- printk(KERN_ERR "Can't allocate OF bus map !\n");
- return;
- }
-
- /* We fill the bus map with invalid values, that helps
- * debugging.
- */
- for (i = 0; i < pci_bus_count; i++)
- pci_to_OF_bus_map[i] = 0xff;
-
- /* For each hose, we begin searching bridges */
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- struct device_node *node = hose->dn;
-
- if (!node)
- continue;
- make_one_node_map(node, hose->first_busno);
- }
- dn = of_find_node_by_path("/");
- map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
- if (map_prop) {
- BUG_ON(pci_bus_count > map_prop->length);
- memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
- }
- of_node_put(dn);
-#ifdef DEBUG
- printk(KERN_INFO "PCI->OF bus map:\n");
- for (i = 0; i < pci_bus_count; i++) {
- if (pci_to_OF_bus_map[i] == 0xff)
- continue;
- printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
- }
-#endif
-}
-
-typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);
-
-static struct device_node *scan_OF_pci_childs(struct device_node *parent,
- pci_OF_scan_iterator filter, void *data)
-{
- struct device_node *node;
- struct device_node *sub_node;
-
- for_each_child_of_node(parent, node) {
- const unsigned int *class_code;
-
- if (filter(node, data)) {
- of_node_put(node);
- return node;
- }
-
- /* For PCI<->PCI bridges or CardBus bridges, we go down
- * Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well.
- */
- class_code = of_get_property(node, "class-code", NULL);
- if ((!class_code ||
- ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
- strcmp(node->name, "multifunc-device"))
- continue;
- sub_node = scan_OF_pci_childs(node, filter, data);
- if (sub_node) {
- of_node_put(node);
- return sub_node;
- }
- }
- return NULL;
-}
-
-static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
- unsigned int devfn)
-{
- struct device_node *np, *cnp;
- const u32 *reg;
- unsigned int psize;
-
- for_each_child_of_node(parent, np) {
- reg = of_get_property(np, "reg", &psize);
- if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
- return np;
-
- /* Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well. */
- if (!strcmp(np->name, "multifunc-device")) {
- cnp = scan_OF_for_pci_dev(np, devfn);
- if (cnp)
- return cnp;
- }
- }
- return NULL;
-}
-
-
-static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
-{
- struct device_node *parent, *np;
-
- /* Are we a root bus ? */
- if (bus->self == NULL || bus->parent == NULL) {
- struct pci_controller *hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return NULL;
- return of_node_get(hose->dn);
- }
-
- /* not a root bus, we need to get our parent */
- parent = scan_OF_for_pci_bus(bus->parent);
- if (parent == NULL)
- return NULL;
-
- /* now iterate for children for a match */
- np = scan_OF_for_pci_dev(parent, bus->self->devfn);
- of_node_put(parent);
-
- return np;
-}
-
-/*
- * Scans the OF tree for a device node matching a PCI device
- */
-struct device_node *
-pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
-{
- struct device_node *parent, *np;
-
- pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
- parent = scan_OF_for_pci_bus(bus);
- if (parent == NULL)
- return NULL;
- pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
- np = scan_OF_for_pci_dev(parent, devfn);
- of_node_put(parent);
- pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
-
- /* XXX most callers don't release the returned node
- * mostly because ppc64 doesn't increase the refcount,
- * we need to fix that.
- */
- return np;
-}
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-
-struct device_node*
-pci_device_to_OF_node(struct pci_dev *dev)
-{
- return pci_busdev_to_OF_node(dev->bus, dev->devfn);
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
-static int
-find_OF_pci_device_filter(struct device_node *node, void *data)
-{
- return ((void *)node == data);
-}
-
-/*
- * Returns the PCI device matching a given OF node
- */
-int
-pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
-{
- const unsigned int *reg;
- struct pci_controller *hose;
- struct pci_dev *dev = NULL;
-
- /* Make sure it's really a PCI device */
- hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->dn)
- return -ENODEV;
- if (!scan_OF_pci_childs(hose->dn,
- find_OF_pci_device_filter, (void *)node))
- return -ENODEV;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
- return -ENODEV;
- *bus = (reg[0] >> 16) & 0xff;
- *devfn = ((reg[0] >> 8) & 0xff);
-
- /* Ok, here we need some tweak. If we have already renumbered
- * all busses, we can't rely on the OF bus number any more.
- * the pci_to_OF_bus_map is not enough as several PCI busses
- * may match the same OF bus number.
- */
- if (!pci_to_OF_bus_map)
- return 0;
-
- for_each_pci_dev(dev)
- if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
- dev->devfn == *devfn) {
- *bus = dev->bus->number;
- pci_dev_put(dev);
- return 0;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL(pci_device_from_OF_node);
-
-/* We create the "pci-OF-bus-map" property now so it appears in the
- * /proc device tree
- */
-void __init
-pci_create_OF_bus_map(void)
-{
- struct property *of_prop;
- struct device_node *dn;
-
- of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \
- 256);
- if (!of_prop)
- return;
- dn = of_find_node_by_path("/");
- if (dn) {
- memset(of_prop, -1, sizeof(struct property) + 256);
- of_prop->name = "pci-OF-bus-map";
- of_prop->length = 256;
- of_prop->value = &of_prop[1];
- prom_add_property(dn, of_prop);
- of_node_put(dn);
- }
-}
-
-static void __devinit pcibios_scan_phb(struct pci_controller *hose)
-{
- struct pci_bus *bus;
- struct device_node *node = hose->dn;
- unsigned long io_offset;
- struct resource *res = &hose->io_resource;
-
- pr_debug("PCI: Scanning PHB %s\n",
- node ? node->full_name : "<NO NAME>");
-
- /* Create an empty bus for the toplevel */
- bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
- if (bus == NULL) {
- printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
- hose->global_number);
- return;
- }
-	bus->dev.of_node = of_node_get(node);
- bus->secondary = hose->first_busno;
- hose->bus = bus;
-
- /* Fixup IO space offset */
- io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
- res->start = (res->start + io_offset) & 0xffffffffu;
- res->end = (res->end + io_offset) & 0xffffffffu;
-
- /* Wire up PHB bus resources */
- pcibios_setup_phb_resources(hose);
-
- /* Scan children */
- hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
-}
-
-static int __init pcibios_init(void)
-{
- struct pci_controller *hose, *tmp;
- int next_busno = 0;
-
- printk(KERN_INFO "PCI: Probing PCI hardware\n");
-
- if (pci_flags & PCI_REASSIGN_ALL_BUS) {
- printk(KERN_INFO "setting pci_asign_all_busses\n");
- pci_assign_all_buses = 1;
- }
-
- /* Scan all of the recorded PCI controllers. */
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- if (pci_assign_all_buses)
- hose->first_busno = next_busno;
- hose->last_busno = 0xff;
- pcibios_scan_phb(hose);
- printk(KERN_INFO "calling pci_bus_add_devices()\n");
- pci_bus_add_devices(hose->bus);
- if (pci_assign_all_buses || next_busno <= hose->last_busno)
- next_busno = hose->last_busno + \
- pcibios_assign_bus_offset;
- }
- pci_bus_count = next_busno;
-
- /* OpenFirmware based machines need a map of OF bus
- * numbers vs. kernel bus numbers since we may have to
- * remap them.
- */
- if (pci_assign_all_buses)
- pcibios_make_OF_bus_map();
-
- /* Call common code to handle resource allocation */
- pcibios_resource_survey();
-
- return 0;
-}
-
-subsys_initcall(pcibios_init);
-
-static struct pci_controller*
-pci_bus_to_hose(int bus)
-{
- struct pci_controller *hose, *tmp;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
- if (bus >= hose->first_busno && bus <= hose->last_busno)
- return hose;
- return NULL;
-}
-
-/* Provide information on locations of various I/O regions in physical
- * memory. Do this on a per-card basis so that we choose the right
- * root bridge.
- * Note that the returned IO or memory base is a physical address
- */
-
-long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
-{
- struct pci_controller *hose;
- long result = -EOPNOTSUPP;
-
- hose = pci_bus_to_hose(bus);
- if (!hose)
- return -ENODEV;
-
- switch (which) {
- case IOBASE_BRIDGE_NUMBER:
- return (long)hose->first_busno;
- case IOBASE_MEMORY:
- return (long)hose->pci_mem_offset;
- case IOBASE_IO:
- return (long)hose->io_base_phys;
- case IOBASE_ISA_IO:
- return (long)isa_io_base;
- case IOBASE_ISA_MEM:
- return (long)isa_mem_base;
- }
-
- return result;
-}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 653da62d0682..177cdaf83564 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -185,6 +185,7 @@ config MACH_JAZZ
select CSRC_R4K
select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN
select GENERIC_ISA_DMA
+ select HAVE_PCSPKR_PLATFORM
select IRQ_CPU
select I8253
select I8259
@@ -266,6 +267,7 @@ config MIPS_MALTA
select CSRC_R4K
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
+ select HAVE_PCSPKR_PLATFORM
select IRQ_CPU
select IRQ_GIC
select HW_HAS_PCI
@@ -640,6 +642,7 @@ config SNI_RM
select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
+ select HAVE_PCSPKR_PLATFORM
select HW_HAS_EISA
select HW_HAS_PCI
select IRQ_CPU
@@ -2388,6 +2391,7 @@ config MMU
config I8253
bool
select CLKSRC_I8253
+ select CLKEVT_I8253
select MIPS_EXTERNAL_TIMER
config ZONE_DMA32
@@ -2489,20 +2493,4 @@ source "security/Kconfig"
source "crypto/Kconfig"
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- default n
- ---help---
- Say Y here to get to see options for using your Linux host to run other
- operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-source drivers/virtio/Kconfig
-
-endif # VIRTUALIZATION
-
source "lib/Kconfig"
diff --git a/arch/mips/cobalt/time.c b/arch/mips/cobalt/time.c
index 0162f9edc693..3bff3b820baf 100644
--- a/arch/mips/cobalt/time.c
+++ b/arch/mips/cobalt/time.c
@@ -17,10 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/i8253.h>
#include <linux/init.h>
#include <asm/gt64120.h>
-#include <asm/i8253.h>
#include <asm/time.h>
#define GT641XX_BASE_CLOCK 50000000 /* 50MHz */
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
deleted file mode 100644
index 9ad011366f73..000000000000
--- a/arch/mips/include/asm/i8253.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Machine specific IO port address definition for generic.
- * Written by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef __ASM_I8253_H
-#define __ASM_I8253_H
-
-#include <linux/spinlock.h>
-
-/* i8253A PIT registers */
-#define PIT_MODE 0x43
-#define PIT_CH0 0x40
-#define PIT_CH2 0x42
-
-#define PIT_LATCH LATCH
-
-extern raw_spinlock_t i8253_lock;
-
-extern void setup_pit_timer(void);
-
-#define inb_pit inb_p
-#define outb_pit outb_p
-
-#endif /* __ASM_I8253_H */
diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h
index 0bf82818aa53..780ee2c2a2ac 100644
--- a/arch/mips/include/asm/stacktrace.h
+++ b/arch/mips/include/asm/stacktrace.h
@@ -7,6 +7,10 @@
extern int raw_show_trace;
extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra);
+extern unsigned long unwind_stack_by_address(unsigned long stack_page,
+ unsigned long *sp,
+ unsigned long pc,
+ unsigned long *ra);
#else
#define raw_show_trace 1
static inline unsigned long unwind_stack(struct task_struct *task,
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 260df4750949..ca9bd2069142 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -7,6 +7,7 @@
* Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
*/
#include <linux/clockchips.h>
+#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -15,7 +16,6 @@
#include <linux/irq.h>
#include <asm/irq_cpu.h>
-#include <asm/i8253.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 391221b6a6aa..be4ee7d63e04 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -3,96 +3,16 @@
*
*/
#include <linux/clockchips.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
+#include <linux/i8253.h>
#include <linux/module.h>
#include <linux/smp.h>
-#include <linux/spinlock.h>
#include <linux/irq.h>
-#include <asm/delay.h>
-#include <asm/i8253.h>
-#include <asm/io.h>
#include <asm/time.h>
-DEFINE_RAW_SPINLOCK(i8253_lock);
-EXPORT_SYMBOL(i8253_lock);
-
-/*
- * Initialize the PIT timer.
- *
- * This is also called after resume to bring the PIT into operation again.
- */
-static void init_pit_timer(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- raw_spin_lock(&i8253_lock);
-
- switch(mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* binary, mode 2, LSB/MSB, ch 0 */
- outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
- outb(LATCH >> 8 , PIT_CH0); /* MSB */
- break;
-
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
- evt->mode == CLOCK_EVT_MODE_ONESHOT) {
- outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
- }
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* One shot setup */
- outb_p(0x38, PIT_MODE);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- /* Nothing to do here */
- break;
- }
- raw_spin_unlock(&i8253_lock);
-}
-
-/*
- * Program the next event in oneshot mode
- *
- * Delta is given in PIT ticks
- */
-static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
-{
- raw_spin_lock(&i8253_lock);
- outb_p(delta & 0xff , PIT_CH0); /* LSB */
- outb(delta >> 8 , PIT_CH0); /* MSB */
- raw_spin_unlock(&i8253_lock);
-
- return 0;
-}
-
-/*
- * On UP the PIT can serve all of the possible timer functions. On SMP systems
- * it can be solely used for the global tick.
- *
- * The profiling and update capabilites are switched off once the local apic is
- * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
- * !using_apic_timer decisions in do_timer_interrupt_hook()
- */
-static struct clock_event_device pit_clockevent = {
- .name = "pit",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = init_pit_timer,
- .set_next_event = pit_next_event,
- .irq = 0,
-};
-
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
- pit_clockevent.event_handler(&pit_clockevent);
+ i8253_clockevent.event_handler(&i8253_clockevent);
return IRQ_HANDLED;
}
@@ -103,25 +23,9 @@ static struct irqaction irq0 = {
.name = "timer"
};
-/*
- * Initialize the conversion factor and the min/max deltas of the clock event
- * structure and register the clock event source with the framework.
- */
void __init setup_pit_timer(void)
{
- struct clock_event_device *cd = &pit_clockevent;
- unsigned int cpu = smp_processor_id();
-
- /*
- * Start pit with the boot cpu mask and make it global after the
- * IO_APIC has been initialized.
- */
- cd->cpumask = cpumask_of(cpu);
- clockevent_set_clock(cd, CLOCK_TICK_RATE);
- cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
- cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
- clockevents_register_device(cd);
-
+ clockevent_i8253_init(true);
setup_irq(0, &irq0);
}
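
With the PIT programming moved into the shared drivers/clocksource code, a platform's setup collapses to registering the common clockevent and wiring IRQ 0. A hedged sketch mirroring the conversion above (platform_pit_init and platform_irq0 are illustrative names):

	#include <linux/i8253.h>

	extern struct irqaction platform_irq0;	/* assumed: handler kicks i8253_clockevent */

	void __init platform_pit_init(void)
	{
		clockevent_i8253_init(true);	/* true: register as oneshot-capable */
		setup_irq(0, &platform_irq0);
	}
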
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index dd940b701963..4b930ac4aff2 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -45,30 +45,14 @@ static struct mips_hi16 *mips_hi16_list;
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
+#ifdef MODULE_START
void *module_alloc(unsigned long size)
{
-#ifdef MODULE_START
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, -1,
__builtin_return_address(0));
-#else
- if (size == 0)
- return NULL;
- return vmalloc(size);
-#endif
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
-{
- return 0;
}
+#endif
static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index a8244854d3dc..d0deaab9ace2 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
if (!mipspmu_event_set_period(event, hwc, idx))
return;
- if (perf_event_overflow(event, 0, data, regs))
+ if (perf_event_overflow(event, data, regs))
mipspmu->disable_event(idx);
}
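
This hunk, and the traps/unaligned/cp1emu/fault hunks below, track a perf API change: the in-interrupt ("nmi") flag argument was dropped from perf_event_overflow() and perf_sw_event(). A hedged before/after sketch:

	/* before: perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); */
	static void report_fault_sketch(struct pt_regs *regs, unsigned long addr)
	{
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	}
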
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 75266ff4cc33..e5ad09a9baf7 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -377,6 +377,20 @@ static const struct mips_perf_event mipsxxcore_cache_map
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
};
/* 74K core has completely different cache event map. */
@@ -480,6 +494,20 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
};
#ifdef CONFIG_MIPS_MT_SMP
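
The generic perf code grew a C(NODE) cache level, so every arch map must populate it even when the hardware cannot count it; here each slot gets the unsupported marker. A hedged, self-contained illustration of the map's three-dimensional shape in plain C (names are illustrative, not the perf structs):

	enum level  { L1D, NODE, NR_LEVEL };
	enum op     { RD, WR, PF, NR_OP };
	enum result { ACCESS, MISS, NR_RESULT };

	/* -1 plays the role of UNSUPPORTED_PERF_EVENT_ID */
	static const int cache_map[NR_LEVEL][NR_OP][NR_RESULT] = {
		[NODE] = {
			[RD] = { [ACCESS] = -1, [MISS] = -1 },
			[WR] = { [ACCESS] = -1, [MISS] = -1 },
			[PF] = { [ACCESS] = -1, [MISS] = -1 },
		},
	};
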
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d2112d3cf115..c28fbe6107bc 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -373,18 +373,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
#ifdef CONFIG_KALLSYMS
-/* used by show_backtrace() */
-unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
- unsigned long pc, unsigned long *ra)
+/* generic stack unwinding function */
+unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+ unsigned long *sp,
+ unsigned long pc,
+ unsigned long *ra)
{
- unsigned long stack_page;
struct mips_frame_info info;
unsigned long size, ofs;
int leaf;
extern void ret_from_irq(void);
extern void ret_from_exception(void);
- stack_page = (unsigned long)task_stack_page(task);
if (!stack_page)
return 0;
@@ -443,6 +443,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
*ra = 0;
return __kernel_text_address(pc) ? pc : 0;
}
+EXPORT_SYMBOL(unwind_stack_by_address);
+
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long *ra)
+{
+ unsigned long stack_page = (unsigned long)task_stack_page(task);
+ return unwind_stack_by_address(stack_page, sp, pc, ra);
+}
#endif
/*
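Note: unwind_stack() is now a thin wrapper; the body moved into unwind_stack_by_address(), which takes the stack page explicitly so callers such as the oprofile backtrace code below can unwind without a task_struct. A caller simply loops until the returned pc is zero, along these lines (record_frame() is a hypothetical consumer):

	/* Illustrative caller: walk kernel frames until unwinding fails
	 * and unwind_stack_by_address() returns 0. */
	unsigned long stack_page = (unsigned long)task_stack_page(current);
	unsigned long sp = regs->regs[29];	/* $sp */
	unsigned long ra = regs->regs[31];	/* $ra */
	unsigned long pc = regs->cp0_epc;

	while (pc) {
		record_frame(pc);
		pc = unwind_stack_by_address(stack_page, &sp, pc, &ra);
	}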
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e9b3af27d844..b7517e3abc85 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == LL) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
- 1, 0, regs, 0);
+ 1, regs, 0);
return simulate_ll(regs, opcode);
}
if ((opcode & OPCODE) == SC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
- 1, 0, regs, 0);
+ 1, regs, 0);
return simulate_sc(regs, opcode);
}
@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
- 1, 0, regs, 0);
+ 1, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
- 1, 0, regs, 0);
+ 1, regs, 0);
return 0;
}
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index cfea1adfa153..eb319b580353 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
unsigned long value;
unsigned int res;
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
- 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/*
* This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
mm_segment_t seg;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
- 1, 0, regs, regs->cp0_badvaddr);
+ 1, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index d32cb0503110..dbf2f93a5091 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
}
emul:
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
- 1, 0, xcp, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 137ee76a0045..937cf3368164 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -145,7 +145,7 @@ good_area:
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -154,12 +154,10 @@ good_area:
BUG();
}
if (fault & VM_FAULT_MAJOR) {
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
- 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
tsk->maj_flt++;
} else {
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
- 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
tsk->min_flt++;
}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 1620b83cd13e..f8ee945ee411 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -19,6 +19,7 @@
*/
#include <linux/types.h>
+#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
@@ -31,7 +32,6 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/hardirq.h>
-#include <asm/i8253.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/cpu.h>
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 4b9d7044e26c..29f2f13eb31c 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -8,7 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) common.o
+oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
new file mode 100644
index 000000000000..6854ed5097d2
--- /dev/null
+++ b/arch/mips/oprofile/backtrace.c
@@ -0,0 +1,175 @@
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+#include <linux/stacktrace.h>
+#include <linux/kernel.h>
+#include <asm/sections.h>
+#include <asm/inst.h>
+
+struct stackframe {
+ unsigned long sp;
+ unsigned long pc;
+ unsigned long ra;
+};
+
+static inline int get_mem(unsigned long addr, unsigned long *result)
+{
+ unsigned long *address = (unsigned long *) addr;
+ if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
+ return -1;
+ if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
+ return -3;
+ return 0;
+}
+
+/*
+ * These two instruction helpers were taken from process.c
+ */
+static inline int is_ra_save_ins(union mips_instruction *ip)
+{
+ /* sw / sd $ra, offset($sp) */
+ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
+ && ip->i_format.rs == 29 && ip->i_format.rt == 31;
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip)
+{
+ /* addiu/daddiu sp,sp,-imm */
+ if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+ return 0;
+ if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
+ return 1;
+ return 0;
+}
+
+/*
+ * Looks for specific instructions that mark the end of a function.
+ * This usually means we ran into the code area of the previous function.
+ */
+static inline int is_end_of_function_marker(union mips_instruction *ip)
+{
+ /* jr ra */
+ if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
+ return 1;
+ /* lui gp */
+ if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
+ return 1;
+ return 0;
+}
+
+/*
+ * TODO for userspace stack unwinding:
+ * - handle cases where the stack is adjusted inside a function
+ * (generally doesn't happen)
+ * - find optimal value for max_instr_check
+ * - try to find a way to handle leaf functions
+ */
+
+static inline int unwind_user_frame(struct stackframe *old_frame,
+ const unsigned int max_instr_check)
+{
+ struct stackframe new_frame = *old_frame;
+ off_t ra_offset = 0;
+ size_t stack_size = 0;
+ unsigned long addr;
+
+ if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
+ return -9;
+
+ for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
+ && (!ra_offset || !stack_size); --addr) {
+ union mips_instruction ip;
+
+ if (get_mem(addr, (unsigned long *) &ip))
+ return -11;
+
+ if (is_sp_move_ins(&ip)) {
+ int stack_adjustment = ip.i_format.simmediate;
+ if (stack_adjustment > 0)
+ /* This marks the end of the previous function,
+ which means we overran. */
+ break;
+ stack_size = (unsigned) stack_adjustment;
+ } else if (is_ra_save_ins(&ip)) {
+ int ra_slot = ip.i_format.simmediate;
+ if (ra_slot < 0)
+ /* This shouldn't happen. */
+ break;
+ ra_offset = ra_slot;
+ } else if (is_end_of_function_marker(&ip))
+ break;
+ }
+
+ if (!ra_offset || !stack_size)
+ return -1;
+
+ if (ra_offset) {
+ new_frame.ra = old_frame->sp + ra_offset;
+ if (get_mem(new_frame.ra, &(new_frame.ra)))
+ return -13;
+ }
+
+ if (stack_size) {
+ new_frame.sp = old_frame->sp + stack_size;
+ if (get_mem(new_frame.sp, &(new_frame.sp)))
+ return -14;
+ }
+
+ if (new_frame.sp > old_frame->sp)
+ return -2;
+
+ new_frame.pc = old_frame->ra;
+ *old_frame = new_frame;
+
+ return 0;
+}
+
+static inline void do_user_backtrace(unsigned long low_addr,
+ struct stackframe *frame,
+ unsigned int depth)
+{
+ const unsigned int max_instr_check = 512;
+ const unsigned long high_addr = low_addr + THREAD_SIZE;
+
+ while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
+ oprofile_add_trace(frame->ra);
+ if (frame->sp < low_addr || frame->sp > high_addr)
+ break;
+ }
+}
+
+#ifndef CONFIG_KALLSYMS
+static inline void do_kernel_backtrace(unsigned long low_addr,
+ struct stackframe *frame,
+ unsigned int depth) { }
+#else
+static inline void do_kernel_backtrace(unsigned long low_addr,
+ struct stackframe *frame,
+ unsigned int depth)
+{
+ while (depth-- && frame->pc) {
+ frame->pc = unwind_stack_by_address(low_addr,
+ &(frame->sp),
+ frame->pc,
+ &(frame->ra));
+ oprofile_add_trace(frame->ra);
+ }
+}
+#endif
+
+void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
+{
+ struct stackframe frame = { .sp = regs->regs[29],
+ .pc = regs->cp0_epc,
+ .ra = regs->regs[31] };
+ const int userspace = user_mode(regs);
+ const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);
+
+ if (userspace)
+ do_user_backtrace(low_addr, &frame, depth);
+ else
+ do_kernel_backtrace(low_addr, &frame, depth);
+}
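Note: op_mips_backtrace() picks an unwinder based on user_mode(regs). Kernel frames reuse unwind_stack_by_address() from process.c (when CONFIG_KALLSYMS provides it), while user frames are recovered heuristically by scanning backwards from the current pc for the instructions that set up the frame. The negative return values of unwind_user_frame() are debug markers, not errno codes; any nonzero value simply ends the walk. The prologue shape the two matchers recognize:

	/* A typical MIPS o32 prologue the user-space heuristic looks for
	 * (illustrative assembly, not taken from the patch):
	 *
	 *	addiu	sp, sp, -32	<- is_sp_move_ins(): stack adjustment
	 *	sw	ra, 28(sp)	<- is_ra_save_ins(): ra spilled at sp + 28
	 *
	 * Once both are found, unwind_user_frame() reloads the spilled ra
	 * (the caller's pc) and steps sp back to the caller's frame. */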
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index f9eb1aba6345..d1f2d4c52d42 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -115,6 +115,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->start = op_mips_start;
ops->stop = op_mips_stop;
ops->cpu_type = lmodel->cpu_type;
+ ops->backtrace = op_mips_backtrace;
printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
lmodel->cpu_type);
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index f04b54fb37d1..7c2da27ece04 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -36,4 +36,6 @@ struct op_mips_model {
unsigned char num_counters;
};
+void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
+
#endif
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index 1a94c9894188..607192449335 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -10,6 +10,7 @@
* Copyright (C) 2003, 06 Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/bcd.h>
+#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -20,7 +21,6 @@
#include <asm/cpu.h>
#include <asm/mipsregs.h>
-#include <asm/i8253.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/time.h>
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 0904d4d30cb3..ec0be14996a4 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -1,11 +1,11 @@
#include <linux/types.h>
+#include <linux/i8253.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/clockchips.h>
-#include <asm/i8253.h>
#include <asm/sni.h>
#include <asm/time.h>
#include <asm-generic/rtc.h>
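Note: the malta-time.c, ip22-time.c and sni/time.c hunks are part of the tree-wide move of the PIT declarations from the per-arch <asm/i8253.h> headers to a shared <linux/i8253.h>; only the location changes. What the shared header provides, roughly (a sketch from memory, see the real header for the authoritative list):

	/* Sketch of <linux/i8253.h> after the consolidation. */
	#define PIT_TICK_RATE	1193182ul	/* i8253 input clock, in Hz */

	extern raw_spinlock_t i8253_lock;	/* serializes PIT register access */
	extern void setup_pit_timer(void);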
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c
index 196a111e2e29..216ad23c9570 100644
--- a/arch/mn10300/kernel/module.c
+++ b/arch/mn10300/kernel/module.c
@@ -32,36 +32,6 @@
#define DEBUGP(fmt, ...)
#endif
-/*
- * allocate storage for a module
- */
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc_exec(size);
-}
-
-/*
- * free memory returned from module_alloc()
- */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/*
- * allow the arch to fix up the section table
- * - we don't need anything special
- */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
static void reloc_put16(uint8_t *p, uint32_t val)
{
p[0] = val & 0xff;
@@ -81,20 +51,6 @@ static void reloc_put32(uint8_t *p, uint32_t val)
}
/*
- * apply a REL relocation
- */
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
-/*
* apply a RELA relocation
*/
int apply_relocate_add(Elf32_Shdr *sechdrs,
@@ -198,20 +154,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-/*
- * finish loading the module
- */
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-/*
- * finish clearing the module
- */
-void module_arch_cleanup(struct module *mod)
-{
-}
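Note: as with the MIPS module.c hunk above, the deleted mn10300 hooks are now supplied by weak defaults in kernel/module.c, and an arch only overrides the ones it needs. The REL case, for instance, falls back to a default that refuses the relocation, roughly (a sketch, not verbatim):

	/* Sketch of the generic weak fallback for REL relocations in
	 * kernel/module.c; arches without REL support no longer need
	 * their own copy of this error path. */
	int __weak apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
				  unsigned int symindex, unsigned int relsec,
				  struct module *me)
	{
		pr_err("module %s: REL relocation unsupported\n", me->name);
		return -ENOEXEC;
	}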
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
new file mode 100644
index 000000000000..4558bafbd1a2
--- /dev/null
+++ b/arch/openrisc/Kconfig
@@ -0,0 +1,207 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+#
+
+config OPENRISC
+ def_bool y
+ select OF
+ select OF_EARLY_FLATTREE
+ select HAVE_MEMBLOCK
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IOMAP
+
+config MMU
+ def_bool y
+
+config WISHBONE_BUS_BIG_ENDIAN
+ def_bool y
+
+config SYMBOL_PREFIX
+ string
+ default ""
+
+config HAVE_DMA_ATTRS
+ def_bool y
+
+config UID16
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+ def_bool n
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config GENERIC_IOMAP
+ def_bool y
+
+config NO_IOPORT
+ def_bool y
+
+config GENERIC_GPIO
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+# For now, use generic checksum functions
+# These can be reimplemented in assembly later if so inclined
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_FIND_NEXT_BIT
+ def_bool y
+
+source "init/Kconfig"
+
+
+menu "Processor type and features"
+
+choice
+ prompt "Subarchitecture"
+ default OR1K_1200
+
+config OR1K_1200
+ bool "OR1200"
+ help
+ Generic OpenRISC 1200 architecture
+
+endchoice
+
+config OPENRISC_BUILTIN_DTB
+ string "Builtin DTB"
+ default ""
+
+menu "Class II Instructions"
+
+config OPENRISC_HAVE_INST_FF1
+ bool "Have instruction l.ff1"
+ default y
+ help
+ Select this if your implementation has the Class II instruction l.ff1
+
+config OPENRISC_HAVE_INST_FL1
+ bool "Have instruction l.fl1"
+ default y
+ help
+ Select this if your implementation has the Class II instruction l.fl1
+
+config OPENRISC_HAVE_INST_MUL
+ bool "Have instruction l.mul for hardware multiply"
+ default y
+ help
+ Select this if your implementation has a hardware multiply instruction
+
+config OPENRISC_HAVE_INST_DIV
+ bool "Have instruction l.div for hardware divide"
+ default y
+ help
+ Select this if your implementation has a hardware divide instruction
+endmenu
+
+
+source "kernel/time/Kconfig"
+source kernel/Kconfig.hz
+source kernel/Kconfig.preempt
+source "mm/Kconfig"
+
+config OPENRISC_NO_SPR_SR_DSX
+ bool "use SPR_SR_DSX software emulation" if OR1K_1200
+ default y
+ help
+ The SPR_SR_DSX bit is a status register bit indicating whether
+ the last exception happened in a delay slot.
+
+ The OpenRISC architecture makes it optional to implement it in
+ hardware, and the OR1200 does not have it.
+
+ Say N here if you know that your OpenRISC processor has the
+ SPR_SR_DSX bit implemented. Say Y if you are unsure.
+
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+ help
+ On some architectures there is currently no way for the boot loader
+ to pass arguments to the kernel. For these architectures, you should
+ supply some command-line options at build time by entering them
+ here.
+
+menu "Debugging options"
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for kernel stack overflow"
+ default y
+ help
+ Make extra checks for space available on the stack in some
+ critical functions. This will cause the kernel to run a bit
+ slower, but will catch most kernel stack overruns and exit
+ gracefully.
+
+ Say Y if you are unsure.
+
+config JUMP_UPON_UNHANDLED_EXCEPTION
+ bool "Try to die gracefully"
+ default y
+ help
+ This puts the kernel into an infinite loop after the first oops;
+ until your kernel actually crashes, it has no effect.
+
+ Say Y if you are unsure.
+
+config OPENRISC_EXCEPTION_DEBUG
+ bool "Print processor state at each exception"
+ default n
+ help
+ This option will make your kernel unusable for all but kernel
+ debugging.
+
+ Say N if you are unsure.
+
+config OPENRISC_ESR_EXCEPTION_BUG_CHECK
+ bool "Check for possible ESR exception bug"
+ default n
+ help
+ This option enables some checks that might expose problems
+ in the kernel.
+
+ Say N if you are unsure.
+
+endmenu
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+endmenu
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
new file mode 100644
index 000000000000..158ae4c0dc6c
--- /dev/null
+++ b/arch/openrisc/Makefile
@@ -0,0 +1,55 @@
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Modifications for the OpenRISC architecture:
+# Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+# Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+#
+# Based on:
+# arch/i386/Makefile
+
+KBUILD_DEFCONFIG := or1ksim_defconfig
+
+LDFLAGS :=
+OBJCOPYFLAGS := -O binary -R .note -R .comment -S
+LDFLAGS_vmlinux :=
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+
+KBUILD_CFLAGS += -pipe -ffixed-r10
+
+ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y)
+ KBUILD_CFLAGS += $(call cc-option,-mhard-mul)
+else
+ KBUILD_CFLAGS += $(call cc-option,-msoft-mul)
+endif
+
+ifeq ($(CONFIG_OPENRISC_HAVE_INST_DIV),y)
+ KBUILD_CFLAGS += $(call cc-option,-mhard-div)
+else
+ KBUILD_CFLAGS += $(call cc-option,-msoft-div)
+endif
+
+head-y := arch/openrisc/kernel/head.o arch/openrisc/kernel/init_task.o
+
+core-y += arch/openrisc/lib/ \
+ arch/openrisc/kernel/ \
+ arch/openrisc/mm/
+libs-y += $(LIBGCC)
+
+ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
+BUILTIN_DTB := y
+else
+BUILTIN_DTB := n
+endif
+core-$(BUILTIN_DTB) += arch/openrisc/boot/
+
+all: vmlinux
diff --git a/arch/openrisc/README.openrisc b/arch/openrisc/README.openrisc
new file mode 100644
index 000000000000..c9f7edf2b9a2
--- /dev/null
+++ b/arch/openrisc/README.openrisc
@@ -0,0 +1,99 @@
+OpenRISC Linux
+==============
+
+This is a port of Linux to the OpenRISC class of microprocessors; the initial
+target architecture, specifically, is the 32-bit OpenRISC 1000 family (or1k).
+
+For information about OpenRISC processors and ongoing development:
+
+ website http://openrisc.net
+
+For more information about Linux on OpenRISC, please contact South Pole AB.
+
+ email: info@southpole.se
+
+ website: http://southpole.se
+ http://southpoleconsulting.com
+
+---------------------------------------------------------------------
+
+Build instructions for OpenRISC toolchain and Linux
+===================================================
+
+In order to build and run Linux for OpenRISC, you'll need at least a basic
+toolchain and, perhaps, the architectural simulator. Steps to get these bits
+in place are outlined here.
+
+1) The toolchain can be obtained from openrisc.net. Instructions for building
+a toolchain can be found at:
+
+http://openrisc.net/toolchain-build.html
+
+2) or1ksim (optional)
+
+or1ksim is the architectural simulator which will allow you to actually run
+your OpenRISC Linux kernel if you don't have an OpenRISC processor at hand.
+
+ git clone git://openrisc.net/jonas/or1ksim-svn
+
+ cd or1ksim
+ ./configure --prefix=$OPENRISC_PREFIX
+ make
+ make install
+
+3) Linux kernel
+
+Build the kernel as usual
+
+ make ARCH=openrisc defconfig
+ make ARCH=openrisc
+
+4) Run in architectural simulator
+
+Grab the or1ksim platform configuration file (from the or1ksim source) and
+together with your freshly built vmlinux, run your kernel with the following
+incantation:
+
+ sim -f arch/openrisc/or1ksim.cfg vmlinux
+
+---------------------------------------------------------------------
+
+Terminology
+===========
+
+In the code, the following prefixes are used on symbols to limit the scope
+to more or less specific processor implementations:
+
+openrisc: the OpenRISC class of processors
+or1k: the OpenRISC 1000 family of processors
+or1200: the OpenRISC 1200 processor
+
+---------------------------------------------------------------------
+
+History
+========
+
+18. 11. 2003 Matjaz Breskvar (phoenix@bsemi.com)
+ initial port of Linux to the OpenRISC/or32 architecture.
+ all the core stuff is implemented and seems usable.
+
+08. 12. 2003 Matjaz Breskvar (phoenix@bsemi.com)
+ complete change of TLB miss handling.
+ rewrite of exception handling.
+ fully functional sash-3.6 in default initrd.
+ a much improved version with changes all around.
+
+10. 04. 2004 Matjaz Breskvar (phoenix@bsemi.com)
+ a lot of bugfixes all over.
+ ethernet support, functional http and telnet servers.
+ running many standard linux apps.
+
+26. 06. 2004 Matjaz Breskvar (phoenix@bsemi.com)
+ port to 2.6.x
+
+30. 11. 2004 Matjaz Breskvar (phoenix@bsemi.com)
+ lots of bugfixes and enhancements.
+ added opencores framebuffer driver.
+
+09. 10. 2010 Jonas Bonn (jonas@southpole.se)
+ major rewrite to bring up to par with upstream Linux 2.6.36
diff --git a/arch/openrisc/TODO.openrisc b/arch/openrisc/TODO.openrisc
new file mode 100644
index 000000000000..acfeef9c58e3
--- /dev/null
+++ b/arch/openrisc/TODO.openrisc
@@ -0,0 +1,16 @@
+The OpenRISC Linux port is fully functional and has been tracking upstream
+since 2.6.35. There are, however, remaining items to be completed within
+the coming months. Here's a list of known-to-be-less-than-stellar items
+that are due for investigation shortly, i.e. our TODO list:
+
+-- Implement the rest of the DMA API... dma_map_sg, etc.
+
+-- Consolidate usage of memblock and bootmem... move everything over to
+ memblock.
+
+-- Finish the renaming cleanup... there are references to or32 in the code,
+ which was an older name for the architecture. The name we've settled on is
+ or1k and this change is slowly trickling through the stack. For the time
+ being, or32 is equivalent to or1k.
+
+-- Implement optimized versions of memcpy and memset
diff --git a/arch/openrisc/boot/Makefile b/arch/openrisc/boot/Makefile
new file mode 100644
index 000000000000..98ca185097a5
--- /dev/null
+++ b/arch/openrisc/boot/Makefile
@@ -0,0 +1,15 @@
+
+
+ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_OPENRISC_BUILTIN_DTB)).dtb.o
+else
+BUILTIN_DTB :=
+endif
+obj-y += $(BUILTIN_DTB)
+
+clean-files := *.dtb.S
+
+#DTC_FLAGS ?= -p 1024
+
+$(obj)/%.dtb: $(src)/dts/%.dts
+ $(call cmd,dtc)
diff --git a/arch/openrisc/boot/dts/or1ksim.dts b/arch/openrisc/boot/dts/or1ksim.dts
new file mode 100644
index 000000000000..5d4f9027afaf
--- /dev/null
+++ b/arch/openrisc/boot/dts/or1ksim.dts
@@ -0,0 +1,50 @@
+/dts-v1/;
+/ {
+ compatible = "opencores,or1ksim";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ chosen {
+ bootargs = "console=uart,mmio,0x90000000,115200";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x02000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "opencores,or1200-rtlsvn481";
+ reg = <0>;
+ clock-frequency = <20000000>;
+ };
+ };
+
+ /*
+ * OR1K PIC is built into CPU and accessed via special purpose
+ * registers. It is not addressable and, hence, has no 'reg'
+ * property.
+ */
+ pic: pic {
+ compatible = "opencores,or1k-pic";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ serial0: serial@90000000 {
+ compatible = "opencores,uart16550-rtlsvn105", "ns16550a";
+ reg = <0x90000000 0x100>;
+ interrupts = <2>;
+ clock-frequency = <20000000>;
+ };
+
+ enet0: ethoc@92000000 {
+ compatible = "opencores,ethmac-rtlsvn338";
+ reg = <0x92000000 0x100>;
+ interrupts = <4>;
+ };
+};
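Note: this devicetree is how or1ksim platform parameters reach the kernel; the cpu node's clock-frequency property, for example, is read during setup. A sketch of consuming it with long-standing OF calls (the property and compatible string are from this dts; the surrounding code is hypothetical, error handling elided):

	/* Hypothetical consumer of the cpu node's clock-frequency property. */
	struct device_node *cpu;
	const __be32 *prop;
	u32 freq = 0;

	cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
	prop = of_get_property(cpu, "clock-frequency", NULL);
	if (prop)
		freq = be32_to_cpup(prop);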
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
new file mode 100644
index 000000000000..ea172bdfa36a
--- /dev/null
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -0,0 +1,65 @@
+CONFIG_CROSS_COMPILE="or32-linux-"
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_EXPERT=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_EPOLL is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+CONFIG_MODULES=y
+# CONFIG_BLOCK is not set
+CONFIG_OPENRISC_BUILTIN_DTB="or1ksim"
+CONFIG_NO_HZ=y
+CONFIG_HZ_100=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_NETDEVICES=y
+CONFIG_MICREL_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_ETHOC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
new file mode 100644
index 000000000000..11162e6c878f
--- /dev/null
+++ b/arch/openrisc/include/asm/Kbuild
@@ -0,0 +1,64 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += spr_defs.h
+
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cacheflush.h
+generic-y += checksum.h
+generic-y += cmpxchg.h
+generic-y += cmpxchg-local.h
+generic-y += cpumask.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += rmap.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += stat.h
+generic-y += string.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
diff --git a/arch/openrisc/include/asm/asm-offsets.h b/arch/openrisc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/openrisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h
new file mode 100644
index 000000000000..a9e11efae14d
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops.h
@@ -0,0 +1,59 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_BITOPS_H
+#define __ASM_OPENRISC_BITOPS_H
+
+/*
+ * Where we haven't written assembly versions yet, we fall back to the
+ * generic implementations. Otherwise, we pull in our (hopefully)
+ * optimized versions.
+ */
+
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+
+/*
+ * clear_bit may not imply a memory barrier
+ */
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+#endif
+
+#include <asm/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm/bitops/fls.h>
+#include <asm/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_OPENRISC_BITOPS_H */
diff --git a/arch/openrisc/include/asm/bitops/__ffs.h b/arch/openrisc/include/asm/bitops/__ffs.h
new file mode 100644
index 000000000000..6c8368a34059
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__ffs.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FFS_H
+#define __ASM_OPENRISC___FFS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ int ret;
+
+ __asm__ ("l.ff1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/__fls.h b/arch/openrisc/include/asm/bitops/__fls.h
new file mode 100644
index 000000000000..c4ecdb4c523b
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FLS_H
+#define __ASM_OPENRISC___FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline unsigned long __fls(unsigned long x)
+{
+ int ret;
+
+ __asm__ ("l.fl1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FLS_H */
diff --git a/arch/openrisc/include/asm/bitops/ffs.h b/arch/openrisc/include/asm/bitops/ffs.h
new file mode 100644
index 000000000000..9de46246ebc7
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/ffs.h
@@ -0,0 +1,32 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FFS_H
+#define __ASM_OPENRISC_FFS_H
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline int ffs(int x)
+{
+ int ret;
+
+ __asm__ ("l.ff1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/fls.h b/arch/openrisc/include/asm/bitops/fls.h
new file mode 100644
index 000000000000..9efbf9ad86c4
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FLS_H
+#define __ASM_OPENRISC_FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline int fls(int x)
+{
+ int ret;
+
+ __asm__ ("l.fl1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret;
+}
+
+#else
+#include <asm-generic/bitops/fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FLS_H */
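Note: these four headers map the bit-search family onto the OR1200's l.ff1/l.fl1 "find first/last 1" instructions, falling back to the generic C versions when the Class II instructions are absent. The expected semantics, demonstrated as plain runnable C (using GCC builtins as stand-ins):

	/* Expected semantics of the bit-search family, independent of the
	 * l.ff1/l.fl1 implementation. For x = 0x58 (0b1011000):
	 *   __ffs(x) == 3  (lowest set bit, 0-based)
	 *   ffs(x)   == 4  (lowest set bit, 1-based; ffs(0) == 0)
	 *   __fls(x) == 6  (highest set bit, 0-based)
	 *   fls(x)   == 7  (highest set bit, 1-based; fls(0) == 0)
	 */
	#include <assert.h>

	int main(void)
	{
		assert(__builtin_ffs(0x58) == 4);	/* stand-in for ffs() */
		assert(32 - __builtin_clz(0x58) == 7);	/* stand-in for fls() */
		return 0;
	}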
diff --git a/arch/openrisc/include/asm/byteorder.h b/arch/openrisc/include/asm/byteorder.h
new file mode 100644
index 000000000000..60d14f7e14e2
--- /dev/null
+++ b/arch/openrisc/include/asm/byteorder.h
@@ -0,0 +1 @@
+#include <linux/byteorder/big_endian.h>
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
new file mode 100644
index 000000000000..4ce7a01a252d
--- /dev/null
+++ b/arch/openrisc/include/asm/cache.h
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CACHE_H
+#define __ASM_OPENRISC_CACHE_H
+
+/* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+#define L1_CACHE_BYTES 16
+#define L1_CACHE_SHIFT 4
+
+#endif /* __ASM_OPENRISC_CACHE_H */
diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h
new file mode 100644
index 000000000000..917318b6a970
--- /dev/null
+++ b/arch/openrisc/include/asm/cpuinfo.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CPUINFO_H
+#define __ASM_OPENRISC_CPUINFO_H
+
+struct cpuinfo {
+ u32 clock_frequency;
+
+ u32 icache_size;
+ u32 icache_block_size;
+
+ u32 dcache_size;
+ u32 dcache_block_size;
+};
+
+extern struct cpuinfo cpuinfo;
+
+#endif /* __ASM_OPENRISC_CPUINFO_H */
diff --git a/arch/openrisc/include/asm/delay.h b/arch/openrisc/include/asm/delay.h
new file mode 100644
index 000000000000..17f8bf5a5ac2
--- /dev/null
+++ b/arch/openrisc/include/asm/delay.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_DELAY_H
+#define __ASM_OPENRISC_DELAY_H
+
+#include <asm-generic/delay.h>
+
+extern unsigned long loops_per_jiffy;
+
+#endif
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..052f877b52a5
--- /dev/null
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -0,0 +1,134 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_DMA_MAPPING_H
+#define __ASM_OPENRISC_DMA_MAPPING_H
+
+/*
+ * See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
+ *
+ * This file is written with the intention of eventually moving over
+ * to largely using asm-generic/dma-mapping-common.h in its place.
+ */
+
+#include <linux/dma-debug.h>
+#include <asm-generic/dma-coherent.h>
+#include <linux/kmemcheck.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+dma_addr_t or1k_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+void or1k_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+void or1k_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *memory;
+
+ memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+ return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = or1k_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, NULL);
+ debug_dma_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+ or1k_unmap_page(dev, addr, size, dir, NULL);
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+ or1k_sync_single_for_cpu(dev, addr, size, dir);
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+ or1k_sync_single_for_device(dev, addr, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == 0xffffffffULL;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
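Note: each inline wrapper pairs the or1k_* operation with its dma-debug hook, so CONFIG_DMA_API_DEBUG can track every mapping. Typical streaming-DMA usage in a driver is unaffected by the arch plumbing (illustrative fragment; dev, buf and len are assumed to exist):

	/* Illustrative streaming-DMA use of the wrappers above. */
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... let the device read the buffer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);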
diff --git a/arch/openrisc/include/asm/elf.h b/arch/openrisc/include/asm/elf.h
new file mode 100644
index 000000000000..2ce603bbfdd3
--- /dev/null
+++ b/arch/openrisc/include/asm/elf.h
@@ -0,0 +1,108 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_ELF_H
+#define __ASM_OPENRISC_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+
+/* The OR1K relocation types... not all relevant for module loader */
+#define R_OR32_NONE 0
+#define R_OR32_32 1
+#define R_OR32_16 2
+#define R_OR32_8 3
+#define R_OR32_CONST 4
+#define R_OR32_CONSTH 5
+#define R_OR32_JUMPTARG 6
+#define R_OR32_VTINHERIT 7
+#define R_OR32_VTENTRY 8
+
+typedef unsigned long elf_greg_t;
+
+/*
+ * Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is
+ * thus exposed to user-space.
+ */
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* A placeholder; OR32 does not have fp support yet, so no fp regs for now. */
+typedef unsigned long elf_fpregset_t;
+
+/* This should be moved to include/linux/elf.h */
+#define EM_OR32 0x8472
+#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_ARCH EM_OR32
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2MSB
+
+#ifdef __KERNEL__
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) \
+ (((x)->e_machine == EM_OR32) || ((x)->e_machine == EM_OPENRISC))
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (0x08000000)
+
+/*
+ * Enable dump using regset.
+ * This covers all of general/DSP/FPU regs.
+ */
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE 8192
+
+extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt);
+#define ELF_CORE_COPY_REGS(dest, regs) dump_elf_thread(dest, regs);
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ For the moment, we have only optimizations for the Intel generations,
+ but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h
new file mode 100644
index 000000000000..52733416c1f3
--- /dev/null
+++ b/arch/openrisc/include/asm/fixmap.h
@@ -0,0 +1,87 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FIXMAP_H
+#define __ASM_OPENRISC_FIXMAP_H
+
+/* Why exactly do we need 2 empty pages between the top of the fixed
+ * addresses and the top of virtual memory? Something is using that
+ * memory space, but we are not sure what right now... If you find
+ * it, leave a comment here.
+ */
+#define FIXADDR_TOP ((unsigned long) (-2*PAGE_SIZE))
+
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+/*
+ * On OpenRISC we use these special fixed_addresses for doing ioremap
+ * early in the boot process before memory initialization is complete.
+ * This is used, in particular, by the early serial console code.
+ *
+ * It's not really 'fixmap', per se, but fits loosely into the same
+ * paradigm.
+ */
+enum fixed_addresses {
+ /*
+ * FIX_IOREMAP entries are useful for mapping physical address
+ * space before ioremap() is usable, e.g. really early in boot
+ * before kmalloc() is working.
+ */
+#define FIX_N_IOREMAPS 32
+ FIX_IOREMAP_BEGIN,
+ FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1,
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+/* FIXADDR_BOTTOM might be a better name here... */
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way (such as mixing up address types or using
+ * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message.
+ */
+ if (idx >= __end_of_fixed_addresses)
+ BUG();
+
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif
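Note: the translation is pure arithmetic downward from FIXADDR_TOP. A worked example, assuming PAGE_SHIFT == 13 (8 KiB pages, matching the ELF_EXEC_PAGESIZE of 8192 above) on a 32-bit kernel:

	/* FIXADDR_TOP = (unsigned long)(-2 * PAGE_SIZE) = 0xffffc000
	 *
	 *	fix_to_virt(FIX_IOREMAP_BEGIN)     = 0xffffc000 - (0 << 13) = 0xffffc000
	 *	fix_to_virt(FIX_IOREMAP_BEGIN + 1) = 0xffffc000 - (1 << 13) = 0xffffa000
	 *
	 * i.e. successive indices hand out pages growing downward from
	 * FIXADDR_TOP, leaving the two reserved pages above untouched. */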
diff --git a/arch/openrisc/include/asm/gpio.h b/arch/openrisc/include/asm/gpio.h
new file mode 100644
index 000000000000..0b0d174f47cd
--- /dev/null
+++ b/arch/openrisc/include/asm/gpio.h
@@ -0,0 +1,65 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_GPIO_H
+#define __ASM_OPENRISC_GPIO_H
+
+#include <linux/errno.h>
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+/*
+ * OpenRISC (or1k) does not have on-chip GPIOs, so there is not really
+ * any standardized implementation that makes sense here. If passing
+ * through gpiolib becomes a bottleneck then it may make sense, on a
+ * case-by-case basis, to implement these inlined/rapid versions.
+ *
+ * Just call gpiolib.
+ */
+static inline int gpio_get_value(unsigned int gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+/*
+ * Not implemented, yet.
+ */
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return -ENOSYS;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+#endif /* __ASM_OPENRISC_GPIO_H */
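Note: every accessor funnels straight into gpiolib, so drivers use the standard calls unchanged (illustrative fragment; the GPIO number and label are assumptions):

	/* Illustrative gpiolib usage through the wrappers above. */
	int err = gpio_request(42, "led");

	if (!err) {
		gpio_direction_output(42, 0);
		gpio_set_value(42, 1);	/* ends up in __gpio_set_value() */
		gpio_free(42);
	}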
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
new file mode 100644
index 000000000000..07f5299d6c28
--- /dev/null
+++ b/arch/openrisc/include/asm/io.h
@@ -0,0 +1,51 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IO_H
+#define __ASM_OPENRISC_IO_H
+
+/*
+ * PCI: can we really do 0 here if we have no port IO?
+ */
+#define IO_SPACE_LIMIT 0
+
+/* OpenRISC has no port IO */
+#define HAVE_ARCH_PIO_SIZE 1
+#define PIO_RESERVED 0x0UL
+#define PIO_OFFSET 0
+#define PIO_MASK 0
+
+#include <asm-generic/io.h>
+
+extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size,
+ pgprot_t prot);
+
+static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+{
+ return __ioremap(offset, size, PAGE_KERNEL);
+}
+
+/* #define _PAGE_CI 0x002 */
+static inline void __iomem *ioremap_nocache(phys_addr_t offset,
+ unsigned long size)
+{
+ return __ioremap(offset, size,
+ __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI));
+}
+
+extern void iounmap(void *addr);
+#endif
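Note: ioremap_nocache() differs from ioremap() only by or'ing _PAGE_CI (cache inhibit) into the page protection, so device registers get an uncached mapping. Usage sketch (the UART base address is the one from the or1ksim dts above; the rest is hypothetical):

	/* Illustrative MMIO access through an uncached mapping. */
	void __iomem *uart = ioremap_nocache(0x90000000, 0x100);

	if (uart) {
		u8 lsr = readb(uart + 5);	/* 16550 line status register */
		(void)lsr;
		iounmap(uart);
	}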
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
new file mode 100644
index 000000000000..eb612b1865d2
--- /dev/null
+++ b/arch/openrisc/include/asm/irq.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IRQ_H__
+#define __ASM_OPENRISC_IRQ_H__
+
+#define NR_IRQS 32
+#include <asm-generic/irq.h>
+
+#define NO_IRQ (-1)
+
+#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/include/asm/irqflags.h b/arch/openrisc/include/asm/irqflags.h
new file mode 100644
index 000000000000..dc86c653d70b
--- /dev/null
+++ b/arch/openrisc/include/asm/irqflags.h
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ___ASM_OPENRISC_IRQFLAGS_H
+#define ___ASM_OPENRISC_IRQFLAGS_H
+
+#include <asm/spr_defs.h>
+
+#define ARCH_IRQ_DISABLED 0x00
+#define ARCH_IRQ_ENABLED (SPR_SR_IEE|SPR_SR_TEE)
+
+#include <asm-generic/irqflags.h>
+
+#endif /* ___ASM_OPENRISC_IRQFLAGS_H */
diff --git a/arch/openrisc/include/asm/linkage.h b/arch/openrisc/include/asm/linkage.h
new file mode 100644
index 000000000000..e2638752091a
--- /dev/null
+++ b/arch/openrisc/include/asm/linkage.h
@@ -0,0 +1,25 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_LINKAGE_H
+#define __ASM_OPENRISC_LINKAGE_H
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+#endif /* __ASM_OPENRISC_LINKAGE_H */
diff --git a/arch/openrisc/include/asm/memblock.h b/arch/openrisc/include/asm/memblock.h
new file mode 100644
index 000000000000..bbe5a1c788cb
--- /dev/null
+++ b/arch/openrisc/include/asm/memblock.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MEMBLOCK_H
+#define __ASM_OPENRISC_MEMBLOCK_H
+
+/* empty */
+
+#endif /* __ASM_OPENRISC_MEMBLOCK_H */
diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h
new file mode 100644
index 000000000000..d069bc2ddfa4
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu.h
@@ -0,0 +1,26 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_H
+#define __ASM_OPENRISC_MMU_H
+
+#ifndef __ASSEMBLY__
+typedef unsigned long mm_context_t;
+#endif
+
+#endif
diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..e94b814d2e3c
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu_context.h
@@ -0,0 +1,43 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_CONTEXT_H
+#define __ASM_OPENRISC_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
+extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk);
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+
+/* current active pgd - this is similar to other processors' pgd
+ * registers, like cr3 on the i386
+ */
+
+extern volatile pgd_t *current_pgd; /* defined in arch/openrisc/mm/fault.c */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#endif
diff --git a/arch/openrisc/include/asm/mutex.h b/arch/openrisc/include/asm/mutex.h
new file mode 100644
index 000000000000..b85a0cfa9fc9
--- /dev/null
+++ b/arch/openrisc/include/asm/mutex.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
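For reference, the mutex-dec flavour pulled in here implements the fastpath as an atomic decrement that calls into the slowpath on contention. Roughly, and only as a sketch of the generic idea (the example_* name is hypothetical):

    /* Sketch of the atomic-decrement fastpath idea behind
     * asm-generic/mutex-dec.h: count is 1 when the mutex is unlocked. */
    static inline void example_mutex_fastpath_lock(atomic_t *count,
                                                   void (*fail_fn)(atomic_t *))
    {
            if (unlikely(atomic_dec_return(count) < 0))
                    fail_fn(count);         /* contended: slow path */
    }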
diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h
new file mode 100644
index 000000000000..b041b344b229
--- /dev/null
+++ b/arch/openrisc/include/asm/page.h
@@ -0,0 +1,110 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PAGE_H
+#define __ASM_OPENRISC_PAGE_H
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT 13
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define PAGE_OFFSET 0xc0000000
+#define KERNELBASE PAGE_OFFSET
+
+/* This is not necessarily the right place for this, but it's needed by
+ * drivers/of/fdt.c
+ */
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif /* !__ASSEMBLY__ */
+
+
+#ifndef __ASSEMBLY__
+
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
+#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr) \
+ (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
+#define page_to_virt(page) \
+ ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+
+#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
+ ((void *)(kaddr) < (void *)memory_end))
+
+#endif /* __ASSEMBLY__ */
+
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_OPENRISC_PAGE_H */
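Since PAGE_SHIFT is 13, this port uses 8 KiB pages, and the linear map begins at PAGE_OFFSET, so __pa()/__va() are a fixed offset. A hypothetical worked example of the conversions above:

    /* Hypothetical worked example (the address is made up):
     *   PAGE_SIZE          = 1 << 13 = 8192 (8 KiB pages)
     *   vaddr              = 0xc0104000
     *   __pa(vaddr)        = 0xc0104000 - 0xc0000000 = 0x00104000
     *   virt_to_pfn(vaddr) = 0x00104000 >> 13 = 0x82
     *   pfn_to_virt(0x82)  = (0x82 << 13) + 0xc0000000 = 0xc0104000
     */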
diff --git a/arch/openrisc/include/asm/param.h b/arch/openrisc/include/asm/param.h
new file mode 100644
index 000000000000..c39a336610e2
--- /dev/null
+++ b/arch/openrisc/include/asm/param.h
@@ -0,0 +1,26 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PARAM_H
+#define __ASM_OPENRISC_PARAM_H
+
+#define EXEC_PAGESIZE 8192
+
+#include <asm-generic/param.h>
+
+#endif /* __ASM_OPENRISC_PARAM_H */
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
new file mode 100644
index 000000000000..05c39ecd2efd
--- /dev/null
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -0,0 +1,102 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PGALLOC_H
+#define __ASM_OPENRISC_PGALLOC_H
+
+#include <asm/page.h>
+#include <linux/threads.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+
+extern int mem_init_done;
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ struct page *pte)
+{
+ set_pmd(pmd, __pmd(_KERNPG_TABLE +
+ ((unsigned long)page_to_pfn(pte) <<
+ (unsigned long) PAGE_SHIFT)));
+}
+
+/*
+ * Allocate and free page tables.
+ */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (ret) {
+ memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
+
+#if 0
+/* FIXME: This seems to be the preferred style, but we are using
+ * current_pgd (from mm->pgd) to load kernel pages so we need it
+ * initialized. This needs to be looked into.
+ */
+extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
+}
+#endif
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *pte;
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ if (pte)
+ clear_page(page_address(pte));
+ return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ __free_page(pte);
+}
+
+
+#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#define check_pgt_cache() do { } while (0)
+
+#endif
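The split in pgd_alloc() follows directly from the constants in pgtable.h and processor.h; working the numbers through (a reading aid, not new code):

    /* Worked numbers for pgd_alloc():
     *   TASK_SIZE         = 0x80000000
     *   PGDIR_SIZE        = 1 << 24 = 16 MiB
     *   USER_PTRS_PER_PGD = TASK_SIZE / PGDIR_SIZE = 128
     *   PTRS_PER_PGD      = 1 << (13 - 2) = 2048
     *
     * So the first 128 pgd slots (user space) are zeroed and the other
     * 1920 are copied from swapper_pg_dir, giving every new mm the same
     * kernel mappings.
     */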
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
new file mode 100644
index 000000000000..043505d7f684
--- /dev/null
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -0,0 +1,463 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* or32 pgtable.h - macros and functions to manipulate page tables
+ *
+ * Based on:
+ * include/asm-cris/pgtable.h
+ */
+
+#ifndef __ASM_OPENRISC_PGTABLE_H
+#define __ASM_OPENRISC_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/mmu.h>
+#include <asm/fixmap.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * or32, we use that, but "fold" the mid level into the top-level page
+ * table. Since the MMU TLB is software loaded through an interrupt, it
+ * supports any page table structure, so we could have used a three-level
+ * setup, but for the amounts of memory we normally use, a two-level is
+ * probably more efficient.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the or32 page table tree.
+ */
+
+extern void paging_init(void);
+
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+/*
+ * (pmds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: we use a two-level, so
+ * we don't really have any PMD directory physically.
+ * pointers are 4 bytes so we can use the page size and
+ * divide it by 4 (shift by 2).
+ */
+#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
+
+#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
+
+/* Calculate how many PGD entries a user-level program can use.
+ * The first mappable virtual address is 0
+ * (TASK_SIZE is the maximum virtual address space).
+ */
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * Kernel's own virtual memory area.
+ */
+
+/*
+ * The size and location of the vmalloc area are chosen so that modules
+ * placed in this area aren't more than a 28-bit signed offset from any
+ * kernel functions that they may need. This greatly simplifies handling
+ * of the relocations for l.j and l.jal instructions as we don't need to
+ * introduce any trampolines for reaching "distant" code.
+ *
+ * 64 MB of vmalloc area is comparable to what's available on other arches.
+ */
+
+#define VMALLOC_START (PAGE_OFFSET-0x04000000)
+#define VMALLOC_END (PAGE_OFFSET)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+/* Define some higher level generic page attributes.
+ *
+ * If you change _PAGE_CI definition be sure to change it in
+ * io.h for ioremap_nocache() too.
+ */
+
+/*
+ * An OR32 PTE looks like this:
+ *
+ * | 31 ... 10 | 9 | 8 ... 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * Phys pg.num L PP Index D A WOM WBC CI CC
+ *
+ * L : link
+ * PPI: Page protection index
+ * D : Dirty
+ * A : Accessed
+ * WOM: Weakly ordered memory
+ * WBC: Write-back cache
+ * CI : Cache inhibit
+ * CC : Cache coherent
+ *
+ * The protection bits below should correspond to the layout of the actual
+ * PTE as per above
+ */
+
+#define _PAGE_CC 0x001 /* software: pte contains a translation */
+#define _PAGE_CI 0x002 /* cache inhibit */
+#define _PAGE_WBC 0x004 /* write back cache */
+#define _PAGE_FILE 0x004 /* set: pagecache, unset: swap (when !PRESENT) */
+#define _PAGE_WOM 0x008 /* weakly ordered memory */
+
+#define _PAGE_A 0x010 /* accessed */
+#define _PAGE_D 0x020 /* dirty */
+#define _PAGE_URE 0x040 /* user read enable */
+#define _PAGE_UWE 0x080 /* user write enable */
+
+#define _PAGE_SRE 0x100 /* superuser read enable */
+#define _PAGE_SWE 0x200 /* superuser write enable */
+#define _PAGE_EXEC 0x400 /* software: page is executable */
+#define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
+
+/* 0x001 is cache coherency bit, which should always be set to
+ * 1 - for SMP (when we support it)
+ * 0 - otherwise
+ *
+ * we just reuse this bit in software for _PAGE_PRESENT and
+ * force it to 0 when loading it into TLB.
+ */
+#define _PAGE_PRESENT _PAGE_CC
+#define _PAGE_USER _PAGE_URE
+#define _PAGE_WRITE (_PAGE_UWE | _PAGE_SWE)
+#define _PAGE_DIRTY _PAGE_D
+#define _PAGE_ACCESSED _PAGE_A
+#define _PAGE_NO_CACHE _PAGE_CI
+#define _PAGE_SHARED _PAGE_U_SHARED
+#define _PAGE_READ (_PAGE_URE | _PAGE_SRE)
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_ALL (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _KERNPG_TABLE \
+ (_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_ALL)
+#define PAGE_READONLY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+#define PAGE_SHARED \
+ __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+ | _PAGE_SHARED)
+#define PAGE_SHARED_X \
+ __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+ | _PAGE_SHARED | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_COPY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+
+#define PAGE_KERNEL \
+ __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_RO \
+ __pgprot(_PAGE_ALL | _PAGE_SRE \
+ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_NOCACHE \
+ __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY_X
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY_X
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY_X
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED_X
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED_X
+
+/* zero page used for uninitialized stuff */
+extern unsigned long empty_zero_page[2048];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+/* number of bits that fit into a memory pointer */
+#define BITS_PER_PTR (8*sizeof(unsigned long))
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK (~(sizeof(void *)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+/* 64-bit machines, beware! SRB. */
+#define SIZEOF_PTR_LOG2 2
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address) \
+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+/* to set the page-dir */
+#define SET_PAGE_DIR(tsk, pgdir)
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE);
+ return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_READ);
+ return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_EXEC);
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_ACCESSED);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_READ;
+ return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_EXEC;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ return pte;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+/* What actually gets passed as arguments to the various functions is less
+ * than obvious, but a rule of thumb is that struct pages are passed as
+ * struct page *, real physical DRAM addresses as unsigned long, and DRAM
+ * "virtual" addresses (the 0xc0xxxxxx ones) as void *.
+ */
+
+static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
+{
+ pte_t pte;
+ /* the PTE needs a physical address */
+ pte_val(pte) = __pa(page) | pgprot_val(pgprot);
+ return pte;
+}
+
+#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
+
+#define mk_pte_phys(physpage, pgprot) \
+({ \
+ pte_t __pte; \
+ \
+ pte_val(__pte) = (physpage) + pgprot_val(pgprot); \
+ __pte; \
+})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+
+/*
+ * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
+ * __pte_page(pte_val) refers to the "virtual" DRAM interval
+ * pte_pagenr refers to the page-number counted starting from the virtual
+ * DRAM start
+ */
+
+static inline unsigned long __pte_page(pte_t pte)
+{
+ /* the PTE contains a physical address */
+ return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
+}
+
+#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+/* permanent address of a page */
+
+#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define pte_page(pte) (mem_map+pte_pagenr(pte))
+
+/*
+ * only the pte's themselves need to point to physical DRAM (see above)
+ * the pagetable links are purely handled within the kernel SW and thus
+ * don't need the __pa and __va transformations.
+ */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
+}
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define __pgd_offset(address) pgd_index(address)
+
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define __pmd_offset(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define __pte_offset(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+#define pte_offset_map_nested(dir, address) \
+ pte_offset_map(dir, address)
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
+
+#define pte_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
+ __FILE__, __LINE__, &(e), pte_val(e))
+#define pgd_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
+
+/*
+ * or32 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Actually, I am not sure what this could be used for.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte)
+{
+}
+
+/* __PHX__ FIXME, SWAP, this probably doesn't work */
+
+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+/* _PAGE_PRESENT is bit 0 (_PAGE_CC) here; the swap entry uses the bits above */
+
+#define __swp_type(x) (((x).val >> 5) & 0x7f)
+#define __swp_offset(x) ((x).val >> 12)
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+/* Encode and decode a nonlinear file mapping entry */
+
+#define PTE_FILE_MAX_BITS 26
+#define pte_to_pgoff(x) (pte_val(x) >> 6)
+#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)
+
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+#define io_remap_page_range remap_page_range
+
+typedef pte_t *pte_addr_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PGTABLE_H */
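Putting the shift constants together, a 32-bit virtual address decomposes into a pgd index, a pte index, and a page offset. A worked decode using only the macros defined above (the address is made up):

    /* Worked example:
     *   PGDIR_SHIFT = 13 + 11 = 24, PTRS_PER_PTE = PTRS_PER_PGD = 2048
     *
     *    31          24 23           13 12            0
     *   |  pgd index   |   pte index   |  page offset  |
     *
     *   addr = 0x40123456:
     *     pgd_index(addr)    = 0x40123456 >> 24           = 0x40
     *     __pte_offset(addr) = (0x40123456 >> 13) & 0x7ff = 0x091
     *     offset             = 0x40123456 & 0x1fff        = 0x1456
     */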
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
new file mode 100644
index 000000000000..bb54c97b9783
--- /dev/null
+++ b/arch/openrisc/include/asm/processor.h
@@ -0,0 +1,113 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PROCESSOR_H
+#define __ASM_OPENRISC_PROCESSOR_H
+
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+/* Kernel and user SR register setting */
+#define KERNEL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+ | SPR_SR_DCE | SPR_SR_SM)
+#define USER_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+ | SPR_SR_DCE | SPR_SR_IEE | SPR_SR_TEE)
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/*
+ * User space process size. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+
+#define TASK_SIZE (0x80000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+struct thread_struct {
+};
+
+/*
+ * At user->kernel entry, the pt_regs struct is stacked on the top of the
+ * kernel-stack. This macro allows us to find those regs for a task.
+ * Notice that subsequent pt_regs stackings, like recursive interrupts
+ * occurring while we're in the kernel, won't affect this - only the first
+ * user->kernel transition registers are reached by this (i.e. not regs
+ * for running signal handler)
+ */
+#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE - STACK_FRAME_OVERHEAD)) - 1)
+
+/*
+ * Ditto, but for the currently running task
+ */
+
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() user_regs(current_thread_info())
+
+extern inline void prepare_to_copy(struct task_struct *tsk)
+{
+}
+
+#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+
+#define INIT_THREAD { }
+
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void release_thread(struct task_struct *);
+unsigned long get_wchan(struct task_struct *p);
+
+/*
+ * Free current thread data structures etc..
+ */
+
+extern inline void exit_thread(void)
+{
+ /* Nothing needs to be done. */
+}
+
+/*
+ * Return saved PC of a blocked thread. For now, this is the "user" PC
+ */
+extern unsigned long thread_saved_pc(struct task_struct *t);
+
+#define init_stack (init_thread_union.stack)
+
+#define cpu_relax() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PROCESSOR_H */
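The user_regs() expression is easier to follow with numbers; THREAD_SIZE is not defined in this header, so the figure below assumes it equals one 8 KiB page:

    /* Worked example for user_regs(), assuming THREAD_SIZE = 8 KiB:
     *   thread_info at                   0xc1000000
     *   + THREAD_SIZE                  = 0xc1002000 (stack top)
     *   - STACK_FRAME_OVERHEAD (128)   = 0xc1001f80
     *   - sizeof(struct pt_regs)       = the user-entry pt_regs
     *
     * The user-entry registers therefore always sit just below the
     * redzone at the top of the kernel stack, regardless of how deeply
     * later exceptions nest.
     */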
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
new file mode 100644
index 000000000000..e1f3fe26606c
--- /dev/null
+++ b/arch/openrisc/include/asm/prom.h
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
+
+#ifndef _ASM_OPENRISC_PROM_H
+#define _ASM_OPENRISC_PROM_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <linux/of_irq.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/proc_fs.h>
+#include <linux/platform_device.h>
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+/* Other Prototypes */
+extern int early_uartlite_console(void);
+
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size);
+
+extern void kdump_move_device_tree(void);
+
+/* CPU OF node matching */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+
+/* Get the MAC address */
+extern const void *of_get_mac_address(struct device_node *np);
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until an device-node is found, at which point it will finish
+ * resolving using the OF tree walking.
+ */
+struct pci_dev;
+extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+
+/* This routine is here to provide compatibility with how powerpc
+ * handles IRQ mapping for OF device nodes. We precompute and permanently
+ * register them in the platform_device objects, whereas powerpc computes them
+ * on request.
+ */
+static inline void irq_dispose_mapping(unsigned int virq)
+{
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
new file mode 100644
index 000000000000..054537c5f9c9
--- /dev/null
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -0,0 +1,131 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PTRACE_H
+#define __ASM_OPENRISC_PTRACE_H
+
+#include <asm/spr_defs.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * This is the layout of the regset returned by the PTRACE_GETREGSET call
+ */
+struct user_regs_struct {
+ /* GPR R0-R31... */
+ unsigned long gpr[32];
+ unsigned long pc;
+ unsigned long sr;
+ unsigned long pad1;
+ unsigned long pad2;
+};
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * Make kernel ptrace/register structures opaque to userspace... userspace can
+ * access thread state via the regset mechanism. This allows us a bit of
+ * flexibility in how we order the registers on the stack, permitting some
+ * optimizations like packing call-clobbered registers together so that
+ * they share a cacheline (not done yet, though... future optimization).
+ */
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct describes how the registers are laid out on the kernel stack
+ * during a syscall or other kernel entry.
+ *
+ * This structure should always be cacheline aligned on the stack.
+ * FIXME: I don't think that's the case right now. The alignment is
+ * taken care of elsewhere... head.S, process.c, etc.
+ */
+
+struct pt_regs {
+ union {
+ struct {
+ /* Named registers */
+ long sr; /* Stored in place of r0 */
+ long sp; /* r1 */
+ };
+ struct {
+ /* Old style */
+ long offset[2];
+ long gprs[30];
+ };
+ struct {
+ /* New style */
+ long gpr[32];
+ };
+ };
+ long pc;
+ long orig_gpr11; /* For restarting system calls */
+ long syscallno; /* Syscall number (used by strace) */
+ long dummy; /* Cheap alignment fix */
+};
+#endif /* __ASSEMBLY__ */
+
+/* TODO: Rename this to REDZONE because that's what it is */
+#define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */
+
+#define instruction_pointer(regs) ((regs)->pc)
+#define user_mode(regs) (((regs)->sr & SPR_SR_SM) == 0)
+#define user_stack_pointer(regs) ((unsigned long)(regs)->sp)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ */
+#define PT_SR 0
+#define PT_SP 4
+#define PT_GPR2 8
+#define PT_GPR3 12
+#define PT_GPR4 16
+#define PT_GPR5 20
+#define PT_GPR6 24
+#define PT_GPR7 28
+#define PT_GPR8 32
+#define PT_GPR9 36
+#define PT_GPR10 40
+#define PT_GPR11 44
+#define PT_GPR12 48
+#define PT_GPR13 52
+#define PT_GPR14 56
+#define PT_GPR15 60
+#define PT_GPR16 64
+#define PT_GPR17 68
+#define PT_GPR18 72
+#define PT_GPR19 76
+#define PT_GPR20 80
+#define PT_GPR21 84
+#define PT_GPR22 88
+#define PT_GPR23 92
+#define PT_GPR24 96
+#define PT_GPR25 100
+#define PT_GPR26 104
+#define PT_GPR27 108
+#define PT_GPR28 112
+#define PT_GPR29 116
+#define PT_GPR30 120
+#define PT_GPR31 124
+#define PT_PC 128
+#define PT_ORIG_GPR11 132
+#define PT_SYSCALLNO 136
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_PTRACE_H */
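The PT_* values are simply 4 * (word index into struct pt_regs), which keeps regset accesses a plain table lookup; for instance:

    /* PT_GPR9  = 36  = 4 * 9  -> gpr[9]
     * PT_GPR31 = 124 = 4 * 31 -> gpr[31]
     * PT_PC    = 128 = 4 * 32 -> pc, the slot right after the 32 GPRs
     */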
diff --git a/arch/openrisc/include/asm/serial.h b/arch/openrisc/include/asm/serial.h
new file mode 100644
index 000000000000..270a45241639
--- /dev/null
+++ b/arch/openrisc/include/asm/serial.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SERIAL_H
+#define __ASM_OPENRISC_SERIAL_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpuinfo.h>
+
+/* There's a generic version of this file, but it assumes a 1.8432 MHz UART clk...
+ * this, on the other hand, assumes the UART clock is tied to the system
+ * clock... 8250_early.c (early 8250 serial console) actually uses this, so
+ * it needs to be correct to get the early console working.
+ */
+
+#define BASE_BAUD (cpuinfo.clock_frequency/16)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_SERIAL_H */
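BASE_BAUD here is the UART input clock divided by 16, which the 8250 code in turn divides by the requested baud rate to get its divisor. A hypothetical example, assuming a 50 MHz system clock:

    /* Hypothetical numbers, assuming cpuinfo.clock_frequency = 50 MHz:
     *   BASE_BAUD = 50000000 / 16 = 3125000
     *   16550 divisor for a 115200 console
     *             = BASE_BAUD / 115200 = 27 (truncated)
     */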
diff --git a/arch/openrisc/include/asm/sigcontext.h b/arch/openrisc/include/asm/sigcontext.h
new file mode 100644
index 000000000000..54a5c50132e3
--- /dev/null
+++ b/arch/openrisc/include/asm/sigcontext.h
@@ -0,0 +1,38 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SIGCONTEXT_H
+#define __ASM_OPENRISC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/* This struct is saved by setup_frame in signal.c, to keep the current
+ context while a signal handler is executed. It's restored by sys_sigreturn.
+
+ To keep things simple, we use pt_regs here even though normally you just
+ specify the list of regs to save. Then we can use copy_from_user on the
+ entire regs instead of a bunch of get_user's as well...
+*/
+
+struct sigcontext {
+ struct pt_regs regs; /* needs to be first */
+ unsigned long oldmask;
+ unsigned long usp; /* usp before stacking this gunk on it */
+};
+
+#endif /* __ASM_OPENRISC_SIGCONTEXT_H */
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
new file mode 100644
index 000000000000..fd00a3a24123
--- /dev/null
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPINLOCK_H
+#define __ASM_OPENRISC_SPINLOCK_H
+
+#error "or32 doesn't do SMP yet"
+
+#endif
diff --git a/arch/openrisc/include/asm/spr.h b/arch/openrisc/include/asm/spr.h
new file mode 100644
index 000000000000..1cccb42dd477
--- /dev/null
+++ b/arch/openrisc/include/asm/spr.h
@@ -0,0 +1,42 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPR_H
+#define __ASM_OPENRISC_SPR_H
+
+#define mtspr(_spr, _val) __asm__ __volatile__ ( \
+ "l.mtspr r0,%1,%0" \
+ : : "K" (_spr), "r" (_val))
+#define mtspr_off(_spr, _off, _val) __asm__ __volatile__ ( \
+ "l.mtspr %0,%1,%2" \
+ : : "r" (_off), "r" (_val), "K" (_spr))
+
+static inline unsigned long mfspr(unsigned long add)
+{
+ unsigned long ret;
+ __asm__ __volatile__ ("l.mfspr %0,r0,%1" : "=r" (ret) : "K" (add));
+ return ret;
+}
+
+static inline unsigned long mfspr_off(unsigned long add, unsigned long offset)
+{
+ unsigned long ret;
+ __asm__ __volatile__ ("l.mfspr %0,%1,%2" : "=r" (ret)
+ : "r" (offset), "K" (add));
+ return ret;
+}
+
+#endif
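Typical use pairs these accessors with the masks in spr_defs.h; a hypothetical helper (example_cpu_version() is not part of the patch) decoding the version register:

    /* Hypothetical usage of mfspr() with the spr_defs.h masks. */
    static inline unsigned int example_cpu_version(void)
    {
            unsigned long vr = mfspr(SPR_VR);

            return (vr & SPR_VR_VER) >> SPR_VR_VER_OFF; /* top 8 bits */
    }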
diff --git a/arch/openrisc/include/asm/spr_defs.h b/arch/openrisc/include/asm/spr_defs.h
new file mode 100644
index 000000000000..5dbc668865c4
--- /dev/null
+++ b/arch/openrisc/include/asm/spr_defs.h
@@ -0,0 +1,604 @@
+/*
+ * OpenRISC Linux
+ *
+ * SPR Definitions
+ *
+ * Copyright (C) 2000 Damjan Lampret
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2008, 2010 Embecosm Limited
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This file is part of OpenRISC 1000 Architectural Simulator.
+ */
+
+#ifndef SPR_DEFS__H
+#define SPR_DEFS__H
+
+/* Definition of special-purpose registers (SPRs). */
+
+#define MAX_GRPS (32)
+#define MAX_SPRS_PER_GRP_BITS (11)
+#define MAX_SPRS_PER_GRP (1 << MAX_SPRS_PER_GRP_BITS)
+#define MAX_SPRS (0x10000)
+
+/* Base addresses for the groups */
+#define SPRGROUP_SYS (0 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DMMU (1 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IMMU (2 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DC (3 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IC (4 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_MAC (5 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_D (6 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PC (7 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PM (8 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PIC (9 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_TT (10 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_FP (11 << MAX_SPRS_PER_GRP_BITS)
+
+/* System control and status group */
+#define SPR_VR (SPRGROUP_SYS + 0)
+#define SPR_UPR (SPRGROUP_SYS + 1)
+#define SPR_CPUCFGR (SPRGROUP_SYS + 2)
+#define SPR_DMMUCFGR (SPRGROUP_SYS + 3)
+#define SPR_IMMUCFGR (SPRGROUP_SYS + 4)
+#define SPR_DCCFGR (SPRGROUP_SYS + 5)
+#define SPR_ICCFGR (SPRGROUP_SYS + 6)
+#define SPR_DCFGR (SPRGROUP_SYS + 7)
+#define SPR_PCCFGR (SPRGROUP_SYS + 8)
+#define SPR_NPC (SPRGROUP_SYS + 16) /* CZ 21/06/01 */
+#define SPR_SR (SPRGROUP_SYS + 17) /* CZ 21/06/01 */
+#define SPR_PPC (SPRGROUP_SYS + 18) /* CZ 21/06/01 */
+#define SPR_FPCSR (SPRGROUP_SYS + 20) /* CZ 21/06/01 */
+#define SPR_EPCR_BASE (SPRGROUP_SYS + 32) /* CZ 21/06/01 */
+#define SPR_EPCR_LAST (SPRGROUP_SYS + 47) /* CZ 21/06/01 */
+#define SPR_EEAR_BASE (SPRGROUP_SYS + 48)
+#define SPR_EEAR_LAST (SPRGROUP_SYS + 63)
+#define SPR_ESR_BASE (SPRGROUP_SYS + 64)
+#define SPR_ESR_LAST (SPRGROUP_SYS + 79)
+#define SPR_GPR_BASE (SPRGROUP_SYS + 1024)
+
+/* Data MMU group */
+#define SPR_DMMUCR (SPRGROUP_DMMU + 0)
+#define SPR_DTLBEIR (SPRGROUP_DMMU + 2)
+#define SPR_DTLBMR_BASE(WAY) (SPRGROUP_DMMU + 0x200 + (WAY) * 0x100)
+#define SPR_DTLBMR_LAST(WAY) (SPRGROUP_DMMU + 0x27f + (WAY) * 0x100)
+#define SPR_DTLBTR_BASE(WAY) (SPRGROUP_DMMU + 0x280 + (WAY) * 0x100)
+#define SPR_DTLBTR_LAST(WAY) (SPRGROUP_DMMU + 0x2ff + (WAY) * 0x100)
+
+/* Instruction MMU group */
+#define SPR_IMMUCR (SPRGROUP_IMMU + 0)
+#define SPR_ITLBEIR (SPRGROUP_IMMU + 2)
+#define SPR_ITLBMR_BASE(WAY) (SPRGROUP_IMMU + 0x200 + (WAY) * 0x100)
+#define SPR_ITLBMR_LAST(WAY) (SPRGROUP_IMMU + 0x27f + (WAY) * 0x100)
+#define SPR_ITLBTR_BASE(WAY) (SPRGROUP_IMMU + 0x280 + (WAY) * 0x100)
+#define SPR_ITLBTR_LAST(WAY) (SPRGROUP_IMMU + 0x2ff + (WAY) * 0x100)
+
+/* Data cache group */
+#define SPR_DCCR (SPRGROUP_DC + 0)
+#define SPR_DCBPR (SPRGROUP_DC + 1)
+#define SPR_DCBFR (SPRGROUP_DC + 2)
+#define SPR_DCBIR (SPRGROUP_DC + 3)
+#define SPR_DCBWR (SPRGROUP_DC + 4)
+#define SPR_DCBLR (SPRGROUP_DC + 5)
+#define SPR_DCR_BASE(WAY) (SPRGROUP_DC + 0x200 + (WAY) * 0x200)
+#define SPR_DCR_LAST(WAY) (SPRGROUP_DC + 0x3ff + (WAY) * 0x200)
+
+/* Instruction cache group */
+#define SPR_ICCR (SPRGROUP_IC + 0)
+#define SPR_ICBPR (SPRGROUP_IC + 1)
+#define SPR_ICBIR (SPRGROUP_IC + 2)
+#define SPR_ICBLR (SPRGROUP_IC + 3)
+#define SPR_ICR_BASE(WAY) (SPRGROUP_IC + 0x200 + (WAY) * 0x200)
+#define SPR_ICR_LAST(WAY) (SPRGROUP_IC + 0x3ff + (WAY) * 0x200)
+
+/* MAC group */
+#define SPR_MACLO (SPRGROUP_MAC + 1)
+#define SPR_MACHI (SPRGROUP_MAC + 2)
+
+/* Debug group */
+#define SPR_DVR(N) (SPRGROUP_D + (N))
+#define SPR_DCR(N) (SPRGROUP_D + 8 + (N))
+#define SPR_DMR1 (SPRGROUP_D + 16)
+#define SPR_DMR2 (SPRGROUP_D + 17)
+#define SPR_DWCR0 (SPRGROUP_D + 18)
+#define SPR_DWCR1 (SPRGROUP_D + 19)
+#define SPR_DSR (SPRGROUP_D + 20)
+#define SPR_DRR (SPRGROUP_D + 21)
+
+/* Performance counters group */
+#define SPR_PCCR(N) (SPRGROUP_PC + (N))
+#define SPR_PCMR(N) (SPRGROUP_PC + 8 + (N))
+
+/* Power management group */
+#define SPR_PMR (SPRGROUP_PM + 0)
+
+/* PIC group */
+#define SPR_PICMR (SPRGROUP_PIC + 0)
+#define SPR_PICPR (SPRGROUP_PIC + 1)
+#define SPR_PICSR (SPRGROUP_PIC + 2)
+
+/* Tick Timer group */
+#define SPR_TTMR (SPRGROUP_TT + 0)
+#define SPR_TTCR (SPRGROUP_TT + 1)
+
+/*
+ * Bit definitions for the Version Register
+ *
+ */
+#define SPR_VR_VER 0xff000000 /* Processor version */
+#define SPR_VR_CFG 0x00ff0000 /* Processor configuration */
+#define SPR_VR_RES 0x0000ffc0 /* Reserved */
+#define SPR_VR_REV 0x0000003f /* Processor revision */
+
+#define SPR_VR_VER_OFF 24
+#define SPR_VR_CFG_OFF 16
+#define SPR_VR_REV_OFF 0
+
+/*
+ * Bit definitions for the Unit Present Register
+ *
+ */
+#define SPR_UPR_UP 0x00000001 /* UPR present */
+#define SPR_UPR_DCP 0x00000002 /* Data cache present */
+#define SPR_UPR_ICP 0x00000004 /* Instruction cache present */
+#define SPR_UPR_DMP 0x00000008 /* Data MMU present */
+#define SPR_UPR_IMP 0x00000010 /* Instruction MMU present */
+#define SPR_UPR_MP 0x00000020 /* MAC present */
+#define SPR_UPR_DUP 0x00000040 /* Debug unit present */
+#define SPR_UPR_PCUP 0x00000080 /* Performance counters unit present */
+#define SPR_UPR_PMP 0x00000100 /* Power management present */
+#define SPR_UPR_PICP 0x00000200 /* PIC present */
+#define SPR_UPR_TTP 0x00000400 /* Tick timer present */
+#define SPR_UPR_RES 0x00fe0000 /* Reserved */
+#define SPR_UPR_CUP 0xff000000 /* Context units present */
+
+/*
+ * JPB: Bit definitions for the CPU configuration register
+ *
+ */
+#define SPR_CPUCFGR_NSGF 0x0000000f /* Number of shadow GPR files */
+#define SPR_CPUCFGR_CGF 0x00000010 /* Custom GPR file */
+#define SPR_CPUCFGR_OB32S 0x00000020 /* ORBIS32 supported */
+#define SPR_CPUCFGR_OB64S 0x00000040 /* ORBIS64 supported */
+#define SPR_CPUCFGR_OF32S 0x00000080 /* ORFPX32 supported */
+#define SPR_CPUCFGR_OF64S 0x00000100 /* ORFPX64 supported */
+#define SPR_CPUCFGR_OV64S 0x00000200 /* ORVDX64 supported */
+#define SPR_CPUCFGR_RES 0xfffffc00 /* Reserved */
+
+/*
+ * JPB: Bit definitions for the Debug configuration register and other
+ * constants.
+ *
+ */
+
+#define SPR_DCFGR_NDP 0x00000007 /* Number of matchpoints mask */
+#define SPR_DCFGR_NDP1 0x00000000 /* One matchpoint supported */
+#define SPR_DCFGR_NDP2 0x00000001 /* Two matchpoints supported */
+#define SPR_DCFGR_NDP3 0x00000002 /* Three matchpoints supported */
+#define SPR_DCFGR_NDP4 0x00000003 /* Four matchpoints supported */
+#define SPR_DCFGR_NDP5 0x00000004 /* Five matchpoints supported */
+#define SPR_DCFGR_NDP6 0x00000005 /* Six matchpoints supported */
+#define SPR_DCFGR_NDP7 0x00000006 /* Seven matchpoints supported */
+#define SPR_DCFGR_NDP8 0x00000007 /* Eight matchpoints supported */
+#define SPR_DCFGR_WPCI 0x00000008 /* Watchpoint counters implemented */
+
+#define MATCHPOINTS_TO_NDP(n) (1 == n ? SPR_DCFGR_NDP1 : \
+ 2 == n ? SPR_DCFGR_NDP2 : \
+ 3 == n ? SPR_DCFGR_NDP3 : \
+ 4 == n ? SPR_DCFGR_NDP4 : \
+ 5 == n ? SPR_DCFGR_NDP5 : \
+ 6 == n ? SPR_DCFGR_NDP6 : \
+ 7 == n ? SPR_DCFGR_NDP7 : SPR_DCFGR_NDP8)
+#define MAX_MATCHPOINTS 8
+#define MAX_WATCHPOINTS (MAX_MATCHPOINTS + 2)
+
+/*
+ * Bit definitions for the Supervision Register
+ *
+ */
+#define SPR_SR_SM 0x00000001 /* Supervisor Mode */
+#define SPR_SR_TEE 0x00000002 /* Tick timer Exception Enable */
+#define SPR_SR_IEE 0x00000004 /* Interrupt Exception Enable */
+#define SPR_SR_DCE 0x00000008 /* Data Cache Enable */
+#define SPR_SR_ICE 0x00000010 /* Instruction Cache Enable */
+#define SPR_SR_DME 0x00000020 /* Data MMU Enable */
+#define SPR_SR_IME 0x00000040 /* Instruction MMU Enable */
+#define SPR_SR_LEE 0x00000080 /* Little Endian Enable */
+#define SPR_SR_CE 0x00000100 /* CID Enable */
+#define SPR_SR_F 0x00000200 /* Condition Flag */
+#define SPR_SR_CY 0x00000400 /* Carry flag */
+#define SPR_SR_OV 0x00000800 /* Overflow flag */
+#define SPR_SR_OVE 0x00001000 /* Overflow flag Exception */
+#define SPR_SR_DSX 0x00002000 /* Delay Slot Exception */
+#define SPR_SR_EPH 0x00004000 /* Exception Prefix High */
+#define SPR_SR_FO 0x00008000 /* Fixed one */
+#define SPR_SR_SUMRA 0x00010000 /* Supervisor SPR read access */
+#define SPR_SR_RES 0x0ffe0000 /* Reserved */
+#define SPR_SR_CID 0xf0000000 /* Context ID */
+
+/*
+ * Bit definitions for the Data MMU Control Register
+ *
+ */
+#define SPR_DMMUCR_P2S 0x0000003e /* Level 2 Page Size */
+#define SPR_DMMUCR_P1S 0x000007c0 /* Level 1 Page Size */
+#define SPR_DMMUCR_VADDR_WIDTH 0x0000f800 /* Virtual ADDR Width */
+#define SPR_DMMUCR_PADDR_WIDTH 0x000f0000 /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Instruction MMU Control Register
+ *
+ */
+#define SPR_IMMUCR_P2S 0x0000003e /* Level 2 Page Size */
+#define SPR_IMMUCR_P1S 0x000007c0 /* Level 1 Page Size */
+#define SPR_IMMUCR_VADDR_WIDTH 0x0000f800 /* Virtual ADDR Width */
+#define SPR_IMMUCR_PADDR_WIDTH 0x000f0000 /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Data TLB Match Register
+ *
+ */
+#define SPR_DTLBMR_V 0x00000001 /* Valid */
+#define SPR_DTLBMR_PL1 0x00000002 /* Page Level 1 (if 0 then PL2) */
+#define SPR_DTLBMR_CID 0x0000003c /* Context ID */
+#define SPR_DTLBMR_LRU 0x000000c0 /* Least Recently Used */
+#define SPR_DTLBMR_VPN 0xfffff000 /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Data TLB Translate Register
+ *
+ */
+#define SPR_DTLBTR_CC 0x00000001 /* Cache Coherency */
+#define SPR_DTLBTR_CI 0x00000002 /* Cache Inhibit */
+#define SPR_DTLBTR_WBC 0x00000004 /* Write-Back Cache */
+#define SPR_DTLBTR_WOM 0x00000008 /* Weakly-Ordered Memory */
+#define SPR_DTLBTR_A 0x00000010 /* Accessed */
+#define SPR_DTLBTR_D 0x00000020 /* Dirty */
+#define SPR_DTLBTR_URE 0x00000040 /* User Read Enable */
+#define SPR_DTLBTR_UWE 0x00000080 /* User Write Enable */
+#define SPR_DTLBTR_SRE 0x00000100 /* Supervisor Read Enable */
+#define SPR_DTLBTR_SWE 0x00000200 /* Supervisor Write Enable */
+#define SPR_DTLBTR_PPN 0xfffff000 /* Physical Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Match Register
+ *
+ */
+#define SPR_ITLBMR_V 0x00000001 /* Valid */
+#define SPR_ITLBMR_PL1 0x00000002 /* Page Level 1 (if 0 then PL2) */
+#define SPR_ITLBMR_CID 0x0000003c /* Context ID */
+#define SPR_ITLBMR_LRU 0x000000c0 /* Least Recently Used */
+#define SPR_ITLBMR_VPN 0xfffff000 /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Translate Register
+ *
+ */
+#define SPR_ITLBTR_CC 0x00000001 /* Cache Coherency */
+#define SPR_ITLBTR_CI 0x00000002 /* Cache Inhibit */
+#define SPR_ITLBTR_WBC 0x00000004 /* Write-Back Cache */
+#define SPR_ITLBTR_WOM 0x00000008 /* Weakly-Ordered Memory */
+#define SPR_ITLBTR_A 0x00000010 /* Accessed */
+#define SPR_ITLBTR_D 0x00000020 /* Dirty */
+#define SPR_ITLBTR_SXE 0x00000040 /* Supervisor Execute Enable */
+#define SPR_ITLBTR_UXE 0x00000080 /* User Execute Enable */
+#define SPR_ITLBTR_PPN 0xfffff000 /* Physical Page Number */
+
+/*
+ * Bit definitions for Data Cache Control register
+ *
+ */
+#define SPR_DCCR_EW 0x000000ff /* Enable ways */
+
+/*
+ * Bit definitions for Insn Cache Control register
+ *
+ */
+#define SPR_ICCR_EW 0x000000ff /* Enable ways */
+
+/*
+ * Bit definitions for Data Cache Configuration Register
+ *
+ */
+
+#define SPR_DCCFGR_NCW 0x00000007
+#define SPR_DCCFGR_NCS 0x00000078
+#define SPR_DCCFGR_CBS 0x00000080
+#define SPR_DCCFGR_CWS 0x00000100
+#define SPR_DCCFGR_CCRI 0x00000200
+#define SPR_DCCFGR_CBIRI 0x00000400
+#define SPR_DCCFGR_CBPRI 0x00000800
+#define SPR_DCCFGR_CBLRI 0x00001000
+#define SPR_DCCFGR_CBFRI 0x00002000
+#define SPR_DCCFGR_CBWBRI 0x00004000
+
+#define SPR_DCCFGR_NCW_OFF 0
+#define SPR_DCCFGR_NCS_OFF 3
+#define SPR_DCCFGR_CBS_OFF 7
+
+/*
+ * Bit definitions for Instruction Cache Configuration Register
+ *
+ */
+#define SPR_ICCFGR_NCW 0x00000007
+#define SPR_ICCFGR_NCS 0x00000078
+#define SPR_ICCFGR_CBS 0x00000080
+#define SPR_ICCFGR_CCRI 0x00000200
+#define SPR_ICCFGR_CBIRI 0x00000400
+#define SPR_ICCFGR_CBPRI 0x00000800
+#define SPR_ICCFGR_CBLRI 0x00001000
+
+#define SPR_ICCFGR_NCW_OFF 0
+#define SPR_ICCFGR_NCS_OFF 3
+#define SPR_ICCFGR_CBS_OFF 7
+
+/*
+ * Bit definitions for Data MMU Configuration Register
+ *
+ */
+
+#define SPR_DMMUCFGR_NTW 0x00000003
+#define SPR_DMMUCFGR_NTS 0x0000001C
+#define SPR_DMMUCFGR_NAE 0x000000E0
+#define SPR_DMMUCFGR_CRI 0x00000100
+#define SPR_DMMUCFGR_PRI 0x00000200
+#define SPR_DMMUCFGR_TEIRI 0x00000400
+#define SPR_DMMUCFGR_HTR 0x00000800
+
+#define SPR_DMMUCFGR_NTW_OFF 0
+#define SPR_DMMUCFGR_NTS_OFF 2
+
+/*
+ * Bit definitions for Instruction MMU Configuration Register
+ *
+ */
+
+#define SPR_IMMUCFGR_NTW 0x00000003
+#define SPR_IMMUCFGR_NTS 0x0000001C
+#define SPR_IMMUCFGR_NAE 0x000000E0
+#define SPR_IMMUCFGR_CRI 0x00000100
+#define SPR_IMMUCFGR_PRI 0x00000200
+#define SPR_IMMUCFGR_TEIRI 0x00000400
+#define SPR_IMMUCFGR_HTR 0x00000800
+
+#define SPR_IMMUCFGR_NTW_OFF 0
+#define SPR_IMMUCFGR_NTS_OFF 2
+
+/*
+ * Bit definitions for Debug Control registers
+ *
+ */
+#define SPR_DCR_DP 0x00000001 /* DVR/DCR present */
+#define SPR_DCR_CC 0x0000000e /* Compare condition */
+#define SPR_DCR_SC 0x00000010 /* Signed compare */
+#define SPR_DCR_CT 0x000000e0 /* Compare to */
+
+/* Bit results with SPR_DCR_CC mask */
+#define SPR_DCR_CC_MASKED 0x00000000
+#define SPR_DCR_CC_EQUAL 0x00000002
+#define SPR_DCR_CC_LESS 0x00000004
+#define SPR_DCR_CC_LESSE 0x00000006
+#define SPR_DCR_CC_GREAT 0x00000008
+#define SPR_DCR_CC_GREATE 0x0000000a
+#define SPR_DCR_CC_NEQUAL 0x0000000c
+
+/* Bit results with SPR_DCR_CT mask */
+#define SPR_DCR_CT_DISABLED 0x00000000
+#define SPR_DCR_CT_IFEA 0x00000020
+#define SPR_DCR_CT_LEA 0x00000040
+#define SPR_DCR_CT_SEA 0x00000060
+#define SPR_DCR_CT_LD 0x00000080
+#define SPR_DCR_CT_SD 0x000000a0
+#define SPR_DCR_CT_LSEA 0x000000c0
+#define SPR_DCR_CT_LSD 0x000000e0
+/* SPR_DCR_CT_LSD doesn't seem to be implemented anywhere in or1ksim. 2004-1-30 HP */
+
+/*
+ * Bit definitions for Debug Mode 1 register
+ *
+ */
+#define SPR_DMR1_CW 0x000fffff /* Chain register pair data */
+#define SPR_DMR1_CW0_AND 0x00000001
+#define SPR_DMR1_CW0_OR 0x00000002
+#define SPR_DMR1_CW0 (SPR_DMR1_CW0_AND | SPR_DMR1_CW0_OR)
+#define SPR_DMR1_CW1_AND 0x00000004
+#define SPR_DMR1_CW1_OR 0x00000008
+#define SPR_DMR1_CW1 (SPR_DMR1_CW1_AND | SPR_DMR1_CW1_OR)
+#define SPR_DMR1_CW2_AND 0x00000010
+#define SPR_DMR1_CW2_OR 0x00000020
+#define SPR_DMR1_CW2 (SPR_DMR1_CW2_AND | SPR_DMR1_CW2_OR)
+#define SPR_DMR1_CW3_AND 0x00000040
+#define SPR_DMR1_CW3_OR 0x00000080
+#define SPR_DMR1_CW3 (SPR_DMR1_CW3_AND | SPR_DMR1_CW3_OR)
+#define SPR_DMR1_CW4_AND 0x00000100
+#define SPR_DMR1_CW4_OR 0x00000200
+#define SPR_DMR1_CW4 (SPR_DMR1_CW4_AND | SPR_DMR1_CW4_OR)
+#define SPR_DMR1_CW5_AND 0x00000400
+#define SPR_DMR1_CW5_OR 0x00000800
+#define SPR_DMR1_CW5 (SPR_DMR1_CW5_AND | SPR_DMR1_CW5_OR)
+#define SPR_DMR1_CW6_AND 0x00001000
+#define SPR_DMR1_CW6_OR 0x00002000
+#define SPR_DMR1_CW6 (SPR_DMR1_CW6_AND | SPR_DMR1_CW6_OR)
+#define SPR_DMR1_CW7_AND 0x00004000
+#define SPR_DMR1_CW7_OR 0x00008000
+#define SPR_DMR1_CW7 (SPR_DMR1_CW7_AND | SPR_DMR1_CW7_OR)
+#define SPR_DMR1_CW8_AND 0x00010000
+#define SPR_DMR1_CW8_OR 0x00020000
+#define SPR_DMR1_CW8 (SPR_DMR1_CW8_AND | SPR_DMR1_CW8_OR)
+#define SPR_DMR1_CW9_AND 0x00040000
+#define SPR_DMR1_CW9_OR 0x00080000
+#define SPR_DMR1_CW9 (SPR_DMR1_CW9_AND | SPR_DMR1_CW9_OR)
+#define SPR_DMR1_RES1 0x00300000 /* Reserved */
+#define SPR_DMR1_ST 0x00400000 /* Single-step trace*/
+#define SPR_DMR1_BT 0x00800000 /* Branch trace */
+#define SPR_DMR1_RES2 0xff000000 /* Reserved */
+
+/*
+ * Bit definitions for Debug Mode 2 register. AWTC and WGB corrected by JPB
+ *
+ */
+#define SPR_DMR2_WCE0 0x00000001 /* Watchpoint counter 0 enable */
+#define SPR_DMR2_WCE1 0x00000002 /* Watchpoint counter 1 enable */
+#define SPR_DMR2_AWTC 0x00000ffc /* Assign watchpoints to counters */
+#define SPR_DMR2_AWTC_OFF 2 /* Bit offset to AWTC field */
+#define SPR_DMR2_WGB 0x003ff000 /* Watchpoints generating breakpoint */
+#define SPR_DMR2_WGB_OFF 12 /* Bit offset to WGB field */
+#define SPR_DMR2_WBS 0xffc00000 /* JPB: Watchpoint status */
+#define SPR_DMR2_WBS_OFF 22 /* Bit offset to WBS field */
+
+/*
+ * Bit definitions for Debug watchpoint counter registers
+ *
+ */
+#define SPR_DWCR_COUNT 0x0000ffff /* Count */
+#define SPR_DWCR_MATCH 0xffff0000 /* Match */
+#define SPR_DWCR_MATCH_OFF 16 /* Match bit offset */
+
+/*
+ * Bit definitions for Debug stop register
+ *
+ */
+#define SPR_DSR_RSTE 0x00000001 /* Reset exception */
+#define SPR_DSR_BUSEE 0x00000002 /* Bus error exception */
+#define SPR_DSR_DPFE 0x00000004 /* Data Page Fault exception */
+#define SPR_DSR_IPFE 0x00000008 /* Insn Page Fault exception */
+#define SPR_DSR_TTE 0x00000010 /* Tick Timer exception */
+#define SPR_DSR_AE 0x00000020 /* Alignment exception */
+#define SPR_DSR_IIE 0x00000040 /* Illegal Instruction exception */
+#define SPR_DSR_IE 0x00000080 /* Interrupt exception */
+#define SPR_DSR_DME 0x00000100 /* DTLB miss exception */
+#define SPR_DSR_IME 0x00000200 /* ITLB miss exception */
+#define SPR_DSR_RE 0x00000400 /* Range exception */
+#define SPR_DSR_SCE 0x00000800 /* System call exception */
+#define SPR_DSR_FPE 0x00001000 /* Floating Point Exception */
+#define SPR_DSR_TE 0x00002000 /* Trap exception */
+
+/*
+ * Bit definitions for Debug reason register
+ *
+ */
+#define SPR_DRR_RSTE 0x00000001 /* Reset exception */
+#define SPR_DRR_BUSEE 0x00000002 /* Bus error exception */
+#define SPR_DRR_DPFE 0x00000004 /* Data Page Fault exception */
+#define SPR_DRR_IPFE 0x00000008 /* Insn Page Fault exception */
+#define SPR_DRR_TTE 0x00000010 /* Tick Timer exception */
+#define SPR_DRR_AE 0x00000020 /* Alignment exception */
+#define SPR_DRR_IIE 0x00000040 /* Illegal Instruction exception */
+#define SPR_DRR_IE 0x00000080 /* Interrupt exception */
+#define SPR_DRR_DME 0x00000100 /* DTLB miss exception */
+#define SPR_DRR_IME 0x00000200 /* ITLB miss exception */
+#define SPR_DRR_RE 0x00000400 /* Range exception */
+#define SPR_DRR_SCE 0x00000800 /* System call exception */
+#define SPR_DRR_FPE 0x00001000 /* Floating Point Exception */
+#define SPR_DRR_TE 0x00002000 /* Trap exception */
+
+/*
+ * Bit definitions for Performance counters mode registers
+ *
+ */
+#define SPR_PCMR_CP 0x00000001 /* Counter present */
+#define SPR_PCMR_UMRA 0x00000002 /* User mode read access */
+#define SPR_PCMR_CISM 0x00000004 /* Count in supervisor mode */
+#define SPR_PCMR_CIUM 0x00000008 /* Count in user mode */
+#define SPR_PCMR_LA 0x00000010 /* Load access event */
+#define SPR_PCMR_SA 0x00000020 /* Store access event */
+#define SPR_PCMR_IF 0x00000040 /* Instruction fetch event*/
+#define SPR_PCMR_DCM 0x00000080 /* Data cache miss event */
+#define SPR_PCMR_ICM 0x00000100 /* Insn cache miss event */
+#define SPR_PCMR_IFS 0x00000200 /* Insn fetch stall event */
+#define SPR_PCMR_LSUS 0x00000400 /* LSU stall event */
+#define SPR_PCMR_BS 0x00000800 /* Branch stall event */
+#define SPR_PCMR_DTLBM 0x00001000 /* DTLB miss event */
+#define SPR_PCMR_ITLBM 0x00002000 /* ITLB miss event */
+#define SPR_PCMR_DDS 0x00004000 /* Data dependency stall event */
+#define SPR_PCMR_WPE 0x03ff8000 /* Watchpoint events */
+
+/*
+ * Bit definitions for the Power management register
+ *
+ */
+#define SPR_PMR_SDF 0x0000000f /* Slow down factor */
+#define SPR_PMR_DME 0x00000010 /* Doze mode enable */
+#define SPR_PMR_SME 0x00000020 /* Sleep mode enable */
+#define SPR_PMR_DCGE 0x00000040 /* Dynamic clock gating enable */
+#define SPR_PMR_SUME 0x00000080 /* Suspend mode enable */
+
+/*
+ * Bit definitions for PICMR
+ *
+ */
+#define SPR_PICMR_IUM 0xfffffffc /* Interrupt unmask */
+
+/*
+ * Bit definitions for PICPR
+ *
+ */
+#define SPR_PICPR_IPRIO 0xfffffffc /* Interrupt priority */
+
+/*
+ * Bit definitions for PICSR
+ *
+ */
+#define SPR_PICSR_IS 0xffffffff /* Interrupt status */
+
+/*
+ * Bit definitions for Tick Timer Control Register
+ *
+ */
+
+#define SPR_TTCR_CNT 0xffffffff /* Count, time period */
+#define SPR_TTMR_TP 0x0fffffff /* Time period */
+#define SPR_TTMR_IP 0x10000000 /* Interrupt Pending */
+#define SPR_TTMR_IE 0x20000000 /* Interrupt Enable */
+#define SPR_TTMR_DI 0x00000000 /* Disabled */
+#define SPR_TTMR_RT 0x40000000 /* Restart tick */
+#define SPR_TTMR_SR 0x80000000 /* Single run */
+#define SPR_TTMR_CR 0xc0000000 /* Continuous run */
+#define SPR_TTMR_M 0xc0000000 /* Tick mode */
+
+/*
+ * Bit definitions for the FP Control Status Register
+ *
+ */
+#define SPR_FPCSR_FPEE 0x00000001 /* Floating Point Exception Enable */
+#define SPR_FPCSR_RM 0x00000006 /* Rounding Mode */
+#define SPR_FPCSR_OVF 0x00000008 /* Overflow Flag */
+#define SPR_FPCSR_UNF 0x00000010 /* Underflow Flag */
+#define SPR_FPCSR_SNF 0x00000020 /* SNAN Flag */
+#define SPR_FPCSR_QNF 0x00000040 /* QNAN Flag */
+#define SPR_FPCSR_ZF 0x00000080 /* Zero Flag */
+#define SPR_FPCSR_IXF 0x00000100 /* Inexact Flag */
+#define SPR_FPCSR_IVF 0x00000200 /* Invalid Flag */
+#define SPR_FPCSR_INF 0x00000400 /* Infinity Flag */
+#define SPR_FPCSR_DZF 0x00000800 /* Divide By Zero Flag */
+#define SPR_FPCSR_ALLF (SPR_FPCSR_OVF | SPR_FPCSR_UNF | SPR_FPCSR_SNF | \
+ SPR_FPCSR_QNF | SPR_FPCSR_ZF | SPR_FPCSR_IXF | \
+ SPR_FPCSR_IVF | SPR_FPCSR_INF | SPR_FPCSR_DZF)
+
+#define FPCSR_RM_RN (0<<1)
+#define FPCSR_RM_RZ (1<<1)
+#define FPCSR_RM_RIP (2<<1)
+#define FPCSR_RM_RIN (3<<1)
+
+/*
+ * l.nop constants
+ *
+ */
+#define NOP_NOP 0x0000 /* Normal nop instruction */
+#define NOP_EXIT 0x0001 /* End of simulation */
+#define NOP_REPORT 0x0002 /* Simple report */
+/*#define NOP_PRINTF 0x0003 Simprintf instruction (obsolete)*/
+#define NOP_PUTC 0x0004 /* JPB: Simputc instruction */
+#define NOP_CNT_RESET 0x0005 /* Reset statistics counters */
+#define NOP_GET_TICKS 0x0006 /* JPB: Get # ticks running */
+#define NOP_GET_PS 0x0007 /* JPB: Get picosecs/cycle */
+#define NOP_REPORT_FIRST 0x0400 /* Report with number */
+#define NOP_REPORT_LAST 0x03ff /* Report with number */
+
+#endif /* SPR_DEFS__H */
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
new file mode 100644
index 000000000000..9f0337055d26
--- /dev/null
+++ b/arch/openrisc/include/asm/syscall.h
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALL_H__
+#define __ASM_OPENRISC_SYSCALL_H__
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->syscallno ? regs->syscallno : -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->gpr[11] = regs->orig_gpr11;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ return IS_ERR_VALUE(regs->gpr[11]) ? regs->gpr[11] : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->gpr[11];
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ if (error)
+ regs->gpr[11] = -error;
+ else
+ regs->gpr[11] = val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+ memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+}
+
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+ memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+}
+
+#endif
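
The accessors above are the arch hooks consumed by generic tracing code (ptrace, seccomp and friends). A minimal sketch of such a consumer follows; inspect_syscall() is purely illustrative and not kernel API.

#include <linux/printk.h>
#include <asm/syscall.h>

/* Illustrative consumer of the asm/syscall.h hooks above. */
static void inspect_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	int nr = syscall_get_nr(task, regs);	/* -1 when not in a syscall */

	if (nr < 0)
		return;

	/* all six possible arguments live in r3..r8 */
	syscall_get_arguments(task, regs, 0, 6, args);
	pr_debug("syscall %d: arg0=%#lx ret=%ld\n",
		 nr, args[0], syscall_get_return_value(task, regs));
}
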
diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h
new file mode 100644
index 000000000000..84a978af44d7
--- /dev/null
+++ b/arch/openrisc/include/asm/syscalls.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALLS_H
+#define __ASM_OPENRISC_SYSCALLS_H
+
+asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1,
+ unsigned long *v2);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_OPENRISC_SYSCALLS_H */
diff --git a/arch/openrisc/include/asm/system.h b/arch/openrisc/include/asm/system.h
new file mode 100644
index 000000000000..cf658882186b
--- /dev/null
+++ b/arch/openrisc/include/asm/system.h
@@ -0,0 +1,35 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSTEM_H
+#define __ASM_OPENRISC_SYSTEM_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <asm/spr.h>
+#include <asm-generic/system.h>
+
+/* We probably need this definition, but the generic system.h provides it
+ * and it's not used on our arch anyway...
+ */
+/*#define nop() __asm__ __volatile__ ("l.nop"::)*/
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_OPENRISC_SYSTEM_H */
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
new file mode 100644
index 000000000000..07a8bc080ef2
--- /dev/null
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -0,0 +1,134 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/processor.h>
+#endif
+
+
+/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.
+ * Normally, the stack is found by doing something like p + THREAD_SIZE.
+ * On or32, a page is 8192 bytes, which seems like a sane size.
+ */
+
+#define THREAD_SIZE_ORDER 0
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */
+
+ mm_segment_t addr_limit; /* thread address space:
+ 0-0x7FFFFFFF for user-thread
+ 0-0xFFFFFFFF for kernel-thread
+ */
+ struct restart_block restart_block;
+ __u8 supervisor_stack[0];
+
+ /* saved context data */
+ unsigned long ksp;
+};
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .ksp = 0, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+
+/* how to get the thread information struct from C */
+register struct thread_info *current_thread_info_reg asm("r10");
+#define current_thread_info() (current_thread_info_reg)
+
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* restore singlestep on return to user
+ * mode
+ */
+#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
+#define TIF_RESTORE_SIGMASK 9
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE 17
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+
+
+/* Work to do when returning from interrupt/exception */
+/* For OpenRISC, this is anything in the LSW other than syscall trace */
+#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP))
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
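The TIF_* bits above are only ever touched through the generic helpers in <linux/thread_info.h>. A hedged sketch of typical use; check_pending() is made up and not part of this patch.

#include <linux/thread_info.h>

/* Hypothetical example of how generic code drives the flag bits above. */
static int check_pending(void)
{
	if (test_thread_flag(TIF_SIGPENDING))	/* bit 2 in ti->flags */
		return 1;	/* signal work on return to user mode */

	clear_thread_flag(TIF_NOTIFY_RESUME);	/* bit 1 */
	return 0;
}
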
diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h
new file mode 100644
index 000000000000..9935cad1b9b9
--- /dev/null
+++ b/arch/openrisc/include/asm/timex.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TIMEX_H
+#define __ASM_OPENRISC_TIMEX_H
+
+#define get_cycles get_cycles
+
+#include <asm-generic/timex.h>
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+static inline cycles_t get_cycles(void)
+{
+ return mfspr(SPR_TTCR);
+}
+
+/* This isn't really used any more */
+#define CLOCK_TICK_RATE 1000
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+#endif
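
get_cycles() above just reads the free-running tick timer counter. A hedged sketch of timing a section with it; time_section() is illustrative, and it assumes TTCR is actually counting, which depends on the TTMR mode in effect.

#include <asm/timex.h>

/* Illustrative only; not part of this patch. */
static unsigned long time_section(void (*fn)(void))
{
	cycles_t start = get_cycles();	/* reads SPR_TTCR */

	fn();
	return (unsigned long)(get_cycles() - start);
}
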
diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h
new file mode 100644
index 000000000000..fa4376a4515d
--- /dev/null
+++ b/arch/openrisc/include/asm/tlb.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLB_H__
+#define __ASM_OPENRISC_TLB_H__
+
+/*
+ * or32 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_OPENRISC_TLB_H__ */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
new file mode 100644
index 000000000000..6a2accd6cb67
--- /dev/null
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -0,0 +1,55 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLBFLUSH_H
+#define __ASM_OPENRISC_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+
+/*
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ */
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+static inline void flush_tlb(void)
+{
+ flush_tlb_mm(current->mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_range(NULL, start, end);
+}
+
+#endif /* __ASM_OPENRISC_TLBFLUSH_H */
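
These declarations are implemented in the port's mm code, not in this hunk. As a sketch of the usual pairing, assuming a set_pte() helper from the port's pgtable.h, a PTE update is followed by a per-page flush so no stale translation survives:

/* Sketch, not from this patch: update a PTE, then invalidate the
 * matching TLB entry via the hook declared above. */
static void update_mapping(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, pte_t newpte)
{
	set_pte(ptep, newpte);		/* install the new translation */
	flush_tlb_page(vma, addr);	/* drop any cached old one */
}
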
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
new file mode 100644
index 000000000000..c310e45b538e
--- /dev/null
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -0,0 +1,355 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UACCESS_H
+#define __ASM_OPENRISC_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/thread_info.h>
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+/* addr_limit is the maximum accessible address for the task. we misuse
+ * the KERNEL_DS and USER_DS values to both assign and compare the
+ * addr_limit values through the equally misnamed get/set_fs macros.
+ * (see above)
+ */
+
+#define KERNEL_DS (~0UL)
+#define get_ds() (KERNEL_DS)
+
+#define USER_DS (TASK_SIZE)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b) ((a) == (b))
+
+/* Ensure that the range from addr to addr+size is all within the process'
+ * address space
+ */
+#define __range_ok(addr, size) ((size) <= get_fs() && (addr) <= (get_fs() - (size)))
+
+/* Ensure that addr is below task's addr_limit */
+#define __addr_ok(addr) ((unsigned long) addr < get_fs())
+
+#define access_ok(type, addr, size) \
+ __range_ok((unsigned long)addr, (unsigned long)size)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+extern void sort_exception_table(void);
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * OpenRISC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, "l.sb"); break; \
+ case 2: __put_user_asm(x, ptr, retval, "l.sh"); break; \
+ case 4: __put_user_asm(x, ptr, retval, "l.sw"); break; \
+ case 8: __put_user_asm2(x, ptr, retval); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+struct __large_struct {
+ unsigned long buf[100];
+};
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" 0(%2),%1\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: l.addi %0,r0,%3\n" \
+ " l.j 2b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,3b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __put_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: l.sw 0(%2),%1\n" \
+ "2: l.sw 4(%2),%H1\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: l.addi %0,r0,%3\n" \
+ " l.j 3b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,4b\n" \
+ " .long 2b,4b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) * __gu_addr = (ptr); \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
+ case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
+ case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2)\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: l.addi %0,r0,%3\n" \
+ " l.addi %1,r0,0\n" \
+ " l.j 2b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,3b\n" \
+ ".previous" \
+ : "=r"(err), "=r"(x) \
+ : "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: l.lwz %1,0(%2)\n" \
+ "2: l.lwz %H1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: l.addi %0,r0,%3\n" \
+ " l.addi %1,r0,0\n" \
+ " l.addi %H1,r0,0\n" \
+ " l.j 3b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,4b\n" \
+ " .long 2b,4b\n" \
+ ".previous" \
+ : "=r"(err), "=&r"(x) \
+ : "r"(addr), "i"(-EFAULT), "0"(err))
+
+/* more complex routines */
+
+extern unsigned long __must_check
+__copy_tofrom_user(void *to, const void *from, unsigned long size);
+
+#define __copy_from_user(to, from, size) \
+ __copy_tofrom_user(to, from, size)
+#define __copy_to_user(to, from, size) \
+ __copy_tofrom_user(to, from, size)
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+static inline unsigned long
+copy_from_user(void *to, const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_tofrom_user(to, from, n);
+ if ((unsigned long)from < TASK_SIZE) {
+ over = (unsigned long)from + n - TASK_SIZE;
+ return __copy_tofrom_user(to, from, n - over) + over;
+ }
+ return n;
+}
+
+static inline unsigned long
+copy_to_user(void *to, const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, from, n);
+ if ((unsigned long)to < TASK_SIZE) {
+ over = (unsigned long)to + n - TASK_SIZE;
+ return __copy_tofrom_user(to, from, n - over) + over;
+ }
+ return n;
+}
+
+extern unsigned long __clear_user(void *addr, unsigned long size);
+
+static inline __must_check unsigned long
+clear_user(void *addr, unsigned long size)
+{
+ if (access_ok(VERIFY_WRITE, addr, size))
+ return __clear_user(addr, size);
+ if ((unsigned long)addr < TASK_SIZE) {
+ unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+ return __clear_user(addr, size - over) + over;
+ }
+ return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char *src, long count)
+{
+ if (access_ok(VERIFY_READ, src, 1))
+ return __strncpy_from_user(dst, src, count);
+ return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+
+extern int __strnlen_user(const char *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline long strnlen_user(const char __user *str, long len)
+{
+ unsigned long top = (unsigned long)get_fs();
+ unsigned long res = 0;
+
+ if (__addr_ok(str))
+ res = __strnlen_user(str, len, top);
+
+ return res;
+}
+
+#define strlen_user(str) strnlen_user(str, TASK_SIZE-1)
+
+#endif /* __ASM_OPENRISC_UACCESS_H */
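
A hedged sketch of a typical caller of the accessors above; read_user_pair() is a made-up ioctl-style helper, not part of this patch.

#include <linux/uaccess.h>	/* pulls in the header above */

/* Illustrative only. */
static long read_user_pair(const u32 __user *uptr, u32 *out)
{
	u32 pair[2];

	if (get_user(out[0], uptr))	/* single word, checks access_ok */
		return -EFAULT;

	if (copy_from_user(pair, uptr, sizeof(pair)))	/* bulk copy */
		return -EFAULT;		/* nonzero => bytes not copied */

	out[1] = pair[1];
	return 0;
}
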
diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h
new file mode 100644
index 000000000000..1141cbd6fd72
--- /dev/null
+++ b/arch/openrisc/include/asm/unaligned.h
@@ -0,0 +1,51 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UNALIGNED_H
+#define __ASM_OPENRISC_UNALIGNED_H
+
+/*
+ * This is copied from the generic implementation and the C-struct
+ * variant replaced with the memmove variant. The GCC compiler
+ * for the OR32 arch optimizes too aggressively for the C-struct
+ * variant to work, so use the memmove variant instead.
+ *
+ * It may be worth considering implementing the unaligned access
+ * exception handler and allowing unaligned accesses (access_ok.h)...
+ * not sure if it would be much of a performance win without further
+ * investigation.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_OPENRISC_UNALIGNED_H */
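
A short sketch of why this header matters on OR32, where plain word loads must be aligned; read_field() is made up.

#include <asm/unaligned.h>
#include <linux/types.h>

/* Illustrative only: fetch a 32-bit field at an arbitrary byte offset.
 * The memmove-based helper compiles down to byte accesses, so this is
 * safe even though buf + 3 is not 4-byte aligned. */
static u32 read_field(const u8 *buf)
{
	return get_unaligned((const u32 *)(buf + 3));
}
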
diff --git a/arch/openrisc/include/asm/unistd.h b/arch/openrisc/include/asm/unistd.h
new file mode 100644
index 000000000000..89af3ab5c2e9
--- /dev/null
+++ b/arch/openrisc/include/asm/unistd.h
@@ -0,0 +1,31 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#if !defined(__ASM_OPENRISC_UNISTD_H) || defined(__SYSCALL)
+#define __ASM_OPENRISC_UNISTD_H
+
+#define __ARCH_HAVE_MMU
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define __NR_or1k_atomic __NR_arch_specific_syscall
+__SYSCALL(__NR_or1k_atomic, sys_or1k_atomic)
+
+#endif /* __ASM_OPENRISC_UNISTD_H */
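
__NR_or1k_atomic is allocated from the asm-generic arch-specific slot, so from userspace it is reached like any other syscall. A hypothetical invocation follows; the semantics of type/v1/v2 are whatever sys_or1k_atomic implements, and this wrapper is not part of the patch.

/* Userspace-side sketch, not kernel code. */
#include <unistd.h>
#include <sys/syscall.h>

static long or1k_atomic(unsigned long type, unsigned long *v1,
			unsigned long *v2)
{
	return syscall(__NR_or1k_atomic, type, v1, v2);
}
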
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
new file mode 100644
index 000000000000..9a4c2706d795
--- /dev/null
+++ b/arch/openrisc/kernel/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the linux kernel.
+#
+
+extra-y := head.o vmlinux.lds init_task.o
+
+obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \
+ traps.o time.o irq.o entry.o ptrace.o signal.o sys_or32.o \
+ sys_call_table.o
+
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_OF) += prom.o
+
+clean:
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..1a242a0d7583
--- /dev/null
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -0,0 +1,70 @@
+/*
+ * OpenRISC asm-offsets.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ /* offsets into the task_struct */
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+ /* offsets into thread_info */
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_KSP, offsetof(struct thread_info, ksp));
+
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+
+ /* Interrupt register frame */
+ DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
+ DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+
+ DEFINE(NUM_USER_SEGMENTS, TASK_SIZE >> 28);
+ return 0;
+}
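
The "->" markers survive into the compiler's assembly output, and the kbuild sed rule turns each one into a #define in include/generated/asm-offsets.h, which assembly code then picks up via asm/asm-offsets.h. The generated header looks roughly like the following; the offset values here are hypothetical and depend on the real struct layouts.

/* Hypothetical excerpt of the generated asm-offsets.h */
#define TI_TASK 0	/* offsetof(struct thread_info, task) */
#define TI_FLAGS 8	/* offsetof(struct thread_info, flags) */
#define TI_KSP 36	/* offsetof(struct thread_info, ksp) */
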
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
new file mode 100644
index 000000000000..968d3ee477e3
--- /dev/null
+++ b/arch/openrisc/kernel/dma.c
@@ -0,0 +1,191 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * DMA mapping callbacks...
+ * As alloc_coherent is the only DMA callback being used currently, that's
+ * the only thing implemented properly. The rest need looking into...
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+
+#include <asm/cpuinfo.h>
+#include <asm/spr_defs.h>
+#include <asm/tlbflush.h>
+
+static int page_set_nocache(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ unsigned long cl;
+
+ pte_val(*pte) |= _PAGE_CI;
+
+ /*
+ * Flush the page out of the TLB so that the new page flags get
+ * picked up next time there's an access
+ */
+ flush_tlb_page(NULL, addr);
+
+ /* Flush page out of dcache */
+ for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBFR, cl);
+
+ return 0;
+}
+
+static int page_clear_nocache(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_val(*pte) &= ~_PAGE_CI;
+
+ /*
+ * Flush the page out of the TLB so that the new page flags get
+ * picked up next time there's an access
+ */
+ flush_tlb_page(NULL, addr);
+
+ return 0;
+}
+
+/*
+ * Alloc "coherent" memory, which for OpenRISC means simply uncached.
+ *
+ * This function effectively just calls __get_free_pages, sets the
+ * cache-inhibit bit on those pages, and makes sure that the pages are
+ * flushed out of the cache before they are used.
+ *
+ */
+void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ unsigned long va;
+ void *page;
+ struct mm_walk walk = {
+ .pte_entry = page_set_nocache,
+ .mm = &init_mm
+ };
+
+ page = alloc_pages_exact(size, gfp);
+ if (!page)
+ return NULL;
+
+ /* This gives us the real physical address of the first page. */
+ *dma_handle = __pa(page);
+
+ va = (unsigned long)page;
+
+ /*
+ * We need to iterate through the pages, clearing the dcache for
+ * them and setting the cache-inhibit bit.
+ */
+ if (walk_page_range(va, va + size, &walk)) {
+ free_pages_exact(page, size);
+ return NULL;
+ }
+
+ return (void *)va;
+}
+
+void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ unsigned long va = (unsigned long)vaddr;
+ struct mm_walk walk = {
+ .pte_entry = page_clear_nocache,
+ .mm = &init_mm
+ };
+
+ /* walk_page_range shouldn't be able to fail here */
+ WARN_ON(walk_page_range(va, va + size, &walk));
+
+ free_pages_exact(vaddr, size);
+}
+
+dma_addr_t or1k_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long cl;
+ dma_addr_t addr = page_to_phys(page) + offset;
+
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ /* Flush the dcache for the requested range */
+ for (cl = addr; cl < addr + size;
+ cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBFR, cl);
+ break;
+ case DMA_FROM_DEVICE:
+ /* Invalidate the dcache for the requested range */
+ for (cl = addr; cl < addr + size;
+ cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBIR, cl);
+ break;
+ default:
+ /*
+ * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
+ * flush nor invalidate the cache here as the area will need
+ * to be manually synced anyway.
+ */
+ break;
+ }
+
+ return addr;
+}
+
+void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ /* Nothing special to do here... */
+}
+
+void or1k_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long cl;
+ dma_addr_t addr = dma_handle;
+
+ /* Invalidate the dcache for the requested range */
+ for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBIR, cl);
+}
+
+void or1k_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long cl;
+ dma_addr_t addr = dma_handle;
+
+ /* Flush the dcache for the requested range */
+ for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBFR, cl);
+}
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+
+fs_initcall(dma_init);
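
These or1k_* functions sit behind the arch's dma-mapping glue (not shown in this hunk); a driver only ever calls the generic entry points. A hypothetical fragment, where mydev_setup() and MYDEV_BUFSZ are made up:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define MYDEV_BUFSZ 4096	/* hypothetical */

static int mydev_setup(struct device *dev, void **cpu, dma_addr_t *bus)
{
	/* ends up in or1k_dma_alloc_coherent(): cache-inhibited pages,
	 * with the physical address handed back through *bus */
	*cpu = dma_alloc_coherent(dev, MYDEV_BUFSZ, bus, GFP_KERNEL);

	return *cpu ? 0 : -ENOMEM;
}
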
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
new file mode 100644
index 000000000000..d5f9c35a583f
--- /dev/null
+++ b/arch/openrisc/kernel/entry.S
@@ -0,0 +1,1128 @@
+/*
+ * OpenRISC entry.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/processor.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/asm-offsets.h>
+
+#define DISABLE_INTERRUPTS(t1,t2) \
+ l.mfspr t2,r0,SPR_SR ;\
+ l.movhi t1,hi(~(SPR_SR_IEE|SPR_SR_TEE)) ;\
+ l.ori t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE)) ;\
+ l.and t2,t2,t1 ;\
+ l.mtspr r0,t2,SPR_SR
+
+#define ENABLE_INTERRUPTS(t1) \
+ l.mfspr t1,r0,SPR_SR ;\
+ l.ori t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE) ;\
+ l.mtspr r0,t1,SPR_SR
+
+/* =========================================================[ macros ]=== */
+
+/*
+ * We need to disable interrupts at beginning of RESTORE_ALL
+ * since interrupt might come in after we've loaded EPC return address
+ * and overwrite EPC with address somewhere in RESTORE_ALL
+ * which is of course wrong!
+ */
+
+#define RESTORE_ALL \
+ DISABLE_INTERRUPTS(r3,r4) ;\
+ l.lwz r3,PT_PC(r1) ;\
+ l.mtspr r0,r3,SPR_EPCR_BASE ;\
+ l.lwz r3,PT_SR(r1) ;\
+ l.mtspr r0,r3,SPR_ESR_BASE ;\
+ l.lwz r2,PT_GPR2(r1) ;\
+ l.lwz r3,PT_GPR3(r1) ;\
+ l.lwz r4,PT_GPR4(r1) ;\
+ l.lwz r5,PT_GPR5(r1) ;\
+ l.lwz r6,PT_GPR6(r1) ;\
+ l.lwz r7,PT_GPR7(r1) ;\
+ l.lwz r8,PT_GPR8(r1) ;\
+ l.lwz r9,PT_GPR9(r1) ;\
+ l.lwz r10,PT_GPR10(r1) ;\
+ l.lwz r11,PT_GPR11(r1) ;\
+ l.lwz r12,PT_GPR12(r1) ;\
+ l.lwz r13,PT_GPR13(r1) ;\
+ l.lwz r14,PT_GPR14(r1) ;\
+ l.lwz r15,PT_GPR15(r1) ;\
+ l.lwz r16,PT_GPR16(r1) ;\
+ l.lwz r17,PT_GPR17(r1) ;\
+ l.lwz r18,PT_GPR18(r1) ;\
+ l.lwz r19,PT_GPR19(r1) ;\
+ l.lwz r20,PT_GPR20(r1) ;\
+ l.lwz r21,PT_GPR21(r1) ;\
+ l.lwz r22,PT_GPR22(r1) ;\
+ l.lwz r23,PT_GPR23(r1) ;\
+ l.lwz r24,PT_GPR24(r1) ;\
+ l.lwz r25,PT_GPR25(r1) ;\
+ l.lwz r26,PT_GPR26(r1) ;\
+ l.lwz r27,PT_GPR27(r1) ;\
+ l.lwz r28,PT_GPR28(r1) ;\
+ l.lwz r29,PT_GPR29(r1) ;\
+ l.lwz r30,PT_GPR30(r1) ;\
+ l.lwz r31,PT_GPR31(r1) ;\
+ l.lwz r1,PT_SP(r1) ;\
+ l.rfe
+
+
+#define EXCEPTION_ENTRY(handler) \
+ .global handler ;\
+handler: ;\
+ /* r1, EPCR, ESR already saved */ ;\
+ l.sw PT_GPR2(r1),r2 ;\
+ l.sw PT_GPR3(r1),r3 ;\
+ l.sw PT_ORIG_GPR11(r1),r11 ;\
+ /* r4 already saved */ ;\
+ l.sw PT_GPR5(r1),r5 ;\
+ l.sw PT_GPR6(r1),r6 ;\
+ l.sw PT_GPR7(r1),r7 ;\
+ l.sw PT_GPR8(r1),r8 ;\
+ l.sw PT_GPR9(r1),r9 ;\
+ /* r10 already saved */ ;\
+ l.sw PT_GPR11(r1),r11 ;\
+ /* r12 already saved */ ;\
+ l.sw PT_GPR13(r1),r13 ;\
+ l.sw PT_GPR14(r1),r14 ;\
+ l.sw PT_GPR15(r1),r15 ;\
+ l.sw PT_GPR16(r1),r16 ;\
+ l.sw PT_GPR17(r1),r17 ;\
+ l.sw PT_GPR18(r1),r18 ;\
+ l.sw PT_GPR19(r1),r19 ;\
+ l.sw PT_GPR20(r1),r20 ;\
+ l.sw PT_GPR21(r1),r21 ;\
+ l.sw PT_GPR22(r1),r22 ;\
+ l.sw PT_GPR23(r1),r23 ;\
+ l.sw PT_GPR24(r1),r24 ;\
+ l.sw PT_GPR25(r1),r25 ;\
+ l.sw PT_GPR26(r1),r26 ;\
+ l.sw PT_GPR27(r1),r27 ;\
+ l.sw PT_GPR28(r1),r28 ;\
+ l.sw PT_GPR29(r1),r29 ;\
+ /* r30 already saved */ ;\
+/* l.sw PT_GPR30(r1),r30*/ ;\
+ l.sw PT_GPR31(r1),r31 ;\
+ l.sw PT_SYSCALLNO(r1),r0
+
+#define UNHANDLED_EXCEPTION(handler,vector) \
+ .global handler ;\
+handler: ;\
+ /* r1, EPCR, ESR already saved */ ;\
+ l.sw PT_GPR2(r1),r2 ;\
+ l.sw PT_GPR3(r1),r3 ;\
+ l.sw PT_ORIG_GPR11(r1),r11 ;\
+ l.sw PT_GPR5(r1),r5 ;\
+ l.sw PT_GPR6(r1),r6 ;\
+ l.sw PT_GPR7(r1),r7 ;\
+ l.sw PT_GPR8(r1),r8 ;\
+ l.sw PT_GPR9(r1),r9 ;\
+ /* r10 already saved */ ;\
+ l.sw PT_GPR11(r1),r11 ;\
+ /* r12 already saved */ ;\
+ l.sw PT_GPR13(r1),r13 ;\
+ l.sw PT_GPR14(r1),r14 ;\
+ l.sw PT_GPR15(r1),r15 ;\
+ l.sw PT_GPR16(r1),r16 ;\
+ l.sw PT_GPR17(r1),r17 ;\
+ l.sw PT_GPR18(r1),r18 ;\
+ l.sw PT_GPR19(r1),r19 ;\
+ l.sw PT_GPR20(r1),r20 ;\
+ l.sw PT_GPR21(r1),r21 ;\
+ l.sw PT_GPR22(r1),r22 ;\
+ l.sw PT_GPR23(r1),r23 ;\
+ l.sw PT_GPR24(r1),r24 ;\
+ l.sw PT_GPR25(r1),r25 ;\
+ l.sw PT_GPR26(r1),r26 ;\
+ l.sw PT_GPR27(r1),r27 ;\
+ l.sw PT_GPR28(r1),r28 ;\
+ l.sw PT_GPR29(r1),r29 ;\
+ /* r31 already saved */ ;\
+ l.sw PT_GPR30(r1),r30 ;\
+/* l.sw PT_GPR31(r1),r31 */ ;\
+ l.sw PT_SYSCALLNO(r1),r0 ;\
+ l.addi r3,r1,0 ;\
+ /* r4 is exception EA */ ;\
+ l.addi r5,r0,vector ;\
+ l.jal unhandled_exception ;\
+ l.nop ;\
+ l.j _ret_from_exception ;\
+ l.nop
+
+/*
+ * NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR
+ * contain the same values as when the exception we're handling
+ * occurred. In fact they never do. If you need them, use the
+ * values saved on the stack (for SPR_EPC, SPR_ESR) or the content
+ * of r4 (for SPR_EEAR). For details, look at EXCEPTION_HANDLE()
+ * in 'arch/or32/kernel/head.S'
+ */
+
+/* =====================================================[ exceptions] === */
+
+/* ---[ 0x100: RESET exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_tng_kernel_start)
+ l.jal _start
+ l.andi r0,r0,0
+
+/* ---[ 0x200: BUS exception ]------------------------------------------- */
+
+EXCEPTION_ENTRY(_bus_fault_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_bus_fault
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+
+EXCEPTION_ENTRY(_data_page_fault_handler)
+ /* set up parameters for do_page_fault */
+ l.addi r3,r1,0 // pt_regs
+ /* r4 set by EXCEPTION_HANDLE */ // effective address of fault
+ l.ori r5,r0,0x300 // exception vector
+
+ /*
+ * __PHX__: TODO
+ *
+ * all this can be written much simpler. look at
+ * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
+ */
+#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
+ l.lwz r6,PT_PC(r3) // address of an offending insn
+ l.lwz r6,0(r6) // instruction that caused pf
+
+ l.srli r6,r6,26 // check opcode for jump insn
+ l.sfeqi r6,0 // l.j
+ l.bf 8f
+ l.sfeqi r6,1 // l.jal
+ l.bf 8f
+ l.sfeqi r6,3 // l.bnf
+ l.bf 8f
+ l.sfeqi r6,4 // l.bf
+ l.bf 8f
+ l.sfeqi r6,0x11 // l.jr
+ l.bf 8f
+ l.sfeqi r6,0x12 // l.jalr
+ l.bf 8f
+
+ l.nop
+
+ l.j 9f
+ l.nop
+8:
+
+ l.lwz r6,PT_PC(r3) // address of an offending insn
+ l.addi r6,r6,4
+ l.lwz r6,0(r6) // instruction that caused pf
+ l.srli r6,r6,26 // get opcode
+9:
+
+#else
+
+ l.mfspr r6,r0,SPR_SR // SR
+// l.lwz r6,PT_SR(r3) // ESR
+ l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
+ l.sfne r6,r0 // exception happened in delay slot
+ l.bnf 7f
+ l.lwz r6,PT_PC(r3) // address of an offending insn
+
+ l.addi r6,r6,4 // offending insn is in delay slot
+7:
+ l.lwz r6,0(r6) // instruction that caused pf
+ l.srli r6,r6,26 // check opcode for write access
+#endif
+
+ l.sfgeui r6,0x34 // check opcode for write access
+ l.bnf 1f
+ l.sfleui r6,0x37
+ l.bnf 1f
+ l.ori r6,r0,0x1 // write access
+ l.j 2f
+ l.nop
+1: l.ori r6,r0,0x0 // !write access
+2:
+
+ /* call fault.c handler in or32/mm/fault.c */
+ l.jal do_page_fault
+ l.nop
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+
+EXCEPTION_ENTRY(_insn_page_fault_handler)
+ /* set up parameters for do_page_fault */
+ l.addi r3,r1,0 // pt_regs
+ /* r4 set by EXCEPTION_HANDLE */ // effective address of fault
+ l.ori r5,r0,0x400 // exception vector
+ l.ori r6,r0,0x0 // !write access
+
+ /* call fault.c handler in or32/mm/fault.c */
+ l.jal do_page_fault
+ l.nop
+ l.j _ret_from_exception
+ l.nop
+
+
+/* ---[ 0x500: Timer exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_timer_handler)
+ l.jal timer_interrupt
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_intr
+ l.nop
+
+/* ---[ 0x600: Alignment exception ]------------------------------------- */
+
+EXCEPTION_ENTRY(_alignment_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_unaligned_access
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+#if 0
+EXCEPTION_ENTRY(_aligment_handler)
+// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */
+ l.addi r2,r4,0
+// l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */
+ l.lwz r5,PT_PC(r1)
+
+ l.lwz r3,0(r5) /* Load insn */
+ l.srli r4,r3,26 /* Shift right to get the insn opcode */
+
+ l.sfeqi r4,0x00 /* Check if the load/store insn is in delay slot */
+ l.bf jmp
+ l.sfeqi r4,0x01
+ l.bf jmp
+ l.sfeqi r4,0x03
+ l.bf jmp
+ l.sfeqi r4,0x04
+ l.bf jmp
+ l.sfeqi r4,0x11
+ l.bf jr
+ l.sfeqi r4,0x12
+ l.bf jr
+ l.nop
+ l.j 1f
+ l.addi r5,r5,4 /* Increment PC to get return insn address */
+
+jmp:
+ l.slli r4,r3,6 /* Get the signed extended jump length */
+ l.srai r4,r4,4
+
+ l.lwz r3,4(r5) /* Load the real load/store insn */
+
+ l.add r5,r5,r4 /* Calculate jump target address */
+
+ l.j 1f
+ l.srli r4,r3,26 /* Shift right to get the insn opcode */
+
+jr:
+ l.slli r4,r3,9 /* Shift to get the reg nb */
+ l.andi r4,r4,0x7c
+
+ l.lwz r3,4(r5) /* Load the real load/store insn */
+
+ l.add r4,r4,r1 /* Load the jump register value from the stack */
+ l.lwz r5,0(r4)
+
+ l.srli r4,r3,26 /* Shift right to get the insn opcode */
+
+
+1:
+// l.mtspr r0,r5,SPR_EPCR_BASE
+ l.sw PT_PC(r1),r5
+
+ l.sfeqi r4,0x26
+ l.bf lhs
+ l.sfeqi r4,0x25
+ l.bf lhz
+ l.sfeqi r4,0x22
+ l.bf lws
+ l.sfeqi r4,0x21
+ l.bf lwz
+ l.sfeqi r4,0x37
+ l.bf sh
+ l.sfeqi r4,0x35
+ l.bf sw
+ l.nop
+
+1: l.j 1b /* I don't know what to do */
+ l.nop
+
+lhs: l.lbs r5,0(r2)
+ l.slli r5,r5,8
+ l.lbz r6,1(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+lhz: l.lbz r5,0(r2)
+ l.slli r5,r5,8
+ l.lbz r6,1(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+lws: l.lbs r5,0(r2)
+ l.slli r5,r5,24
+ l.lbz r6,1(r2)
+ l.slli r6,r6,16
+ l.or r5,r5,r6
+ l.lbz r6,2(r2)
+ l.slli r6,r6,8
+ l.or r5,r5,r6
+ l.lbz r6,3(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+lwz: l.lbz r5,0(r2)
+ l.slli r5,r5,24
+ l.lbz r6,1(r2)
+ l.slli r6,r6,16
+ l.or r5,r5,r6
+ l.lbz r6,2(r2)
+ l.slli r6,r6,8
+ l.or r5,r5,r6
+ l.lbz r6,3(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+sh:
+ l.srli r4,r3,9
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.lwz r5,0(r4)
+ l.sb 1(r2),r5
+ l.srli r5,r5,8
+ l.j align_end
+ l.sb 0(r2),r5
+
+sw:
+ l.srli r4,r3,9
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.lwz r5,0(r4)
+ l.sb 3(r2),r5
+ l.srli r5,r5,8
+ l.sb 2(r2),r5
+ l.srli r5,r5,8
+ l.sb 1(r2),r5
+ l.srli r5,r5,8
+ l.j align_end
+ l.sb 0(r2),r5
+
+align_end:
+ l.j _ret_from_intr
+ l.nop
+#endif
+
+/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
+
+EXCEPTION_ENTRY(_illegal_instruction_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_illegal_instruction
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0x800: External interrupt exception ]---------------------------- */
+
+EXCEPTION_ENTRY(_external_irq_handler)
+#ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK
+ l.lwz r4,PT_SR(r1) // were interrupts enabled ?
+ l.andi r4,r4,SPR_SR_IEE
+ l.sfeqi r4,0
+ l.bnf 1f // ext irq enabled, all ok.
+ l.nop
+
+ l.addi r1,r1,-0x8
+ l.movhi r3,hi(42f)
+ l.ori r3,r3,lo(42f)
+ l.sw 0x0(r1),r3
+ l.jal printk
+ l.sw 0x4(r1),r4
+ l.addi r1,r1,0x8
+
+ .section .rodata, "a"
+42:
+ .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+ .align 4
+ .previous
+
+ l.ori r4,r4,SPR_SR_IEE // fix the bug
+// l.sw PT_SR(r1),r4
+1:
+#endif
+ l.addi r3,r1,0
+ l.movhi r8,hi(do_IRQ)
+ l.ori r8,r8,lo(do_IRQ)
+ l.jalr r8
+ l.nop
+ l.j _ret_from_intr
+ l.nop
+
+/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xb00: Range exception ]----------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0xb00,0xb00)
+
+/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
+
+/*
+ * Syscalls are a special type of exception in that they are
+ * _explicitly_ invoked by userspace and can therefore be
+ * held to conform to the same ABI as normal functions with
+ * respect to whether registers are preserved across the call
+ * or not.
+ */
+
+/* Upon syscall entry we just save the callee-saved registers
+ * and not the call-clobbered ones.
+ */
+
+_string_syscall_return:
+ .string "syscall return %ld \n\r\0"
+ .align 4
+
+ENTRY(_sys_call_handler)
+ /* syscalls run with interrupts enabled */
+ ENABLE_INTERRUPTS(r29) // enable interrupts, r29 is temp
+
+ /* r1, EPCR, ESR already saved */
+ l.sw PT_GPR2(r1),r2
+ /* r3-r8 must be saved because syscall restart relies
+ * on us being able to restore the syscall args... technically
+ * they could be treated as clobbered otherwise
+ */
+ l.sw PT_GPR3(r1),r3
+ /* r4 already saved */
+ /* r4 holds the EEAR address of the fault, load the original r4 */
+ l.lwz r4,PT_GPR4(r1)
+ l.sw PT_GPR5(r1),r5
+ l.sw PT_GPR6(r1),r6
+ l.sw PT_GPR7(r1),r7
+ l.sw PT_GPR8(r1),r8
+ l.sw PT_GPR9(r1),r9
+ /* r10 already saved */
+ l.sw PT_GPR11(r1),r11
+ l.sw PT_ORIG_GPR11(r1),r11
+ /* r12,r13 already saved */
+
+ /* r14-r28 (even) aren't touched by the syscall fast path below
+ * so we don't need to save them. However, the functions that return
+ * to userspace via a call to switch() DO need to save these because
+ * switch() effectively clobbers them... saving these registers for
+ * such functions is handled in their syscall wrappers (see fork, vfork,
+ * and clone, below).
+ */
+
+ /* r30 is the only register we clobber in the fast path */
+ /* r30 already saved */
+/* l.sw PT_GPR30(r1),r30 */
+ /* This is used by do_signal to determine whether to check for
+ * syscall restart or not */
+ l.sw PT_SYSCALLNO(r1),r11
+
+_syscall_check_trace_enter:
+ /* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
+ l.lwz r30,TI_FLAGS(r10)
+ l.andi r30,r30,_TIF_SYSCALL_TRACE
+ l.sfne r30,r0
+ l.bf _syscall_trace_enter
+ l.nop
+
+_syscall_check:
+ /* Ensure that the syscall number is reasonable */
+ l.sfgeui r11,__NR_syscalls
+ l.bf _syscall_badsys
+ l.nop
+
+_syscall_call:
+ l.movhi r29,hi(sys_call_table)
+ l.ori r29,r29,lo(sys_call_table)
+ l.slli r11,r11,2
+ l.add r29,r29,r11
+ l.lwz r29,0(r29)
+
+ l.jalr r29
+ l.nop
+
+_syscall_return:
+ /* All syscalls return here... just pay attention to ret_from_fork
+ * which does it in a round-about way.
+ */
+ l.sw PT_GPR11(r1),r11 // save return value
+
+#if 0
+_syscall_debug:
+ l.movhi r3,hi(_string_syscall_return)
+ l.ori r3,r3,lo(_string_syscall_return)
+ l.ori r27,r0,1
+ l.sw -4(r1),r27
+ l.sw -8(r1),r11
+ l.addi r1,r1,-8
+ l.movhi r27,hi(printk)
+ l.ori r27,r27,lo(printk)
+ l.jalr r27
+ l.nop
+ l.addi r1,r1,8
+#endif
+
+_syscall_check_trace_leave:
+ /* r30 is a callee-saved register so this should still hold the
+ * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above...
+ * _syscall_trace_leave expects syscall result to be in pt_regs->r11.
+ */
+ l.sfne r30,r0
+ l.bf _syscall_trace_leave
+ l.nop
+
+/* This is where the exception-return code begins... interrupts need to be
+ * disabled the rest of the way here because we can't afford to miss any
+ * interrupts that set TIF_NEED_RESCHED or TIF_SIGPENDING... really true? */
+
+_syscall_check_work:
+ /* Here we need to disable interrupts */
+ DISABLE_INTERRUPTS(r27,r29)
+ l.lwz r30,TI_FLAGS(r10)
+ l.andi r30,r30,_TIF_WORK_MASK
+ l.sfne r30,r0
+
+ l.bnf _syscall_resume_userspace
+ l.nop
+
+ /* Work pending follows a different return path, so we need to
+ * make sure that all the call-saved registers get into pt_regs
+ * before branching...
+ */
+ l.sw PT_GPR14(r1),r14
+ l.sw PT_GPR16(r1),r16
+ l.sw PT_GPR18(r1),r18
+ l.sw PT_GPR20(r1),r20
+ l.sw PT_GPR22(r1),r22
+ l.sw PT_GPR24(r1),r24
+ l.sw PT_GPR26(r1),r26
+ l.sw PT_GPR28(r1),r28
+
+ /* _work_pending needs to be called with interrupts disabled */
+ l.j _work_pending
+ l.nop
+
+_syscall_resume_userspace:
+// ENABLE_INTERRUPTS(r29)
+
+
+/* This is the hot path for returning to userspace from a syscall. If there's
+ * work to be done and the branch to _work_pending was taken above, then the
+ * return to userspace will be done via the normal exception return path...
+ * that path restores _all_ registers and will overwrite the "clobbered"
+ * registers with whatever garbage is in pt_regs -- that's OK because those
+ * registers are clobbered anyway and because the extra work is insignificant
+ * in the context of the extra work that _work_pending is doing.
+ */
+
+/* Once again, syscalls are special and only guarantee to preserve the
+ * same registers as a normal function call */
+
+/* The assumption here is that the registers r14-r28 (even) are untouched and
+ * don't need to be restored... be sure that that's really the case!
+ */
+
+/* This is still too much... we should only be restoring what we actually
+ * clobbered... we should even be using 'scratch' (odd) regs above so that
+ * we don't need to restore anything, hardly...
+ */
+
+ l.lwz r2,PT_GPR2(r1)
+
+ /* Restore args */
+ /* r3-r8 are technically clobbered, but syscall restart needs these
+ * to be restored...
+ */
+ l.lwz r3,PT_GPR3(r1)
+ l.lwz r4,PT_GPR4(r1)
+ l.lwz r5,PT_GPR5(r1)
+ l.lwz r6,PT_GPR6(r1)
+ l.lwz r7,PT_GPR7(r1)
+ l.lwz r8,PT_GPR8(r1)
+
+ l.lwz r9,PT_GPR9(r1)
+ l.lwz r10,PT_GPR10(r1)
+ l.lwz r11,PT_GPR11(r1)
+
+ /* r30 is the only register we clobber in the fast path */
+ l.lwz r30,PT_GPR30(r1)
+
+ /* Here we use r13-r19 (odd) as scratch regs */
+ l.lwz r13,PT_PC(r1)
+ l.lwz r15,PT_SR(r1)
+ l.lwz r1,PT_SP(r1)
+ /* Interrupts need to be disabled for setting EPCR and ESR
+ * so that another interrupt doesn't come in here and clobber
+ * them before we can use them for our l.rfe */
+ DISABLE_INTERRUPTS(r17,r19)
+ l.mtspr r0,r13,SPR_EPCR_BASE
+ l.mtspr r0,r15,SPR_ESR_BASE
+ l.rfe
+
+/* End of hot path!
+ * Keep the below tracing and error handling out of the hot path...
+*/
+
+_syscall_trace_enter:
+ /* Here we pass pt_regs to do_syscall_trace_enter. Make sure
+ * that function is really getting all the info it needs as
+ * pt_regs isn't a complete set of userspace regs, just the
+ * ones relevant to the syscall...
+ *
+ * Note use of delay slot for setting argument.
+ */
+ l.jal do_syscall_trace_enter
+ l.addi r3,r1,0
+
+ /* Restore arguments (not preserved across do_syscall_trace_enter)
+ * so that we can do the syscall for real and return to the syscall
+ * hot path.
+ */
+ l.lwz r11,PT_SYSCALLNO(r1)
+ l.lwz r3,PT_GPR3(r1)
+ l.lwz r4,PT_GPR4(r1)
+ l.lwz r5,PT_GPR5(r1)
+ l.lwz r6,PT_GPR6(r1)
+ l.lwz r7,PT_GPR7(r1)
+
+ l.j _syscall_check
+ l.lwz r8,PT_GPR8(r1)
+
+_syscall_trace_leave:
+ l.jal do_syscall_trace_leave
+ l.addi r3,r1,0
+
+ l.j _syscall_check_work
+ l.nop
+
+_syscall_badsys:
+ /* Here we effectively pretend to have executed an imaginary
+ * syscall that returns -ENOSYS and then return to the regular
+ * syscall hot path.
+ * Note that "return value" is set in the delay slot...
+ */
+ l.j _syscall_return
+ l.addi r11,r0,-ENOSYS
+
+/******* END SYSCALL HANDLING *******/
+
+/* ---[ 0xd00: Trap exception ]------------------------------------------ */
+
+UNHANDLED_EXCEPTION(_vector_0xd00,0xd00)
+
+/* ---[ 0xe00: Trap exception ]------------------------------------------ */
+
+EXCEPTION_ENTRY(_trap_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_trap
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0xf00,0xf00)
+
+/* ---[ 0x1000: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1000,0x1000)
+
+/* ---[ 0x1100: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1100,0x1100)
+
+/* ---[ 0x1200: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1200,0x1200)
+
+/* ---[ 0x1300: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1300,0x1300)
+
+/* ---[ 0x1400: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1400,0x1400)
+
+/* ---[ 0x1500: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1500,0x1500)
+
+/* ---[ 0x1600: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1600,0x1600)
+
+/* ---[ 0x1700: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1700,0x1700)
+
+/* ---[ 0x1800: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1800,0x1800)
+
+/* ---[ 0x1900: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1900,0x1900)
+
+/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00)
+
+/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00)
+
+/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00)
+
+/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00)
+
+/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00)
+
+/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
+
+/* ========================================================[ return ] === */
+
+_work_pending:
+ /*
+ * if (current_thread_info->flags & _TIF_NEED_RESCHED)
+ * schedule();
+ */
+ l.lwz r5,TI_FLAGS(r10)
+ l.andi r3,r5,_TIF_NEED_RESCHED
+ l.sfnei r3,0
+ l.bnf _work_notifysig
+ l.nop
+ l.jal schedule
+ l.nop
+ l.j _resume_userspace
+ l.nop
+
+/* Handle pending signals and notify-resume requests.
+ * do_notify_resume must be passed the latest pushed pt_regs, not
+ * necessarily the "userspace" ones. Also, pt_regs->syscallno
+ * must be set so that the syscall restart functionality works.
+ */
+_work_notifysig:
+ l.jal do_notify_resume
+ l.ori r3,r1,0 /* pt_regs */
+
+_resume_userspace:
+ DISABLE_INTERRUPTS(r3,r4)
+ l.lwz r3,TI_FLAGS(r10)
+ l.andi r3,r3,_TIF_WORK_MASK
+ l.sfnei r3,0
+ l.bf _work_pending
+ l.nop
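+
+/* Taken together, the blocks above are roughly this C sketch:
+ *
+ * do {
+ * if (ti->flags & _TIF_NEED_RESCHED)
+ * schedule();
+ * else
+ * do_notify_resume(regs);
+ * local_irq_disable();
+ * } while (ti->flags & _TIF_WORK_MASK);
+ */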
+
+_restore_all:
+ RESTORE_ALL
+ /* This returns to userspace code */
+
+
+ENTRY(_ret_from_intr)
+ENTRY(_ret_from_exception)
+ l.lwz r4,PT_SR(r1)
+ l.andi r3,r4,SPR_SR_SM
+ l.sfeqi r3,0
+ l.bnf _restore_all
+ l.nop
+ l.j _resume_userspace
+ l.nop
+
+ENTRY(ret_from_fork)
+ l.jal schedule_tail
+ l.nop
+
+ /* _syscall_return expects r11 to contain the return value */
+ l.lwz r11,PT_GPR11(r1)
+
+ /* The syscall fast path return expects call-saved registers
+ * r12-r28 to be untouched, so we restore them here as they
+ * will have been effectively clobbered when arriving here
+ * via the call to switch()
+ */
+ l.lwz r12,PT_GPR12(r1)
+ l.lwz r14,PT_GPR14(r1)
+ l.lwz r16,PT_GPR16(r1)
+ l.lwz r18,PT_GPR18(r1)
+ l.lwz r20,PT_GPR20(r1)
+ l.lwz r22,PT_GPR22(r1)
+ l.lwz r24,PT_GPR24(r1)
+ l.lwz r26,PT_GPR26(r1)
+ l.lwz r28,PT_GPR28(r1)
+
+ l.j _syscall_return
+ l.nop
+
+/* Since syscalls don't save call-clobbered registers, the args to
+ * kernel_thread_helper will need to be passed through callee-saved
+ * registers and copied to the parameter registers when the thread
+ * begins running.
+ *
+ * See arch/openrisc/kernel/process.c:
+ * The args are passed as follows:
+ * arg1 (r3) : passed in r20
+ * arg2 (r4) : passed in r22
+ */
+ */
+
+ENTRY(_kernel_thread_helper)
+ l.or r3,r20,r0
+ l.or r4,r22,r0
+ l.movhi r31,hi(kernel_thread_helper)
+ l.ori r31,r31,lo(kernel_thread_helper)
+ l.jr r31
+ l.nop
+
+
+/* ========================================================[ switch ] === */
+
+/*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+ * of the other is restored from its kernel stack. The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via the 'return'.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path. If you change this (or in particular, the
+ * SAVE_REGS macro), you'll have to change the fork code also.
+ */
+
+
+/* _switch MUST never lie on a page boundary, because it runs from
+ * effective addresses and being interrupted by an iTLB miss would kill it.
+ * A dTLB miss seems never to occur in the bad place since data accesses
+ * are from task structures, which are always page aligned.
+ *
+ * The problem happens in RESTORE_ALL_NO_R11 where we first set the EPCR
+ * register, then load the previous register values and only at the end call
+ * the l.rfe instruction. If we get a TLB miss in between, the EPCR register
+ * gets garbled and we end up calling l.rfe with the wrong EPCR. (The same
+ * probably holds for ESR.)
+ *
+ * To avoid this problem it is sufficient to align _switch to
+ * some nice round number smaller than its size...
+ */
+
+/* ABI rules apply here... we either enter _switch via schedule() or via
+ * an imaginary call to which we shall return at return_from_fork. Either
+ * way, we are a function call and only need to preserve the callee-saved
+ * registers when we return. As such, we don't need to save the registers
+ * on the stack that we won't be returning as they were...
+ */
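+
+/* Conceptually, _switch below boils down to this C sketch ('prev' is the
+ * outgoing task in r10, 'next' the incoming one in r4):
+ *
+ * push callee-saved regs onto prev's kernel stack;
+ * prev_ti->ksp = sp; sp = next_ti->ksp; current_thread_info = next_ti;
+ * pop callee-saved regs from next's kernel stack;
+ * return; // via r9, into schedule() or ret_from_fork
+ */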
+
+ .align 0x400
+ENTRY(_switch)
+ /* We don't store SR as _switch only gets called in a context where
+ * the SR will be the same going in and coming out... */
+
+ /* Set up new pt_regs struct for saving task state */
+ l.addi r1,r1,-(INT_FRAME_SIZE)
+
+ /* No need to store r1/PT_SP as it goes into KSP below */
+ l.sw PT_GPR2(r1),r2
+ l.sw PT_GPR9(r1),r9
+ /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
+ * and expects r12 to be callee-saved... */
+ l.sw PT_GPR12(r1),r12
+ l.sw PT_GPR14(r1),r14
+ l.sw PT_GPR16(r1),r16
+ l.sw PT_GPR18(r1),r18
+ l.sw PT_GPR20(r1),r20
+ l.sw PT_GPR22(r1),r22
+ l.sw PT_GPR24(r1),r24
+ l.sw PT_GPR26(r1),r26
+ l.sw PT_GPR28(r1),r28
+ l.sw PT_GPR30(r1),r30
+
+ l.addi r11,r10,0 /* Save old 'current' to 'last' return value*/
+
+ /* We use thread_info->ksp for storing the address of the above
+ * structure so that we can get back to it later... we don't want
+ * to lose the value of thread_info->ksp, though, so store it as
+ * pt_regs->sp so that we can easily restore it when we are made
+ * live again...
+ */
+
+ /* Save the old value of thread_info->ksp as pt_regs->sp */
+ l.lwz r29,TI_KSP(r10)
+ l.sw PT_SP(r1),r29
+
+ /* Swap kernel stack pointers */
+ l.sw TI_KSP(r10),r1 /* Save old stack pointer */
+ l.or r10,r4,r0 /* Set up new current_thread_info */
+ l.lwz r1,TI_KSP(r10) /* Load new stack pointer */
+
+ /* Restore the old value of thread_info->ksp */
+ l.lwz r29,PT_SP(r1)
+ l.sw TI_KSP(r10),r29
+
+ /* ...and restore the registers, except r11 because the return value
+ * has already been set above.
+ */
+ l.lwz r2,PT_GPR2(r1)
+ l.lwz r9,PT_GPR9(r1)
+ /* No need to restore r10 */
+ /* ...and do not restore r11 */
+
+ /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
+ * and expects r12 to be callee-saved... */
+ l.lwz r12,PT_GPR12(r1)
+ l.lwz r14,PT_GPR14(r1)
+ l.lwz r16,PT_GPR16(r1)
+ l.lwz r18,PT_GPR18(r1)
+ l.lwz r20,PT_GPR20(r1)
+ l.lwz r22,PT_GPR22(r1)
+ l.lwz r24,PT_GPR24(r1)
+ l.lwz r26,PT_GPR26(r1)
+ l.lwz r28,PT_GPR28(r1)
+ l.lwz r30,PT_GPR30(r1)
+
+ /* Unwind stack to pre-switch state */
+ l.addi r1,r1,(INT_FRAME_SIZE)
+
+ /* Return via the link-register back to where we 'came from', where that can be
+ * either schedule() or return_from_fork()... */
+ l.jr r9
+ l.nop
+
+/* ==================================================================== */
+
+/* These all use the delay slot for setting the argument register, so the
+ * jump is always happening after the l.addi instruction.
+ *
+ * These are all just wrappers that don't touch the link-register r9, so the
+ * return from the "real" syscall function will return back to the syscall
+ * code that did the l.jal that brought us here.
+ */
+
+/* fork requires that we save all the callee-saved registers because they
+ * are all effectively clobbered by the call to _switch. Here we store
+ * all the registers that aren't touched by the syscall fast path and thus
+ * weren't saved there.
+ */
+
+_fork_save_extra_regs_and_call:
+ l.sw PT_GPR14(r1),r14
+ l.sw PT_GPR16(r1),r16
+ l.sw PT_GPR18(r1),r18
+ l.sw PT_GPR20(r1),r20
+ l.sw PT_GPR22(r1),r22
+ l.sw PT_GPR24(r1),r24
+ l.sw PT_GPR26(r1),r26
+ l.jr r29
+ l.sw PT_GPR28(r1),r28
+
+ENTRY(sys_clone)
+ l.movhi r29,hi(_sys_clone)
+ l.ori r29,r29,lo(_sys_clone)
+ l.j _fork_save_extra_regs_and_call
+ l.addi r7,r1,0
+
+ENTRY(sys_fork)
+ l.movhi r29,hi(_sys_fork)
+ l.ori r29,r29,lo(_sys_fork)
+ l.j _fork_save_extra_regs_and_call
+ l.addi r3,r1,0
+
+ENTRY(sys_execve)
+ l.j _sys_execve
+ l.addi r6,r1,0
+
+ENTRY(sys_sigaltstack)
+ l.j _sys_sigaltstack
+ l.addi r5,r1,0
+
+ENTRY(sys_rt_sigreturn)
+ l.j _sys_rt_sigreturn
+ l.addi r3,r1,0
+
+/* This is a catch-all syscall for atomic instructions for the OpenRISC 1000.
+ * The function takes a variable number of parameters depending on which
+ * particular flavour of atomic you want... parameter 1 is a flag identifying
+ * the atomic in question. Currently, this function implements the
+ * following variants:
+ *
+ * XCHG:
+ * @flag: 1
+ * @ptr1:
+ * @ptr2:
+ * Atomically exchange the values in pointers 1 and 2.
+ *
+ */
+
+ENTRY(sys_or1k_atomic)
+ /* FIXME: This ignores r3 and always does an XCHG */
+ DISABLE_INTERRUPTS(r17,r19)
+ l.lwz r30,0(r4)
+ l.lwz r28,0(r5)
+ l.sw 0(r4),r28
+ l.sw 0(r5),r30
+ ENABLE_INTERRUPTS(r17)
+ l.jr r9
+ l.or r11,r0,r0
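+
+/* The XCHG variant above is, in C (a sketch; it is atomic only by virtue
+ * of running with interrupts disabled on this uniprocessor port):
+ *
+ * long tmp = *ptr1; *ptr1 = *ptr2; *ptr2 = tmp; return 0;
+ */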
+
+/* ============================================================[ EOF ]=== */
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
new file mode 100644
index 000000000000..c75018d22644
--- /dev/null
+++ b/arch/openrisc/kernel/head.S
@@ -0,0 +1,1607 @@
+/*
+ * OpenRISC head.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+#include <asm/spr_defs.h>
+#include <asm/asm-offsets.h>
+
+#define tophys(rd,rs) \
+ l.movhi rd,hi(-KERNELBASE) ;\
+ l.add rd,rd,rs
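+
+/* tophys converts a kernel virtual address to a physical one, i.e.
+ * pa = va - KERNELBASE; a single l.add suffices on the assumption that
+ * the low 16 bits of KERNELBASE are zero, so hi(-KERNELBASE) is exact.
+ */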
+
+#define CLEAR_GPR(gpr) \
+ l.or gpr,r0,r0
+
+#define LOAD_SYMBOL_2_GPR(gpr,symbol) \
+ l.movhi gpr,hi(symbol) ;\
+ l.ori gpr,gpr,lo(symbol)
+
+
+#define UART_BASE_ADD 0x90000000
+
+#define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
+#define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
+
+/* ============================================[ tmp store locations ]=== */
+
+/*
+ * emergency_print temporary stores
+ */
+#define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4
+#define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5
+#define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6
+#define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7
+#define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8
+#define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9
+#define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0)
+
+
+/*
+ * TLB miss handlers' temporary stores
+ */
+#define EXCEPTION_STORE_GPR9 l.sw 0x10(r0),r9
+#define EXCEPTION_LOAD_GPR9 l.lwz r9,0x10(r0)
+
+#define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2
+#define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0)
+
+#define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3
+#define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0)
+
+#define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4
+#define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0)
+
+#define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5
+#define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0)
+
+#define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6
+#define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0)
+
+
+/*
+ * EXCEPTION_HANDLE temporary stores
+ */
+
+#define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30
+#define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0)
+
+#define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10
+#define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0)
+
+#define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1
+#define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0)
+
+/*
+ * For UNHANDLED_EXCEPTION
+ */
+
+#define EXCEPTION_T_STORE_GPR31 l.sw 0x84(r0),r31
+#define EXCEPTION_T_LOAD_GPR31(reg) l.lwz reg,0x84(r0)
+
+/* =========================================================[ macros ]=== */
+
+
+#define GET_CURRENT_PGD(reg,t1) \
+ LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
+ tophys (t1,reg) ;\
+ l.lwz reg,0(t1)
+
+
+/*
+ * DSCR: this is a common hook for handling exceptions. it will save
+ * the needed registers, set up stack and pointer to current
+ * then jump to the handler while enabling MMU
+ *
+ * PRMS: handler - a function to jump to. it has to save the
+ * remaining registers to kernel stack, call
+ * appropriate arch-independent exception handler
+ * and finally jump to ret_from_except
+ *
+ * PREQ: unchanged state from the time exception happened
+ *
+ * POST: SAVED the following registers' original values
+ * to the newly created exception frame pointed to by r1
+ *
+ * r1 - ksp pointing to the new (exception) frame
+ * r4 - EEAR exception EA
+ * r10 - current pointing to current_thread_info struct
+ * r12 - syscall 0, since we didn't come from syscall
+ * r13 - temp it actually contains new SR, not needed anymore
+ * r31 - handler address of the handler we'll jump to
+ *
+ * handler has to save remaining registers to the exception
+ * ksp frame *before* tainting them!
+ *
+ * NOTE: this function is not reentrant per se. reentrancy is guaranteed
+ * by the processor disabling all exceptions/interrupts when an
+ * exception occurs.
+ *
+ * OPTM: no need to make it so wasteful to extract ksp when in user mode
+ */
+
+#define EXCEPTION_HANDLE(handler) \
+ EXCEPTION_T_STORE_GPR30 ;\
+ l.mfspr r30,r0,SPR_ESR_BASE ;\
+ l.andi r30,r30,SPR_SR_SM ;\
+ l.sfeqi r30,0 ;\
+ EXCEPTION_T_STORE_GPR10 ;\
+ l.bnf 2f /* kernel_mode */ ;\
+ EXCEPTION_T_STORE_SP /* delay slot */ ;\
+1: /* user_mode: */ ;\
+ LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
+ tophys (r30,r1) ;\
+ /* r10: current_thread_info */ ;\
+ l.lwz r10,0(r30) ;\
+ tophys (r30,r10) ;\
+ l.lwz r1,(TI_KSP)(r30) ;\
+ /* fall through */ ;\
+2: /* kernel_mode: */ ;\
+ /* create new stack frame, save only needed gprs */ ;\
+ /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\
+ /* r12: temp, syscall indicator */ ;\
+ l.addi r1,r1,-(INT_FRAME_SIZE) ;\
+ /* r1 is KSP, r30 is __pa(KSP) */ ;\
+ tophys (r30,r1) ;\
+ l.sw PT_GPR12(r30),r12 ;\
+ l.mfspr r12,r0,SPR_EPCR_BASE ;\
+ l.sw PT_PC(r30),r12 ;\
+ l.mfspr r12,r0,SPR_ESR_BASE ;\
+ l.sw PT_SR(r30),r12 ;\
+ /* save r30 */ ;\
+ EXCEPTION_T_LOAD_GPR30(r12) ;\
+ l.sw PT_GPR30(r30),r12 ;\
+ /* save r10 as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_GPR10(r12) ;\
+ l.sw PT_GPR10(r30),r12 ;\
+ /* save PT_SP as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_SP(r12) ;\
+ l.sw PT_SP(r30),r12 ;\
+ /* save exception r4, set r4 = EA */ ;\
+ l.sw PT_GPR4(r30),r4 ;\
+ l.mfspr r4,r0,SPR_EEAR_BASE ;\
+ /* r12 == 1 if we come from syscall */ ;\
+ CLEAR_GPR(r12) ;\
+ /* ----- turn on MMU ----- */ ;\
+ l.ori r30,r0,(EXCEPTION_SR) ;\
+ l.mtspr r0,r30,SPR_ESR_BASE ;\
+ /* r30: EA address of handler */ ;\
+ LOAD_SYMBOL_2_GPR(r30,handler) ;\
+ l.mtspr r0,r30,SPR_EPCR_BASE ;\
+ l.rfe
+
+/*
+ * this doesn't work
+ *
+ *
+ * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
+ * #define UNHANDLED_EXCEPTION(handler) \
+ * l.ori r3,r0,0x1 ;\
+ * l.mtspr r0,r3,SPR_SR ;\
+ * l.movhi r3,hi(0xf0000100) ;\
+ * l.ori r3,r3,lo(0xf0000100) ;\
+ * l.jr r3 ;\
+ * l.nop 1
+ *
+ * #endif
+ */
+
+/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
+ * a bit more careful (in case we have a PT_SP or current pointer
+ * corruption) and set them up from 'current_set'
+ *
+ */
+#define UNHANDLED_EXCEPTION(handler) \
+ EXCEPTION_T_STORE_GPR31 ;\
+ EXCEPTION_T_STORE_GPR10 ;\
+ EXCEPTION_T_STORE_SP ;\
+ /* temporary store r3, r9 into r1, r10 */ ;\
+ l.addi r1,r3,0x0 ;\
+ l.addi r10,r9,0x0 ;\
+ /* the string referenced by r3 must be low enough */ ;\
+ l.jal _emergency_print ;\
+ l.ori r3,r0,lo(_string_unhandled_exception) ;\
+ l.mfspr r3,r0,SPR_NPC ;\
+ l.jal _emergency_print_nr ;\
+ l.andi r3,r3,0x1f00 ;\
+ /* the string referenced by r3 must be low enough */ ;\
+ l.jal _emergency_print ;\
+ l.ori r3,r0,lo(_string_epc_prefix) ;\
+ l.jal _emergency_print_nr ;\
+ l.mfspr r3,r0,SPR_EPCR_BASE ;\
+ l.jal _emergency_print ;\
+ l.ori r3,r0,lo(_string_nl) ;\
+ /* end of printing */ ;\
+ l.addi r3,r1,0x0 ;\
+ l.addi r9,r10,0x0 ;\
+ /* extract current, ksp from current_set */ ;\
+ LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\
+ LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\
+ /* create new stack frame, save only needed gprs */ ;\
+ /* r1: KSP, r10: current, r31: __pa(KSP) */ ;\
+ /* r12: temp, syscall indicator, r13 temp */ ;\
+ l.addi r1,r1,-(INT_FRAME_SIZE) ;\
+ /* r1 is KSP, r31 is __pa(KSP) */ ;\
+ tophys (r31,r1) ;\
+ l.sw PT_GPR12(r31),r12 ;\
+ l.mfspr r12,r0,SPR_EPCR_BASE ;\
+ l.sw PT_PC(r31),r12 ;\
+ l.mfspr r12,r0,SPR_ESR_BASE ;\
+ l.sw PT_SR(r31),r12 ;\
+ /* save r31 */ ;\
+ EXCEPTION_T_LOAD_GPR31(r12) ;\
+ l.sw PT_GPR31(r31),r12 ;\
+ /* save r10 as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_GPR10(r12) ;\
+ l.sw PT_GPR10(r31),r12 ;\
+ /* save PT_SP as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_SP(r12) ;\
+ l.sw PT_SP(r31),r12 ;\
+ l.sw PT_GPR13(r31),r13 ;\
+ /* --> */ ;\
+ /* save exception r4, set r4 = EA */ ;\
+ l.sw PT_GPR4(r31),r4 ;\
+ l.mfspr r4,r0,SPR_EEAR_BASE ;\
+ /* r12 == 1 if we come from syscall */ ;\
+ CLEAR_GPR(r12) ;\
+ /* ----- play a MMU trick ----- */ ;\
+ l.ori r31,r0,(EXCEPTION_SR) ;\
+ l.mtspr r0,r31,SPR_ESR_BASE ;\
+ /* r31: EA address of handler */ ;\
+ LOAD_SYMBOL_2_GPR(r31,handler) ;\
+ l.mtspr r0,r31,SPR_EPCR_BASE ;\
+ l.rfe
+
+/* =====================================================[ exceptions] === */
+
+/* ---[ 0x100: RESET exception ]----------------------------------------- */
+ .org 0x100
+ /* Jump to .init code at _start which lives in the .head section
+ * and will be discarded after boot.
+ */
+ LOAD_SYMBOL_2_GPR(r4, _start)
+ tophys (r3,r4) /* MMU disabled */
+ l.jr r3
+ l.nop
+
+/* ---[ 0x200: BUS exception ]------------------------------------------- */
+ .org 0x200
+_dispatch_bus_fault:
+ EXCEPTION_HANDLE(_bus_fault_handler)
+
+/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+ .org 0x300
+_dispatch_do_dpage_fault:
+// totally disable timer interrupt
+// l.mtspr r0,r0,SPR_TTMR
+// DEBUG_TLB_PROBE(0x300)
+// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
+ EXCEPTION_HANDLE(_data_page_fault_handler)
+
+/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+ .org 0x400
+_dispatch_do_ipage_fault:
+// totally disable timer interrupt
+// l.mtspr r0,r0,SPR_TTMR
+// DEBUG_TLB_PROBE(0x400)
+// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
+ EXCEPTION_HANDLE(_insn_page_fault_handler)
+
+/* ---[ 0x500: Timer exception ]----------------------------------------- */
+ .org 0x500
+ EXCEPTION_HANDLE(_timer_handler)
+
+/* ---[ 0x600: Alignment exception ]------------------------------------- */
+ .org 0x600
+ EXCEPTION_HANDLE(_alignment_handler)
+
+/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
+ .org 0x700
+ EXCEPTION_HANDLE(_illegal_instruction_handler)
+
+/* ---[ 0x800: External interrupt exception ]---------------------------- */
+ .org 0x800
+ EXCEPTION_HANDLE(_external_irq_handler)
+
+/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
+ .org 0x900
+ l.j boot_dtlb_miss_handler
+ l.nop
+
+/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
+ .org 0xa00
+ l.j boot_itlb_miss_handler
+ l.nop
+
+/* ---[ 0xb00: Range exception ]----------------------------------------- */
+ .org 0xb00
+ UNHANDLED_EXCEPTION(_vector_0xb00)
+
+/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
+ .org 0xc00
+ EXCEPTION_HANDLE(_sys_call_handler)
+
+/* ---[ 0xd00: Trap exception ]------------------------------------------ */
+ .org 0xd00
+ UNHANDLED_EXCEPTION(_vector_0xd00)
+
+/* ---[ 0xe00: Trap exception ]------------------------------------------ */
+ .org 0xe00
+// UNHANDLED_EXCEPTION(_vector_0xe00)
+ EXCEPTION_HANDLE(_trap_handler)
+
+/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
+ .org 0xf00
+ UNHANDLED_EXCEPTION(_vector_0xf00)
+
+/* ---[ 0x1000: Reserved exception ]------------------------------------- */
+ .org 0x1000
+ UNHANDLED_EXCEPTION(_vector_0x1000)
+
+/* ---[ 0x1100: Reserved exception ]------------------------------------- */
+ .org 0x1100
+ UNHANDLED_EXCEPTION(_vector_0x1100)
+
+/* ---[ 0x1200: Reserved exception ]------------------------------------- */
+ .org 0x1200
+ UNHANDLED_EXCEPTION(_vector_0x1200)
+
+/* ---[ 0x1300: Reserved exception ]------------------------------------- */
+ .org 0x1300
+ UNHANDLED_EXCEPTION(_vector_0x1300)
+
+/* ---[ 0x1400: Reserved exception ]------------------------------------- */
+ .org 0x1400
+ UNHANDLED_EXCEPTION(_vector_0x1400)
+
+/* ---[ 0x1500: Reserved exception ]------------------------------------- */
+ .org 0x1500
+ UNHANDLED_EXCEPTION(_vector_0x1500)
+
+/* ---[ 0x1600: Reserved exception ]------------------------------------- */
+ .org 0x1600
+ UNHANDLED_EXCEPTION(_vector_0x1600)
+
+/* ---[ 0x1700: Reserved exception ]------------------------------------- */
+ .org 0x1700
+ UNHANDLED_EXCEPTION(_vector_0x1700)
+
+/* ---[ 0x1800: Reserved exception ]------------------------------------- */
+ .org 0x1800
+ UNHANDLED_EXCEPTION(_vector_0x1800)
+
+/* ---[ 0x1900: Reserved exception ]------------------------------------- */
+ .org 0x1900
+ UNHANDLED_EXCEPTION(_vector_0x1900)
+
+/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
+ .org 0x1a00
+ UNHANDLED_EXCEPTION(_vector_0x1a00)
+
+/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
+ .org 0x1b00
+ UNHANDLED_EXCEPTION(_vector_0x1b00)
+
+/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
+ .org 0x1c00
+ UNHANDLED_EXCEPTION(_vector_0x1c00)
+
+/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
+ .org 0x1d00
+ UNHANDLED_EXCEPTION(_vector_0x1d00)
+
+/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
+ .org 0x1e00
+ UNHANDLED_EXCEPTION(_vector_0x1e00)
+
+/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
+ .org 0x1f00
+ UNHANDLED_EXCEPTION(_vector_0x1f00)
+
+ .org 0x2000
+/* ===================================================[ kernel start ]=== */
+
+/* .text*/
+
+/* This early stuff belongs in HEAD, but some of the functions below definitely
+ * don't... */
+
+ __HEAD
+ .global _start
+_start:
+ /*
+ * ensure a deterministic start
+ */
+
+ l.ori r3,r0,0x1
+ l.mtspr r0,r3,SPR_SR
+
+ CLEAR_GPR(r1)
+ CLEAR_GPR(r2)
+ CLEAR_GPR(r3)
+ CLEAR_GPR(r4)
+ CLEAR_GPR(r5)
+ CLEAR_GPR(r6)
+ CLEAR_GPR(r7)
+ CLEAR_GPR(r8)
+ CLEAR_GPR(r9)
+ CLEAR_GPR(r10)
+ CLEAR_GPR(r11)
+ CLEAR_GPR(r12)
+ CLEAR_GPR(r13)
+ CLEAR_GPR(r14)
+ CLEAR_GPR(r15)
+ CLEAR_GPR(r16)
+ CLEAR_GPR(r17)
+ CLEAR_GPR(r18)
+ CLEAR_GPR(r19)
+ CLEAR_GPR(r20)
+ CLEAR_GPR(r21)
+ CLEAR_GPR(r22)
+ CLEAR_GPR(r23)
+ CLEAR_GPR(r24)
+ CLEAR_GPR(r25)
+ CLEAR_GPR(r26)
+ CLEAR_GPR(r27)
+ CLEAR_GPR(r28)
+ CLEAR_GPR(r29)
+ CLEAR_GPR(r30)
+ CLEAR_GPR(r31)
+
+ /*
+ * set up initial ksp and current
+ */
+ LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000) // setup kernel stack
+ LOAD_SYMBOL_2_GPR(r10,init_thread_union) // setup current
+ tophys (r31,r10)
+ l.sw TI_KSP(r31), r1
+
+ l.ori r4,r0,0x0
+
+
+ /*
+ * .data contains initialized data,
+ * .bss contains uninitialized data - clear it up
+ */
+clear_bss:
+ LOAD_SYMBOL_2_GPR(r24, __bss_start)
+ LOAD_SYMBOL_2_GPR(r26, _end)
+ tophys(r28,r24)
+ tophys(r30,r26)
+ CLEAR_GPR(r24)
+ CLEAR_GPR(r26)
+1:
+ l.sw (0)(r28),r0
+ l.sfltu r28,r30
+ l.bf 1b
+ l.addi r28,r28,4
+
+enable_ic:
+ l.jal _ic_enable
+ l.nop
+
+enable_dc:
+ l.jal _dc_enable
+ l.nop
+
+flush_tlb:
+ /*
+ * I N V A L I D A T E T L B e n t r i e s
+ */
+ LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
+ LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
+ l.addi r7,r0,128 /* Maximum number of sets */
+1:
+ l.mtspr r5,r0,0x0
+ l.mtspr r6,r0,0x0
+
+ l.addi r5,r5,1
+ l.addi r6,r6,1
+ l.sfeq r7,r0
+ l.bnf 1b
+ l.addi r7,r7,-1
+
+
+/* The MMU needs to be enabled before or32_early_setup is called */
+
+enable_mmu:
+ /*
+ * enable dmmu & immu
+ * SR[5] = 1 (DME), SR[6] = 1 (IME): the 6th and 7th bits of SR are set
+ */
+ l.mfspr r30,r0,SPR_SR
+ l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
+ l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
+ l.or r30,r30,r28
+ l.mtspr r0,r30,SPR_SR
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+
+ // reset the simulation counters
+ l.nop 5
+
+ LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
+ l.jalr r24
+ l.nop
+
+clear_regs:
+ /*
+ * clear all GPRS to increase determinism
+ */
+ CLEAR_GPR(r2)
+ CLEAR_GPR(r3)
+ CLEAR_GPR(r4)
+ CLEAR_GPR(r5)
+ CLEAR_GPR(r6)
+ CLEAR_GPR(r7)
+ CLEAR_GPR(r8)
+ CLEAR_GPR(r9)
+ CLEAR_GPR(r11)
+ CLEAR_GPR(r12)
+ CLEAR_GPR(r13)
+ CLEAR_GPR(r14)
+ CLEAR_GPR(r15)
+ CLEAR_GPR(r16)
+ CLEAR_GPR(r17)
+ CLEAR_GPR(r18)
+ CLEAR_GPR(r19)
+ CLEAR_GPR(r20)
+ CLEAR_GPR(r21)
+ CLEAR_GPR(r22)
+ CLEAR_GPR(r23)
+ CLEAR_GPR(r24)
+ CLEAR_GPR(r25)
+ CLEAR_GPR(r26)
+ CLEAR_GPR(r27)
+ CLEAR_GPR(r28)
+ CLEAR_GPR(r29)
+ CLEAR_GPR(r30)
+ CLEAR_GPR(r31)
+
+jump_start_kernel:
+ /*
+ * jump to kernel entry (start_kernel)
+ */
+ LOAD_SYMBOL_2_GPR(r30, start_kernel)
+ l.jr r30
+ l.nop
+
+/* ========================================[ cache ]=== */
+
+ /* alignment here so we don't change memory offsets with
+ * memory controller defined
+ */
+ .align 0x2000
+
+_ic_enable:
+ /* Check if IC present and skip enabling otherwise */
+ l.mfspr r24,r0,SPR_UPR
+ l.andi r26,r24,SPR_UPR_ICP
+ l.sfeq r26,r0
+ l.bf 9f
+ l.nop
+
+ /* Disable IC */
+ l.mfspr r6,r0,SPR_SR
+ l.addi r5,r0,-1
+ l.xori r5,r5,SPR_SR_ICE
+ l.and r5,r6,r5
+ l.mtspr r0,r5,SPR_SR
+
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contains the block size
+ */
+ l.mfspr r24,r0,SPR_ICCFGR
+ l.andi r26,r24,SPR_ICCFGR_CBS
+ l.srli r28,r26,7
+ l.ori r30,r0,16
+ l.sll r14,r30,r28
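+ /* i.e. r14 = 16 << ((ICCFGR & CBS) >> 7), giving 16 or 32 bytes;
+ * _dc_enable below uses the same scheme with DCCFGR */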
+
+ /* Establish number of cache sets
+ r16 contains number of cache sets
+ r28 contains log(# of cache sets)
+ */
+ l.andi r26,r24,SPR_ICCFGR_NCS
+ l.srli r28,r26,3
+ l.ori r30,r0,1
+ l.sll r16,r30,r28
+
+ /* Invalidate IC */
+ l.addi r6,r0,0
+ l.sll r5,r14,r28
+// l.mul r5,r14,r16
+// l.trap 1
+// l.addi r5,r0,IC_SIZE
+1:
+ l.mtspr r0,r6,SPR_ICBIR
+ l.sfne r6,r5
+ l.bf 1b
+ l.add r6,r6,r14
+ // l.addi r6,r6,IC_LINE
+
+ /* Enable IC */
+ l.mfspr r6,r0,SPR_SR
+ l.ori r6,r6,SPR_SR_ICE
+ l.mtspr r0,r6,SPR_SR
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+9:
+ l.jr r9
+ l.nop
+
+_dc_enable:
+ /* Check if DC present and skip enabling otherwise */
+ l.mfspr r24,r0,SPR_UPR
+ l.andi r26,r24,SPR_UPR_DCP
+ l.sfeq r26,r0
+ l.bf 9f
+ l.nop
+
+ /* Disable DC */
+ l.mfspr r6,r0,SPR_SR
+ l.addi r5,r0,-1
+ l.xori r5,r5,SPR_SR_DCE
+ l.and r5,r6,r5
+ l.mtspr r0,r5,SPR_SR
+
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contains the block size
+ */
+ l.mfspr r24,r0,SPR_DCCFGR
+ l.andi r26,r24,SPR_DCCFGR_CBS
+ l.srli r28,r26,7
+ l.ori r30,r0,16
+ l.sll r14,r30,r28
+
+ /* Establish number of cache sets
+ r16 contains number of cache sets
+ r28 contains log(# of cache sets)
+ */
+ l.andi r26,r24,SPR_DCCFGR_NCS
+ l.srli r28,r26,3
+ l.ori r30,r0,1
+ l.sll r16,r30,r28
+
+ /* Invalidate DC */
+ l.addi r6,r0,0
+ l.sll r5,r14,r28
+1:
+ l.mtspr r0,r6,SPR_DCBIR
+ l.sfne r6,r5
+ l.bf 1b
+ l.add r6,r6,r14
+
+ /* Enable DC */
+ l.mfspr r6,r0,SPR_SR
+ l.ori r6,r6,SPR_SR_DCE
+ l.mtspr r0,r6,SPR_SR
+9:
+ l.jr r9
+ l.nop
+
+/* ===============================================[ page table masks ]=== */
+
+/* bit 4 is used in hardware as write back cache bit. we never use this bit
+ * explicitly, so we can reuse it as _PAGE_FILE bit and mask it out when
+ * writing into hardware pte's
+ */
+
+#define DTLB_UP_CONVERT_MASK 0x3fa
+#define ITLB_UP_CONVERT_MASK 0x3a
+
+/* for SMP we'd have (this is a bit subtle, CC must be always set
+ * for SMP, but since we have _PAGE_PRESENT bit always defined
+ * we can just modify the mask)
+ */
+#define DTLB_SMP_CONVERT_MASK 0x3fb
+#define ITLB_SMP_CONVERT_MASK 0x3b
+
+/* ---[ boot dtlb miss handler ]----------------------------------------- */
+
+boot_dtlb_miss_handler:
+
+/* mask for DTLB_MR register: - (0) sets V (valid) bit,
+ * - (31-12) sets bits belonging to VPN (31-12)
+ */
+#define DTLB_MR_MASK 0xfffff001
+
+/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
+ * - (4) sets A (access) bit,
+ * - (5) sets D (dirty) bit,
+ * - (8) sets SRE (superuser read) bit
+ * - (9) sets SWE (superuser write) bit
+ * - (31-12) sets bits belonging to VPN (31-12)
+ */
+#define DTLB_TR_MASK 0xfffff332
+
+/* These are for masking out the VPN/PPN value from the MR/TR registers...
+ * it's not the same as the PFN */
+#define VPN_MASK 0xfffff000
+#define PPN_MASK 0xfffff000
+
+
+ EXCEPTION_STORE_GPR6
+
+#if 0
+ l.mfspr r6,r0,SPR_ESR_BASE //
+ l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
+ l.sfeqi r6,0 // r6 == 0x1 --> SM
+ l.bf exit_with_no_dtranslation //
+ l.nop
+#endif
+
+ /* this could be optimized by moving the storing of
+ * the non-r6 registers here, and jumping to the r6 restore
+ * if not in supervisor mode
+ */
+
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+
+ l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
+
+immediate_translation:
+ CLEAR_GPR(r6)
+
+ l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
+
+ l.mfspr r6, r0, SPR_DMMUCFGR
+ l.andi r6, r6, SPR_DMMUCFGR_NTS
+ l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
+ l.ori r5, r0, 0x1
+ l.sll r5, r5, r6 // r5 = number DMMU sets
+ l.addi r6, r5, -1 // r6 = nsets mask
+ l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
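+ /* in C terms (sketch): set = (ea >> 13) & (nsets - 1), where
+ * nsets = 1 << ((DMMUCFGR & NTS) >> NTS_OFF) */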
+
+ l.or r6,r6,r4 // r6 <- r4
+ l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
+ l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
+ l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry
+ l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR
+
+ /* set up DTLB with no translation for EA <= 0xbfffffff */
+ LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
+ l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
+ l.bf 1f // goto out
+ l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
+
+ tophys(r3,r4) // r3 <- PA
+1:
+ l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
+ l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
+ l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry
+ l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR
+
+ EXCEPTION_LOAD_GPR6
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR2
+
+ l.rfe // SR <- ESR, PC <- EPC
+
+exit_with_no_dtranslation:
+ /* EA out of memory or not in supervisor mode */
+ EXCEPTION_LOAD_GPR6
+ EXCEPTION_LOAD_GPR4
+ l.j _dispatch_bus_fault
+ l.nop // fill the branch delay slot
+
+/* ---[ boot itlb miss handler ]----------------------------------------- */
+
+boot_itlb_miss_handler:
+
+/* mask for ITLB_MR register: - sets V (valid) bit,
+ * - sets bits belonging to VPN (15-12)
+ */
+#define ITLB_MR_MASK 0xfffff001
+
+/* mask for ITLB_TR register: - sets A (access) bit,
+ * - sets SXE (superuser execute) bit
+ * - sets bits belonging to VPN (15-12)
+ */
+#define ITLB_TR_MASK 0xfffff050
+
+/*
+#define VPN_MASK 0xffffe000
+#define PPN_MASK 0xffffe000
+*/
+
+
+
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+ EXCEPTION_STORE_GPR6
+
+#if 0
+ l.mfspr r6,r0,SPR_ESR_BASE //
+ l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
+ l.sfeqi r6,0 // r6 == 0x1 --> SM
+ l.bf exit_with_no_itranslation
+ l.nop
+#endif
+
+
+ l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
+
+earlyearly:
+ CLEAR_GPR(r6)
+
+ l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
+
+ l.mfspr r6, r0, SPR_IMMUCFGR
+ l.andi r6, r6, SPR_IMMUCFGR_NTS
+ l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
+ l.ori r5, r0, 0x1
+ l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR
+ l.addi r6, r5, -1 // r6 = nsets mask
+ l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
+
+ l.or r6,r6,r4 // r6 <- r4
+ l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
+ l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
+ l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry
+ l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR
+
+ /*
+ * set up ITLB with no translation for EA <= 0x0fffffff
+ *
+ * we need this for head.S mapping (EA = PA). if we move all functions
+ * which run with mmu enabled into entry.S, we might be able to eliminate this.
+ *
+ */
+ LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
+ l.sfgeu r6,r4 // flag if r6 >= r4 (if 0x0fffffff >= EA)
+ l.bf 1f // goto out
+ l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
+
+ tophys(r3,r4) // r3 <- PA
+1:
+ l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
+ l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
+ l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry
+ l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR
+
+ EXCEPTION_LOAD_GPR6
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR2
+
+ l.rfe // SR <- ESR, PC <- EPC
+
+exit_with_no_itranslation:
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR6
+ l.j _dispatch_bus_fault
+ l.nop
+
+/* ====================================================================== */
+/*
+ * Stuff below here shouldn't go into .head section... maybe this stuff
+ * can be moved to entry.S ???
+ */
+
+/* ==============================================[ DTLB miss handler ]=== */
+
+/*
+ * Comments:
+ * Exception handlers are entered with MMU off so the following handler
+ * needs to use physical addressing
+ *
+ */
+
+ .text
+ENTRY(dtlb_miss_handler)
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+ EXCEPTION_STORE_GPR6
+ /*
+ * get EA of the miss
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+ /*
+ * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
+ */
+ GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
+ l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
+ l.slli r4,r4,0x2 // to get address << 2
+ l.add r5,r4,r3 // r4 is pgd_index(daddr)
+ /*
+ * if (pmd_none(*pmd))
+ * goto pmd_none:
+ */
+ tophys (r4,r5)
+ l.lwz r3,0x0(r4) // get *pmd value
+ l.sfne r3,r0
+ l.bnf d_pmd_none
+ l.andi r3,r3,~PAGE_MASK // 0x1fff == ~PAGE_MASK
+ /*
+ * if (pmd_bad(*pmd))
+ * pmd_clear(pmd)
+ * goto pmd_bad:
+ */
+// l.sfeq r3,r0 // check *pmd value
+// l.bf d_pmd_good
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+// l.j d_pmd_bad
+// l.sw 0x0(r4),r0 // clear pmd
+d_pmd_good:
+ /*
+ * pte = *pte_offset(pmd, daddr);
+ */
+ l.lwz r4,0x0(r4) // get **pmd value
+ l.and r4,r4,r3 // & PAGE_MASK
+ l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
+ l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
+ l.slli r3,r3,0x2 // to get address << 2
+ l.add r3,r3,r4
+ l.lwz r2,0x0(r3) // this is pte at last
+ /*
+ * if (!pte_present(pte))
+ */
+ l.andi r4,r2,0x1
+ l.sfne r4,r0 // is pte present
+ l.bnf d_pte_not_present
+ l.addi r3,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK
+ /*
+ * fill DTLB TR register
+ */
+ l.and r4,r2,r3 // apply the mask
+ // Determine number of DMMU sets
+ l.mfspr r6, r0, SPR_DMMUCFGR
+ l.andi r6, r6, SPR_DMMUCFGR_NTS
+ l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
+ l.ori r3, r0, 0x1
+ l.sll r3, r3, r6 // r3 = number DMMU sets DMMUCFGR
+ l.addi r6, r3, -1 // r6 = nsets mask
+ l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
+ //NUM_TLB_ENTRIES
+ l.mtspr r5,r4,SPR_DTLBTR_BASE(0)
+ /*
+ * fill DTLB MR register
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+ l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
+ l.ori r4,r4,0x1 // set hardware valid bit: DTLB_MR entry
+ l.mtspr r5,r4,SPR_DTLBMR_BASE(0)
+
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+d_pmd_bad:
+ l.nop 1
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+d_pmd_none:
+d_pte_not_present:
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.j _dispatch_do_dpage_fault
+ l.nop
+
+/* ==============================================[ ITLB miss handler ]=== */
+ENTRY(itlb_miss_handler)
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+ EXCEPTION_STORE_GPR6
+ /*
+ * get EA of the miss
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+
+ /*
+ * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
+ *
+ */
+ GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
+ l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
+ l.slli r4,r4,0x2 // to get address << 2
+ l.add r5,r4,r3 // r4 is pgd_index(daddr)
+ /*
+ * if (pmd_none(*pmd))
+ * goto pmd_none:
+ */
+ tophys (r4,r5)
+ l.lwz r3,0x0(r4) // get *pmd value
+ l.sfne r3,r0
+ l.bnf i_pmd_none
+ l.andi r3,r3,0x1fff // ~PAGE_MASK
+ /*
+ * if (pmd_bad(*pmd))
+ * pmd_clear(pmd)
+ * goto pmd_bad:
+ */
+
+// l.sfeq r3,r0 // check *pmd value
+// l.bf i_pmd_good
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+// l.j i_pmd_bad
+// l.sw 0x0(r4),r0 // clear pmd
+
+i_pmd_good:
+ /*
+ * pte = *pte_offset(pmd, iaddr);
+ *
+ */
+ l.lwz r4,0x0(r4) // get **pmd value
+ l.and r4,r4,r3 // & PAGE_MASK
+ l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
+ l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
+ l.slli r3,r3,0x2 // to get address << 2
+ l.add r3,r3,r4
+ l.lwz r2,0x0(r3) // this is pte at last
+ /*
+ * if (!pte_present(pte))
+ *
+ */
+ l.andi r4,r2,0x1
+ l.sfne r4,r0 // is pte present
+ l.bnf i_pte_not_present
+ l.addi r3,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK
+ /*
+ * fill ITLB TR register
+ */
+ l.and r4,r2,r3 // apply the mask
+ l.andi r3,r2,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
+// l.andi r3,r2,0x400 // _PAGE_EXEC
+ l.sfeq r3,r0
+ l.bf itlb_tr_fill //_workaround
+ // Determine number of IMMU sets
+ l.mfspr r6, r0, SPR_IMMUCFGR
+ l.andi r6, r6, SPR_IMMUCFGR_NTS
+ l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
+ l.ori r3, r0, 0x1
+ l.sll r3, r3, r6 // r3 = number IMMU sets IMMUCFGR
+ l.addi r6, r3, -1 // r6 = nsets mask
+ l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
+
+/*
+ * __PHX__ :: fixme
+ * we should not just blindly set executable flags,
+ * but it does help with ping. the clean way would be to find out
+ * (and fix it) why the stack doesn't have execution permissions
+ */
+
+itlb_tr_fill_workaround:
+ l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
+itlb_tr_fill:
+ l.mtspr r5,r4,SPR_ITLBTR_BASE(0)
+ /*
+ * fill ITLB MR register
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+ l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
+ l.ori r4,r4,0x1 // set hardware valid bit: ITLB_MR entry
+ l.mtspr r5,r4,SPR_ITLBMR_BASE(0)
+
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+
+i_pmd_bad:
+ l.nop 1
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+i_pmd_none:
+i_pte_not_present:
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.j _dispatch_do_ipage_fault
+ l.nop
+
+/* ==============================================[ boot tlb handlers ]=== */
+
+
+/* =================================================[ debugging aids ]=== */
+
+ .align 64
+_immu_trampoline:
+ .space 64
+_immu_trampoline_top:
+
+#define TRAMP_SLOT_0 (0x0)
+#define TRAMP_SLOT_1 (0x4)
+#define TRAMP_SLOT_2 (0x8)
+#define TRAMP_SLOT_3 (0xc)
+#define TRAMP_SLOT_4 (0x10)
+#define TRAMP_SLOT_5 (0x14)
+#define TRAMP_FRAME_SIZE (0x18)
+
+ENTRY(_immu_trampoline_workaround)
+ // r2 EEA
+ // r6 is physical EEA
+ tophys(r6,r2)
+
+ LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
+ tophys (r3,r5) // r3 is trampoline (physical)
+
+ LOAD_SYMBOL_2_GPR(r4,0x15000000)
+ l.sw TRAMP_SLOT_0(r3),r4
+ l.sw TRAMP_SLOT_1(r3),r4
+ l.sw TRAMP_SLOT_4(r3),r4
+ l.sw TRAMP_SLOT_5(r3),r4
+
+ // EPC = EEA - 0x4
+ l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address)
+ l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data
+ l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address)
+ l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data
+
+ l.srli r5,r4,26 // extract the opcode (bits 31-26)
+ l.sfeqi r5,0 // l.j
+ l.bf 0f
+ l.sfeqi r5,0x11 // l.jr
+ l.bf 1f
+ l.sfeqi r5,1 // l.jal
+ l.bf 2f
+ l.sfeqi r5,0x12 // l.jalr
+ l.bf 3f
+ l.sfeqi r5,3 // l.bnf
+ l.bf 4f
+ l.sfeqi r5,4 // l.bf
+ l.bf 5f
+99:
+ l.nop
+ l.j 99b // should never happen
+ l.nop 1
+
+ // r2 is EEA
+ // r3 is trampoline address (physical)
+ // r4 is instruction
+ // r6 is physical(EEA)
+ //
+ // r5
+
+2: // l.jal
+
+ /* 19 20 aa aa l.movhi r9,0xaaaa
+ * a9 29 bb bb l.ori r9,0xbbbb
+ *
+ * where 0xaaaabbbb is EEA + 0x4 shifted right 2
+ */
+
+ l.addi r6,r2,0x4 // this is 0xaaaabbbb
+
+ // l.movhi r9,0xaaaa
+ l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
+ l.sh (TRAMP_SLOT_0+0x0)(r3),r5
+ l.srli r5,r6,16
+ l.sh (TRAMP_SLOT_0+0x2)(r3),r5
+
+ // l.ori r9,0xbbbb
+ l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
+ l.sh (TRAMP_SLOT_1+0x0)(r3),r5
+ l.andi r5,r6,0xffff
+ l.sh (TRAMP_SLOT_1+0x2)(r3),r5
+
+ /* fallthrough, need to set up new jump offset */
+
+
+0: // l.j
+ l.slli r6,r4,6 // original offset shifted left 6 - 2
+// l.srli r6,r6,6 // original offset shifted right 2
+
+ l.slli r4,r2,4 // old jump position: EEA shifted left 4
+// l.srli r4,r4,6 // old jump position: shifted right 2
+
+ l.addi r5,r3,0xc // new jump position (physical)
+ l.slli r5,r5,4 // new jump position: shifted left 4
+
+ // calculate new jump offset
+ // new_off = old_off + (old_jump - new_jump)
+
+ l.sub r5,r4,r5 // old_jump - new_jump
+ l.add r5,r6,r5 // orig_off + (old_jump - new_jump)
+ l.srli r5,r5,6 // new offset shifted right 2
+
+ // r5 is new jump offset
+ // l.j has opcode 0x0...
+ l.sw TRAMP_SLOT_2(r3),r5 // write it back
+
+ l.j trampoline_out
+ l.nop
+
+/* ----------------------------- */
+
+3: // l.jalr
+
+ /* 19 20 aa aa l.movhi r9,0xaaaa
+ * a9 29 bb bb l.ori r9,0xbbbb
+ *
+ * where 0xaaaabbbb is EEA + 0x4 shifted right 2
+ */
+
+ l.addi r6,r2,0x4 // this is 0xaaaabbbb
+
+ // l.movhi r9,0xaaaa
+ l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
+ l.sh (TRAMP_SLOT_0+0x0)(r3),r5
+ l.srli r5,r6,16
+ l.sh (TRAMP_SLOT_0+0x2)(r3),r5
+
+ // l.ori r9,0xbbbb
+ l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
+ l.sh (TRAMP_SLOT_1+0x0)(r3),r5
+ l.andi r5,r6,0xffff
+ l.sh (TRAMP_SLOT_1+0x2)(r3),r5
+
+ l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction
+ l.andi r5,r5,0x3ff // clear out opcode part
+ l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr
+ l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
+
+ /* fallthrough */
+
+1: // l.jr
+ l.j trampoline_out
+ l.nop
+
+/* ----------------------------- */
+
+4: // l.bnf
+5: // l.bf
+ l.slli r6,r4,6 // original offset shifted left 6 - 2
+// l.srli r6,r6,6 // original offset shifted right 2
+
+ l.slli r4,r2,4 // old jump position: EEA shifted left 4
+// l.srli r4,r4,6 // old jump position: shifted right 2
+
+ l.addi r5,r3,0xc // new jump position (physical)
+ l.slli r5,r5,4 // new jump position: shifted left 4
+
+ // calculate new jump offset
+ // new_off = old_off + (old_jump - new_jump)
+
+ l.add r6,r6,r4 // (orig_off + old_jump)
+ l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump
+ l.srli r6,r6,6 // new offset shifted right 2
+
+ // r6 is new jump offset
+ l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction
+ l.srli r4,r4,16
+ l.andi r4,r4,0xfc00 // get opcode part
+ l.slli r4,r4,16
+ l.or r6,r4,r6 // l.b(n)f new offset
+ l.sw TRAMP_SLOT_2(r3),r6 // write it back
+
+ /* we need to add l.j to EEA + 0x8 */
+ tophys (r4,r2) // may not be needed (due to shifts down)
+ l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8)
+ // jump position = r5 + 0x8 (0x8 compensated)
+ l.sub r4,r4,r5 // jump offset = target - new_position + 0x8
+
+ l.slli r4,r4,4 // the amount of info in the immediate of the jump
+ l.srli r4,r4,6 // jump instruction with offset
+ l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot
+
+ /* fallthrough */
+
+trampoline_out:
+ // set up new EPC to point to our trampoline code
+ LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
+ l.mtspr r0,r5,SPR_EPCR_BASE
+
+ // immu_trampoline is (4x) CACHE_LINE aligned
+ // and only 6 instructions long,
+ // so we need to invalidate only 2 lines
+
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contains the block size
+ */
+ l.mfspr r21,r0,SPR_ICCFGR
+ l.andi r21,r21,SPR_ICCFGR_CBS
+ l.srli r21,r21,7
+ l.ori r23,r0,16
+ l.sll r14,r23,r21
+
+ l.mtspr r0,r5,SPR_ICBIR
+ l.add r5,r5,r14
+ l.mtspr r0,r5,SPR_ICBIR
+
+ l.jr r9
+ l.nop
+
+
+/*
+ * DSCR: prints a string referenced by r3.
+ *
+ * PRMS: r3 - address of the first character of a null-
+ * terminated string to be printed
+ *
+ * PREQ: UART at UART_BASE_ADD has to be initialized
+ *
+ * POST: caller should be aware that r3, r9 are changed
+ */
+ENTRY(_emergency_print)
+ EMERGENCY_PRINT_STORE_GPR4
+ EMERGENCY_PRINT_STORE_GPR5
+ EMERGENCY_PRINT_STORE_GPR6
+ EMERGENCY_PRINT_STORE_GPR7
+2:
+ l.lbz r7,0(r3)
+ l.sfeq r7,r0
+ l.bf 9f
+ l.nop
+
+// putc:
+ l.movhi r4,hi(UART_BASE_ADD)
+
+ l.addi r6,r0,0x20
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x20
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ l.sb 0(r4),r7
+
+ l.addi r6,r0,0x60
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x60
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ /* next character */
+ l.j 2b
+ l.addi r3,r3,0x1
+
+9:
+ EMERGENCY_PRINT_LOAD_GPR7
+ EMERGENCY_PRINT_LOAD_GPR6
+ EMERGENCY_PRINT_LOAD_GPR5
+ EMERGENCY_PRINT_LOAD_GPR4
+ l.jr r9
+ l.nop
+
+ENTRY(_emergency_print_nr)
+ EMERGENCY_PRINT_STORE_GPR4
+ EMERGENCY_PRINT_STORE_GPR5
+ EMERGENCY_PRINT_STORE_GPR6
+ EMERGENCY_PRINT_STORE_GPR7
+ EMERGENCY_PRINT_STORE_GPR8
+
+ l.addi r8,r0,32 // shift register
+
+1: /* remove leading zeros */
+ l.addi r8,r8,-0x4
+ l.srl r7,r3,r8
+ l.andi r7,r7,0xf
+
+ /* don't skip the last zero if number == 0x0 */
+ l.sfeqi r8,0x4
+ l.bf 2f
+ l.nop
+
+ l.sfeq r7,r0
+ l.bf 1b
+ l.nop
+
+2:
+ l.srl r7,r3,r8
+
+ l.andi r7,r7,0xf
+ l.sflts r8,r0
+ l.bf 9f
+
+ l.sfgtui r7,0x9
+ l.bnf 8f
+ l.nop
+ l.addi r7,r7,0x27
+
+8:
+ l.addi r7,r7,0x30
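+// nibble -> ASCII: +0x30 maps 0x0-0x9 to '0'-'9'; combined with the
+// +0x27 above, 0xa-0xf map to 'a'-'f' (0x0a + 0x57 = 0x61 = 'a')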
+// putc:
+ l.movhi r4,hi(UART_BASE_ADD)
+
+ l.addi r6,r0,0x20
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x20
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ l.sb 0(r4),r7
+
+ l.addi r6,r0,0x60
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x60
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ /* next character */
+ l.j 2b
+ l.addi r8,r8,-0x4
+
+9:
+ EMERGENCY_PRINT_LOAD_GPR8
+ EMERGENCY_PRINT_LOAD_GPR7
+ EMERGENCY_PRINT_LOAD_GPR6
+ EMERGENCY_PRINT_LOAD_GPR5
+ EMERGENCY_PRINT_LOAD_GPR4
+ l.jr r9
+ l.nop
+
+
+/*
+ * This should be used for debugging only.
+ * It messes up the Linux early serial output
+ * somehow, so use it sparingly and essentially
+ * only if you need to debug something that goes wrong
+ * before Linux gets the early serial going.
+ *
+ * Furthermore, you'll have to make sure you set the
+ * UART_DIVISOR correctly according to the system
+ * clock rate.
+ */
+
+
+
+#define SYS_CLK 20000000
+//#define SYS_CLK 1843200
+#define OR32_CONSOLE_BAUD 115200
+#define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD)
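+
+/* e.g. with SYS_CLK = 20 MHz: 20000000 / (16 * 115200) = 10 after integer
+ * division; that value is programmed into DLM/DLL below. */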
+
+ENTRY(_early_uart_init)
+ l.movhi r3,hi(UART_BASE_ADD)
+
+ l.addi r4,r0,0x7
+ l.sb 0x2(r3),r4
+
+ l.addi r4,r0,0x0
+ l.sb 0x1(r3),r4
+
+ l.addi r4,r0,0x3
+ l.sb 0x3(r3),r4
+
+ l.lbz r5,3(r3)
+ l.ori r4,r5,0x80
+ l.sb 0x3(r3),r4
+ l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
+ l.sb UART_DLM(r3),r4
+ l.addi r4,r0,((UART_DIVISOR) & 0x000000ff)
+ l.sb UART_DLL(r3),r4
+ l.sb 0x3(r3),r5
+
+ l.jr r9
+ l.nop
+
+_string_copying_linux:
+ .string "\n\n\n\n\n\rCopying Linux... \0"
+
+_string_ok_booting:
+ .string "Ok, booting the kernel.\n\r\0"
+
+_string_unhandled_exception:
+ .string "\n\rRunarunaround: Unhandled exception 0x\0"
+
+_string_epc_prefix:
+ .string ": EPC=0x\0"
+
+_string_nl:
+ .string "\n\r\0"
+
+ .global _string_esr_irq_bug
+_string_esr_irq_bug:
+ .string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0"
+
+
+
+/* ========================================[ page aligned structures ]=== */
+
+/*
+ * .data section should be page aligned
+ * (look into arch/or32/kernel/vmlinux.lds)
+ */
+ .section .data,"aw"
+ .align 8192
+ .global empty_zero_page
+empty_zero_page:
+ .space 8192
+
+ .global swapper_pg_dir
+swapper_pg_dir:
+ .space 8192
+
+ .global _unhandled_stack
+_unhandled_stack:
+ .space 8192
+_unhandled_stack_top:
+
+/* ============================================================[ EOF ]=== */
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
new file mode 100644
index 000000000000..d5bc5f813e89
--- /dev/null
+++ b/arch/openrisc/kernel/idle.c
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC idle.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Idle daemon for or32. It handles any action that needs
+ * to be taken when the system becomes idle.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/cache.h>
+#include <asm/pgalloc.h>
+
+void (*powersave) (void) = NULL;
+
+static inline void pm_idle(void)
+{
+ barrier();
+}
+
+void cpu_idle(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_stop_sched_tick(1);
+
+ while (!need_resched()) {
+ check_pgt_cache();
+ rmb();
+
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+
+ local_irq_disable();
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+ if (!need_resched() && powersave != NULL)
+ powersave();
+ start_critical_timings();
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ }
+
+ tick_nohz_restart_sched_tick();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
diff --git a/arch/openrisc/kernel/init_task.c b/arch/openrisc/kernel/init_task.c
new file mode 100644
index 000000000000..45744a384927
--- /dev/null
+++ b/arch/openrisc/kernel/init_task.c
@@ -0,0 +1,41 @@
+/*
+ * OpenRISC init_task.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is THREAD_SIZE aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union __init_task_data = {
+ INIT_THREAD_INFO(init_task)
+};
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
new file mode 100644
index 000000000000..59b302338331
--- /dev/null
+++ b/arch/openrisc/kernel/irq.c
@@ -0,0 +1,172 @@
+/*
+ * OpenRISC irq.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/ftrace.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/kernel_stat.h>
+
+#include <linux/irqflags.h>
+
+/* read interrupt enabled status */
+unsigned long arch_local_save_flags(void)
+{
+ return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE);
+}
+EXPORT_SYMBOL(arch_local_save_flags);
+
+/* set interrupt enabled status */
+void arch_local_irq_restore(unsigned long flags)
+{
+ mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags));
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+/* OR1K PIC implementation */
+
+/* We're a couple of cycles faster than the generic implementations with
+ * these 'fast' versions.
+ */
+
+static void or1k_pic_mask(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->irq));
+}
+
+static void or1k_pic_unmask(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->irq));
+}
+
+static void or1k_pic_ack(struct irq_data *data)
+{
+ /* EDGE-triggered interrupts need to be ack'ed in order to clear
+ * the latch.
+ * LEVEL-triggered interrupts do not need to be ack'ed; however,
+ * ack'ing the interrupt has no ill-effect and is quicker than
+ * trying to figure out what type it is...
+ */
+
+ /* The OpenRISC 1000 spec says to write a 1 to the bit to ack the
+ * interrupt, but the OR1200 does this backwards and requires a 0
+ * to be written...
+ */
+
+#ifdef CONFIG_OR1K_1200
+ /* There are two oddities with the OR1200 PIC implementation:
+ * i) LEVEL-triggered interrupts are latched and need to be cleared
+ * ii) the interrupt latch is cleared by writing a 0 to the bit,
+ * as opposed to a 1 as mandated by the spec
+ */
+
+ mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq));
+#else
+ WARN(1, "Interrupt handling possibly broken\n");
+ mtspr(SPR_PICSR, (1UL << data->irq));
+#endif
+}
+
+static void or1k_pic_mask_ack(struct irq_data *data)
+{
+ /* Comments for pic_ack apply here, too */
+
+#ifdef CONFIG_OR1K_1200
+ mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq));
+#else
+ WARN(1, "Interrupt handling possibly broken\n");
+ mtspr(SPR_PICSR, (1UL << data->irq));
+#endif
+}
+
+static int or1k_pic_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* There's nothing to do in the PIC configuration when changing
+ * flow type. Level and edge-triggered interrupts are both
+ * supported, but it's PIC-implementation specific which type
+ * is handled. */
+
+ return irq_setup_alt_chip(data, flow_type);
+}
+
+static inline int pic_get_irq(int first)
+{
+ int irq;
+
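+ /* ffs() gives the 1-based index of the lowest set bit (0 if none),
+ * so adding 'first - 1' converts it back to an absolute IRQ number.
+ */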
+ irq = ffs(mfspr(SPR_PICSR) >> first);
+
+ return irq ? irq + first - 1 : NO_IRQ;
+}
+
+static void __init or1k_irq_init(void)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ /* Disable all interrupts until explicitly requested */
+ mtspr(SPR_PICMR, (0UL));
+
+ gc = irq_alloc_generic_chip("or1k-PIC", 1, 0, 0, handle_level_irq);
+ ct = gc->chip_types;
+
+ ct->chip.irq_unmask = or1k_pic_unmask;
+ ct->chip.irq_mask = or1k_pic_mask;
+ ct->chip.irq_ack = or1k_pic_ack;
+ ct->chip.irq_mask_ack = or1k_pic_mask_ack;
+ ct->chip.irq_set_type = or1k_pic_set_type;
+
+ /* The OR1K PIC can handle both level- and edge-triggered
+ * interrupts in roughly the same manner
+ */
+#if 0
+ /* FIXME: chip.type??? */
+ ct->chip.type = IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_MASK;
+#endif
+
+ irq_setup_generic_chip(gc, IRQ_MSK(NR_IRQS), 0,
+ IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+}
+
+void __init init_IRQ(void)
+{
+ or1k_irq_init();
+}
+
+void __irq_entry do_IRQ(struct pt_regs *regs)
+{
+ int irq = -1;
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
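+ /* Service every pending interrupt, scanning upwards from the one
+ * just handled, until PICSR reports nothing further pending.
+ */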
+ while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
+ generic_handle_irq(irq);
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
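+/* The PIC uses a one-cell interrupt specifier; the hardware IRQ number
+ * maps directly onto the Linux IRQ number.
+ */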
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c
new file mode 100644
index 000000000000..10ff50f0202a
--- /dev/null
+++ b/arch/openrisc/kernel/module.c
@@ -0,0 +1,72 @@
+/*
+ * OpenRISC module.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ uint32_t value;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+ value = sym->st_value + rel[i].r_addend;
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_OR32_32:
+ *location = value;
+ break;
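+ /* OpenRISC is big-endian, so the 16-bit immediate field lives in
+ * the second halfword of the 32-bit instruction word.
+ */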
+ case R_OR32_CONST:
+ *((uint16_t *)location + 1) = (uint16_t)value;
+ break;
+ case R_OR32_CONSTH:
+ *((uint16_t *)location + 1) = (uint16_t)(value >> 16);
+ break;
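+ /* l.j/l.jal targets are PC-relative word offsets packed into the
+ * low 26 bits of the instruction; the opcode in the top 6 bits
+ * is preserved.
+ */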
+ case R_OR32_JUMPTARG:
+ value -= (uint32_t)location;
+ value >>= 2;
+ value &= 0x03ffffff;
+ value |= *location & 0xfc000000;
+ *location = value;
+ break;
+ default:
+ pr_err("module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ break;
+ }
+ }
+
+ return 0;
+}
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
new file mode 100644
index 000000000000..83ccf7c0c58d
--- /dev/null
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -0,0 +1,46 @@
+/*
+ * OpenRISC or32_ksyms.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/semaphore.h>
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/delay.h>
+#include <asm/pgalloc.h>
+
+#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
+
+/* compiler generated symbols */
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__ashrdi3);
+DECLARE_EXPORT(__ashldi3);
+DECLARE_EXPORT(__lshrdi3);
+
+EXPORT_SYMBOL(__copy_tofrom_user);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
new file mode 100644
index 000000000000..e4209af879ec
--- /dev/null
+++ b/arch/openrisc/kernel/process.c
@@ -0,0 +1,311 @@
+/*
+ * OpenRISC process.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file handles the architecture-dependent parts of process handling...
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/elfcore.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/spr_defs.h>
+
+#include <linux/smp.h>
+
+/*
+ * Per-CPU pointers to the current thread_info structure.
+ *
+ * Used at user space -> kernel transitions.
+ */
+struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, };
+
+void machine_restart(void)
+{
+ printk(KERN_INFO "*** MACHINE RESTART ***\n");
+ __asm__("l.nop 1");
+}
+
+/*
+ * Similar to machine_power_off, but don't shut off power. Add code
+ * here to freeze the system for e.g. post-mortem debug purpose when
+ * possible. This halt has nothing to do with the idle halt.
+ */
+void machine_halt(void)
+{
+ printk(KERN_INFO "*** MACHINE HALT ***\n");
+ __asm__("l.nop 1");
+}
+
+/* If or when software power-off is implemented, add code here. */
+void machine_power_off(void)
+{
+ printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
+ __asm__("l.nop 1");
+}
+
+void (*pm_power_off) (void) = machine_power_off;
+
+/*
+ * When a process does an "exec", machine state like FPU and debug
+ * registers need to be reset. This is a hook function for that.
+ * Currently we don't have any such state to reset, so this is empty.
+ */
+void flush_thread(void)
+{
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ extern void show_registers(struct pt_regs *regs);
+
+ /* __PHX__ cleanup this mess */
+ show_registers(regs);
+}
+
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ return (unsigned long)user_regs(t->stack)->pc;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Copy the thread-specific (arch specific) info from the current
+ * process to the new one p
+ */
+extern asmlinkage void ret_from_fork(void);
+
+int
+copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long unused, struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+ struct pt_regs *kregs;
+ unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ struct thread_info *ti;
+ unsigned long top_of_kernel_stack;
+
+ top_of_kernel_stack = sp;
+
+ p->set_child_tid = p->clear_child_tid = NULL;
+
+ /* Copy registers */
+ /* redzone */
+ sp -= STACK_FRAME_OVERHEAD;
+ sp -= sizeof(struct pt_regs);
+ childregs = (struct pt_regs *)sp;
+
+ /* Copy parent registers */
+ *childregs = *regs;
+
+ if ((childregs->sr & SPR_SR_SM) == 1) {
+ /* for kernel thread, set `current_thread_info'
+ * and stackptr in new task
+ */
+ childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ childregs->gpr[10] = (unsigned long)task_thread_info(p);
+ } else {
+ childregs->sp = usp;
+ }
+
+ childregs->gpr[11] = 0; /* Result from fork() */
+
+ /*
+ * The way this works is that at some point in the future
+ * some task will call _switch to switch to the new task.
+ * That will pop off the stack frame created below and start
+ * the new task running at ret_from_fork. The new task will
+ * do some housekeeping and then return from the fork or clone
+ * system call, using the stack frame created above.
+ */
+ /* redzone */
+ sp -= STACK_FRAME_OVERHEAD;
+ sp -= sizeof(struct pt_regs);
+ kregs = (struct pt_regs *)sp;
+
+ ti = task_thread_info(p);
+ ti->ksp = sp;
+
+ /* kregs->sp must store the location of the 'pre-switch' kernel stack
+ * pointer... for a newly forked process, this is simply the top of
+ * the kernel stack.
+ */
+ kregs->sp = top_of_kernel_stack;
+ kregs->gpr[3] = (unsigned long)current; /* arg to schedule_tail */
+ kregs->gpr[10] = (unsigned long)task_thread_info(p);
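+ /* r9 is the OpenRISC link register, so restoring this frame makes
+ * _switch "return" straight into ret_from_fork.
+ */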
+ kregs->gpr[9] = (unsigned long)ret_from_fork;
+
+ return 0;
+}
+
+/*
+ * Set up a thread for executing a new program
+ */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ unsigned long sr = regs->sr & ~SPR_SR_SM;
+
+ set_fs(USER_DS);
+ memset(regs->gpr, 0, sizeof(regs->gpr));
+
+ regs->pc = pc;
+ regs->sr = sr;
+ regs->sp = sp;
+
+/* printk("start thread, ksp = %lx\n", current_thread_info()->ksp);*/
+}
+
+/* Fill in the fpu structure for a core dump. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
+{
+ /* TODO */
+ return 0;
+}
+
+extern struct thread_info *_switch(struct thread_info *old_ti,
+ struct thread_info *new_ti);
+
+struct task_struct *__switch_to(struct task_struct *old,
+ struct task_struct *new)
+{
+ struct task_struct *last;
+ struct thread_info *new_ti, *old_ti;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* current_thread_info_set is an array of saved thread_info pointers
+ * (one for each cpu). We need them at user->kernel transitions,
+ * and we update them here at context-switch time.
+ */
+ new_ti = new->stack;
+ old_ti = old->stack;
+
+ current_thread_info_set[smp_processor_id()] = new_ti;
+ last = (_switch(old_ti, new_ti))->task;
+
+ local_irq_restore(flags);
+
+ return last;
+}
+
+/*
+ * Write out registers in core dump format, as defined by the
+ * struct user_regs_struct
+ */
+void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs)
+{
+ dest[0] = 0; /* r0 */
+ memcpy(dest+1, regs->gpr+1, 31*sizeof(unsigned long));
+ dest[32] = regs->pc;
+ dest[33] = regs->sr;
+ dest[34] = 0;
+ dest[35] = 0;
+}
+
+extern void _kernel_thread_helper(void);
+
+void __noreturn kernel_thread_helper(int (*fn) (void *), void *arg)
+{
+ do_exit(fn(arg));
+}
+
+/*
+ * Create a kernel thread.
+ */
+int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.gpr[20] = (unsigned long)fn;
+ regs.gpr[22] = (unsigned long)arg;
+ regs.sr = mfspr(SPR_SR);
+ regs.pc = (unsigned long)_kernel_thread_helper;
+
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+ 0, &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage long _sys_execve(const char __user *name,
+ const char __user * const __user *argv,
+ const char __user * const __user *envp,
+ struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname(name);
+ error = PTR_ERR(filename);
+
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+
+out:
+ return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ /* TODO */
+
+ return 0;
+}
+
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
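+ /* OpenRISC syscall convention: the syscall number goes in r11, which
+ * also carries back the result, and the arguments go in r3-r5; the
+ * 'l.sys 1' instruction traps into the kernel. The clobber list
+ * covers the remaining call-clobbered registers.
+ */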
+ register long __res asm("r11") = __NR_execve;
+ register long __a asm("r3") = (long)(filename);
+ register long __b asm("r4") = (long)(argv);
+ register long __c asm("r5") = (long)(envp);
+ __asm__ volatile ("l.sys 1"
+ : "=r" (__res), "=r"(__a), "=r"(__b), "=r"(__c)
+ : "0"(__res), "1"(__a), "2"(__b), "3"(__c)
+ : "r6", "r7", "r8", "r12", "r13", "r15",
+ "r17", "r19", "r21", "r23", "r25", "r27",
+ "r29", "r31");
+ __asm__ volatile ("l.nop");
+ return __res;
+}
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
new file mode 100644
index 000000000000..1bb58ba89afa
--- /dev/null
+++ b/arch/openrisc/kernel/prom.c
@@ -0,0 +1,108 @@
+/*
+ * OpenRISC prom.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Architecture specific procedures for creating, accessing and
+ * interpreting the device tree.
+ *
+ */
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+#include <asm/prom.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ size &= PAGE_MASK;
+ memblock_add(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+
+void __init early_init_devtree(void *params)
+{
+ void *alloc;
+
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+
+ /* Retrieve various information from the /chosen node of the
+ * device-tree, including the platform type, initrd location and
+ * size, TCE reserve, and more ...
+ */
+ of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
+
+ /* Scan memory nodes and rebuild MEMBLOCKs */
+ memblock_init();
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ /* Save command line for /proc/cmdline and then parse parameters */
+ strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
+
+ memblock_analyze();
+
+ /* We must copy the flattened device tree from init memory to regular
+ * memory because the device tree references the strings in it
+ * directly.
+ */
+
+ alloc = __va(memblock_alloc(initial_boot_params->totalsize, PAGE_SIZE));
+
+ memcpy(alloc, initial_boot_params, initial_boot_params->totalsize);
+
+ initial_boot_params = alloc;
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
new file mode 100644
index 000000000000..656b94beab89
--- /dev/null
+++ b/arch/openrisc/kernel/ptrace.c
@@ -0,0 +1,211 @@
+/*
+ * OpenRISC ptrace.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <stddef.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/audit.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+
+#include <asm/thread_info.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+/*
+ * Copy the thread state to a regset that can be interpreted by userspace.
+ *
+ * It doesn't matter what our internal pt_regs structure looks like. The
+ * important thing is that we export a consistent view of the thread state
+ * to userspace. As such, we need to make sure that the regset remains
+ * ABI compatible as defined by the struct user_regs_struct:
+ *
+ * (Each item is a 32-bit word)
+ * r0 = 0 (exported for clarity)
+ * 31 GPRS r1-r31
+ * PC (Program counter)
+ * SR (Supervision register)
+ */
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user * ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* r0 */
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, 4);
+
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->gpr+1, 4, 4*32);
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->pc, 4*32, 4*33);
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->sr, 4*33, 4*34);
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 4*34, -1);
+
+ return ret;
+}
+
+/*
+ * Set the thread state from a regset passed in via ptrace
+ */
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user * ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* ignore r0 */
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
+ /* r1 - r31 */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->gpr+1, 4, 4*32);
+ /* PC */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc, 4*32, 4*33);
+ /*
+ * Skip SR and padding... userspace isn't allowed to change bits in
+ * the Supervision register
+ */
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 4*33, -1);
+
+ return ret;
+}
+
+/*
+ * Define the register sets available on OpenRISC under Linux
+ */
+enum or1k_regset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset or1k_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+};
+
+static const struct user_regset_view user_or1k_native_view = {
+ .name = "or1k",
+ .e_machine = EM_OPENRISC,
+ .regsets = or1k_regsets,
+ .n = ARRAY_SIZE(or1k_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_or1k_native_view;
+}
+
+/*
+ * We do not yet catch signals sent when the child dies;
+ * those are handled in exit.c and in signal.c.
+ */
+
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ pr_debug("ptrace_disable(): TODO\n");
+
+ user_disable_single_step(child);
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+}
+
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+ unsigned long data)
+{
+ int ret;
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ long ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in regs->syscallno.
+ */
+ ret = -1L;
+
+ /* Are these regs right??? */
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(audit_arch(), regs->syscallno,
+ regs->gpr[3], regs->gpr[4],
+ regs->gpr[5], regs->gpr[6]);
+
+ return ret ? : regs->syscallno;
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->gpr[11]),
+ regs->gpr[11]);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
new file mode 100644
index 000000000000..1422f747f52b
--- /dev/null
+++ b/arch/openrisc/kernel/setup.c
@@ -0,0 +1,381 @@
+/*
+ * OpenRISC setup.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+#include <linux/initrd.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/types.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/cpuinfo.h>
+#include <asm/delay.h>
+
+#include "vmlinux.h"
+
+char __initdata cmd_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+
+static unsigned long __init setup_memory(void)
+{
+ unsigned long bootmap_size;
+ unsigned long ram_start_pfn;
+ unsigned long free_ram_start_pfn;
+ unsigned long ram_end_pfn;
+ phys_addr_t memory_start, memory_end;
+ struct memblock_region *region;
+
+ memory_end = memory_start = 0;
+
+ /* Find the main memory region that the kernel lives in */
+ for_each_memblock(memory, region) {
+ memory_start = region->base;
+ memory_end = region->base + region->size;
+ printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+ memory_start, memory_end);
+ }
+
+ if (!memory_end) {
+ panic("No memory!");
+ }
+
+ ram_start_pfn = PFN_UP(memory_start);
+ /* free_ram_start_pfn is first page after kernel */
+ free_ram_start_pfn = PFN_UP(__pa(&_end));
+ ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+
+ max_pfn = ram_end_pfn;
+
+ /*
+ * initialize the boot-time allocator (with low memory only).
+ *
+ * This makes the memory from the end of the kernel to the end of
+ * RAM usable.
+ * init_bootmem sets the global values min_low_pfn, max_low_pfn.
+ */
+ bootmap_size = init_bootmem(free_ram_start_pfn,
+ ram_end_pfn - ram_start_pfn);
+ free_bootmem(PFN_PHYS(free_ram_start_pfn),
+ (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT);
+ reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size,
+ BOOTMEM_DEFAULT);
+
+ for_each_memblock(reserved, region) {
+ printk(KERN_INFO "Reserved - 0x%08x-0x%08x\n",
+ (u32) region->base, (u32) region->size);
+ reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT);
+ }
+
+ return ram_end_pfn;
+}
+
+struct cpuinfo cpuinfo;
+
+static void print_cpuinfo(void)
+{
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long vr = mfspr(SPR_VR);
+ unsigned int version;
+ unsigned int revision;
+
+ version = (vr & SPR_VR_VER) >> 24;
+ revision = (vr & SPR_VR_REV);
+
+ printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n",
+ version, revision, cpuinfo.clock_frequency / 1000000);
+
+ if (!(upr & SPR_UPR_UP)) {
+ printk(KERN_INFO
+ "-- no UPR register... unable to detect configuration\n");
+ return;
+ }
+
+ if (upr & SPR_UPR_DCP)
+ printk(KERN_INFO
+ "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
+ cpuinfo.dcache_size, cpuinfo.dcache_block_size, 1);
+ else
+ printk(KERN_INFO "-- dcache disabled\n");
+ if (upr & SPR_UPR_ICP)
+ printk(KERN_INFO
+ "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
+ cpuinfo.icache_size, cpuinfo.icache_block_size, 1);
+ else
+ printk(KERN_INFO "-- icache disabled\n");
+
+ if (upr & SPR_UPR_DMP)
+ printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n",
+ 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
+ if (upr & SPR_UPR_IMP)
+ printk(KERN_INFO "-- immu: %4d entries, %lu way(s)\n",
+ 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
+
+ printk(KERN_INFO "-- additional features:\n");
+ if (upr & SPR_UPR_DUP)
+ printk(KERN_INFO "-- debug unit\n");
+ if (upr & SPR_UPR_PCUP)
+ printk(KERN_INFO "-- performance counters\n");
+ if (upr & SPR_UPR_PMP)
+ printk(KERN_INFO "-- power management\n");
+ if (upr & SPR_UPR_PICP)
+ printk(KERN_INFO "-- PIC\n");
+ if (upr & SPR_UPR_TTP)
+ printk(KERN_INFO "-- timer\n");
+ if (upr & SPR_UPR_CUP)
+ printk(KERN_INFO "-- custom unit(s)\n");
+}
+
+void __init setup_cpuinfo(void)
+{
+ struct device_node *cpu;
+ unsigned long iccfgr, dccfgr;
+ unsigned long cache_set_size, cache_ways;
+
+ cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+ if (!cpu)
+ panic("No compatible CPU found in device tree...\n");
+
+ iccfgr = mfspr(SPR_ICCFGR);
+ cache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
+ cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
+ cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
+ cpuinfo.icache_size =
+ cache_set_size * cache_ways * cpuinfo.icache_block_size;
+
+ dccfgr = mfspr(SPR_DCCFGR);
+ cache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
+ cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
+ cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
+ cpuinfo.dcache_size =
+ cache_set_size * cache_ways * cpuinfo.dcache_block_size;
+
+ if (of_property_read_u32(cpu, "clock-frequency",
+ &cpuinfo.clock_frequency)) {
+ printk(KERN_WARNING
+ "Device tree missing CPU 'clock-frequency' parameter. "
+ "Assuming a frequency of 25 MHz. "
+ "This is probably not what you want.\n");
+ cpuinfo.clock_frequency = 25000000; /* the 25 MHz assumed above */
+ }
+
+ of_node_put(cpu);
+
+ print_cpuinfo();
+}
+
+/**
+ * or32_early_setup
+ *
+ * Handles the pointer to the device tree that this kernel is to use
+ * for establishing the available platform devices.
+ *
+ * For now, this is limited to using the built-in device tree. In the future,
+ * it is intended that this function will take a pointer to the device tree
+ * that is potentially built-in, but potentially also passed in by the
+ * bootloader, or discovered by some equally clever means...
+ */
+
+void __init or32_early_setup(void)
+{
+
+ early_init_devtree(__dtb_start);
+
+ printk(KERN_INFO "Compiled-in FDT at 0x%p\n", __dtb_start);
+}
+
+static int __init openrisc_device_probe(void)
+{
+ of_platform_populate(NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+device_initcall(openrisc_device_probe);
+
+static inline unsigned long extract_value_bits(unsigned long reg,
+ short bit_nr, short width)
+{
+ return (reg >> bit_nr) & ((1UL << width) - 1);
+}
+
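+/* Shift 'reg' right until the field selected by 'mask' is right-aligned,
+ * then return only those field bits.
+ */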
+static inline unsigned long extract_value(unsigned long reg, unsigned long mask)
+{
+ while (!(mask & 0x1)) {
+ reg = reg >> 1;
+ mask = mask >> 1;
+ }
+ return mask & reg;
+}
+
+void __init detect_unit_config(unsigned long upr, unsigned long mask,
+ char *text, void (*func) (void))
+{
+ if (text != NULL)
+ printk("%s", text);
+
+ if (upr & mask) {
+ if (func != NULL)
+ func();
+ else
+ printk("present\n");
+ } else
+ printk("not present\n");
+}
+
+/*
+ * calibrate_delay
+ *
+ * Lightweight calibrate_delay implementation that calculates loops_per_jiffy
+ * from the clock frequency passed in via the device tree
+ *
+ */
+
+void __cpuinit calibrate_delay(void)
+{
+ const int *val;
+ struct device_node *cpu = NULL;
+ cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+ val = of_get_property(cpu, "clock-frequency", NULL);
+ if (!val)
+ panic("no cpu 'clock-frequency' parameter in device tree");
+ loops_per_jiffy = *val / HZ;
+ pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ unsigned long max_low_pfn;
+
+ unflatten_device_tree();
+
+ setup_cpuinfo();
+
+ /* process 1's initial memory region is the kernel code/data */
+ init_mm.start_code = (unsigned long)&_stext;
+ init_mm.end_code = (unsigned long)&_etext;
+ init_mm.end_data = (unsigned long)&_edata;
+ init_mm.brk = (unsigned long)&_end;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_start = (unsigned long)&__initrd_start;
+ initrd_end = (unsigned long)&__initrd_end;
+ if (initrd_start == initrd_end) {
+ initrd_start = 0;
+ initrd_end = 0;
+ }
+ initrd_below_start_ok = 1;
+#endif
+
+ /* setup bootmem allocator */
+ max_low_pfn = setup_memory();
+
+ /* paging_init() sets up the MMU and marks all pages as reserved */
+ paging_init();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ if (!conswitchp)
+ conswitchp = &dummy_con;
+#endif
+
+ *cmdline_p = cmd_line;
+
+ printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n");
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long vr;
+ int version, revision;
+
+ vr = mfspr(SPR_VR);
+ version = (vr & SPR_VR_VER) >> 24;
+ revision = vr & SPR_VR_REV;
+
+ return seq_printf(m,
+ "cpu\t\t: OpenRISC-%x\n"
+ "revision\t: %d\n"
+ "frequency\t: %ld\n"
+ "dcache size\t: %d bytes\n"
+ "dcache block size\t: %d bytes\n"
+ "icache size\t: %d bytes\n"
+ "icache block size\t: %d bytes\n"
+ "immu\t\t: %d entries, %lu ways\n"
+ "dmmu\t\t: %d entries, %lu ways\n"
+ "bogomips\t: %lu.%02lu\n",
+ version,
+ revision,
+ loops_per_jiffy * HZ,
+ cpuinfo.dcache_size,
+ cpuinfo.dcache_block_size,
+ cpuinfo.icache_size,
+ cpuinfo.icache_block_size,
+ 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
+ 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100);
+}
+
+static void *c_start(struct seq_file *m, loff_t * pos)
+{
+ /* We only have one CPU... */
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t * pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
new file mode 100644
index 000000000000..5f759c76834e
--- /dev/null
+++ b/arch/openrisc/kernel/signal.c
@@ -0,0 +1,396 @@
+/*
+ * OpenRISC signal.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tracehook.h>
+
+#include <asm/processor.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage long
+_sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
+{
+ return do_sigaltstack(uss, uoss, regs->sp);
+}
+
+struct rt_sigframe {
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned char retcode[16]; /* trampoline code */
+};
+
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+ unsigned int err = 0;
+ unsigned long old_usp;
+
+ /* Always make any pending restarted system call return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /* restore the regs from &sc->regs (same as sc, since regs is first)
+ * (sc is already checked for VERIFY_READ since the sigframe was
+ * checked in sys_sigreturn previously)
+ */
+
+ if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
+ goto badframe;
+
+ /* make sure the SM-bit is cleared so user-mode cannot fool us */
+ regs->sr &= ~SPR_SR_SM;
+
+ /* restore the old USP as it was before we stacked the sc etc.
+ * (we cannot just pop the sigcontext since we aligned the sp and
+ * stuff after pushing it)
+ */
+
+ err |= __get_user(old_usp, &sc->usp);
+
+ regs->sp = old_usp;
+
+ /* TODO: the other ports use regs->orig_XX to disable syscall checks
+ * after this completes, but we don't use that mechanism. maybe we can
+ * use it now ?
+ */
+
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
+ sigset_t set;
+ stack_t st;
+
+ /*
+ * Since we stacked the signal on a dword boundary,
+ * then frame should be dword aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (((long)frame) & 3)
+ goto badframe;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs->sp);
+
+ return regs->gpr[11];
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+ unsigned long usp = regs->sp;
+
+ /* copy the regs. they are first in sc so we can use sc directly */
+
+ err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
+
+ /* then some other stuff */
+
+ err |= __put_user(mask, &sc->oldmask);
+
+ err |= __put_user(usp, &sc->usp);
+
+ return err;
+}
+
+static inline unsigned long align_sigframe(unsigned long sp)
+{
+ return sp & ~3UL;
+}
+
+/*
+ * Work out where the signal frame should go. It's either on the user stack
+ * or the alternate stack.
+ */
+
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs, size_t frame_size)
+{
+ unsigned long sp = regs->sp;
+ int onsigstack = on_sig_stack(sp);
+
+ /* redzone */
+ sp -= STACK_FRAME_OVERHEAD;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !onsigstack) {
+ if (current->sas_ss_size)
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ sp = align_sigframe(sp - frame_size);
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (onsigstack && !likely(on_sig_stack(sp)))
+ return (void __user *)-1L;
+
+ return (void __user *)sp;
+}
+
+/* grab and setup a signal frame.
+ *
+ * basically we stack a lot of state info, and arrange for the
+ * user-mode program to return to the kernel using either a
+ * trampoline which performs the syscall sigreturn, or a provided
+ * user-mode trampoline.
+ */
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ unsigned long return_ip;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err)
+ goto give_sigsegv;
+
+ /* Clear all the bits of the ucontext we don't use. */
+ err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* trampoline - the desired return ip is the retcode itself */
+ return_ip = (unsigned long)&frame->retcode;
+ /* The trampoline is: l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop.
+ * 0xa960 is the upper halfword of the l.ori encoding, 0x20000001 is
+ * l.sys 1, and 0x15000000 is l.nop.
+ */
+ err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+ err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
+ err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
+ err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* TODO what is the current->exec_domain stuff and invmap ? */
+
+ /* Set up registers for signal handler */
+ regs->pc = (unsigned long)ka->sa.sa_handler; /* what we enter NOW */
+ regs->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
+ regs->gpr[3] = (unsigned long)sig; /* arg 1: signo */
+ regs->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
+ regs->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
+
+ /* actually move the usp to reflect the stacked frame */
+ regs->sp = (unsigned long)frame;
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+static inline void
+handle_signal(unsigned long sig,
+ siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Also note that the regs structure given here as an argument, is the latest
+ * pushed pt_regs. It may or may not be the same as the first pushed registers
+ * when the initial usermode->kernelmode transition took place. Therefore
+ * we can use user_mode(regs) to see if we came directly from kernel or user
+ * mode below.
+ */
+
+void do_signal(struct pt_regs *regs)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+ /* If we are coming out of a syscall then we need
+ * to check if the syscall was interrupted and wants to be
+ * restarted after handling the signal. If so, the original
+ * syscall number is put back into r11 and the PC rewound to
+ * point at the l.sys instruction that resulted in the
+ * original syscall. Syscall results other than the four
+ * below mean that the syscall executed to completion and no
+ * restart is necessary.
+ */
+ if (regs->syscallno) {
+ int restart = 0;
+
+ switch (regs->gpr[11]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ /* Restart if there is no signal handler */
+ restart = (signr <= 0);
+ break;
+ case -ERESTARTSYS:
+ /* Restart if there is no signal handler or the
+ * SA_RESTART flag is set */
+ restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
+ break;
+ case -ERESTARTNOINTR:
+ /* Always restart */
+ restart = 1;
+ break;
+ }
+
+ if (restart) {
+ if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
+ regs->gpr[11] = __NR_restart_syscall;
+ else
+ regs->gpr[11] = regs->orig_gpr11;
+ regs->pc -= 4;
+ } else {
+ regs->gpr[11] = -EINTR;
+ }
+ }
+
+ if (signr <= 0) {
+ /* no signal to deliver so we just put the saved sigmask
+ * back */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+
+ } else { /* signr > 0 */
+ sigset_t *oldset;
+
+ if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK)
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, oldset, regs);
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
+ }
+
+ return;
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs)
+{
+ if (current_thread_info()->flags & _TIF_SIGPENDING)
+ do_signal(regs);
+
+ if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c
new file mode 100644
index 000000000000..e1f8ce8c72a8
--- /dev/null
+++ b/arch/openrisc/kernel/sys_call_table.c
@@ -0,0 +1,28 @@
+/*
+ * OpenRISC sys_call_table.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
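+/* Re-including <asm/unistd.h> with __SYSCALL redefined as a designated
+ * initializer fills in each table slot by syscall number.
+ */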
+void *sys_call_table[__NR_syscalls] = {
+#include <asm/unistd.h>
+};
diff --git a/arch/openrisc/kernel/sys_or32.c b/arch/openrisc/kernel/sys_or32.c
new file mode 100644
index 000000000000..57060084c0cc
--- /dev/null
+++ b/arch/openrisc/kernel/sys_or32.c
@@ -0,0 +1,57 @@
+/*
+ * OpenRISC sys_or32.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on some platforms.
+ * Since we don't have to do any backwards compatibility, our
+ * versions are done in the most "normal" way possible.
+ */
+
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+
+#include <asm/syscalls.h>
+
+/* These are secondary entry points as the primary entry points are defined in
+ * entry.S where we add the 'regs' parameter value
+ */
+
+asmlinkage long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+ int __user *parent_tid, int __user *child_tid,
+ struct pt_regs *regs)
+{
+ long ret;
+
+ /* FIXME: Is alignment necessary? */
+ /* newsp = ALIGN(newsp, 4); */
+
+ if (!newsp)
+ newsp = regs->sp;
+
+ ret = do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+
+ return ret;
+}
+
+asmlinkage int _sys_fork(struct pt_regs *regs)
+{
+#ifdef CONFIG_MMU
+ return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
+#else
+ return -EINVAL;
+#endif
+}
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
new file mode 100644
index 000000000000..bd946ef1623d
--- /dev/null
+++ b/arch/openrisc/kernel/time.c
@@ -0,0 +1,181 @@
+/*
+ * OpenRISC time.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/interrupt.h>
+#include <linux/ftrace.h>
+
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/cpuinfo.h>
+
+static int openrisc_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ u32 c;
+
+ /* Read the 32-bit counter value, add delta, keep only the low 28 bits.
+ * We're guaranteed delta won't be bigger than 28 bits because the
+ * generic timekeeping code ensures that for us.
+ */
+ c = mfspr(SPR_TTCR);
+ c += delta;
+ c &= SPR_TTMR_TP;
+
+ /* Set counter and enable interrupt.
+ * Keep timer in continuous mode always.
+ */
+ mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c);
+
+ return 0;
+}
+
+static void openrisc_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_debug("%s: periodic\n", __func__);
+ BUG();
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ pr_debug("%s: oneshot\n", __func__);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ pr_debug("%s: unused\n", __func__);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ pr_debug("%s: shutdown\n", __func__);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ pr_debug("%s: resume\n", __func__);
+ break;
+ }
+}
+
+/* This is the clock event device based on the OR1K tick timer.
+ * As the timer is being used as a continuous clock-source (required for HR
+ * timers) we cannot enable the PERIODIC feature. The tick timer can run using
+ * one-shot events, so no problem.
+ */
+
+static struct clock_event_device clockevent_openrisc_timer = {
+ .name = "openrisc_timer_clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 300,
+ .set_next_event = openrisc_timer_set_next_event,
+ .set_mode = openrisc_timer_set_mode,
+};
+
+static inline void timer_ack(void)
+{
+ /* Clear the IP bit and disable further interrupts */
+ /* This can be done very simply... we just need to keep the timer
+ running, so just maintain the CR bits while clearing the rest
+ of the register
+ */
+ mtspr(SPR_TTMR, SPR_TTMR_CR);
+}
+
+/*
+ * The timer interrupt is mostly handled in generic code nowadays... this
+ * function just acknowledges the interrupt and fires the event handler that
+ * has been set on the clockevent device by the generic time management code.
+ *
+ * This function needs to be called by the timer exception handler and that's
+ * all the exception handler needs to do.
+ */
+
+irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct clock_event_device *evt = &clockevent_openrisc_timer;
+
+ timer_ack();
+
+ /*
+ * update_process_times() expects us to have called irq_enter().
+ */
+ irq_enter();
+ evt->event_handler(evt);
+ irq_exit();
+
+ set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
+}
+
+static __init void openrisc_clockevent_init(void)
+{
+ clockevents_calc_mult_shift(&clockevent_openrisc_timer,
+ cpuinfo.clock_frequency, 4);
+
+ /* We only have 28 bits */
+ clockevent_openrisc_timer.max_delta_ns =
+ clockevent_delta2ns((u32) 0x0fffffff, &clockevent_openrisc_timer);
+ clockevent_openrisc_timer.min_delta_ns =
+ clockevent_delta2ns(1, &clockevent_openrisc_timer);
+ clockevent_openrisc_timer.cpumask = cpumask_of(0);
+ clockevents_register_device(&clockevent_openrisc_timer);
+}
+
+/**
+ * Clocksource: Based on OpenRISC timer/counter
+ *
+ * This sets up the OpenRISC Tick Timer as a clock source. The tick timer
+ * is 32 bits wide and runs at the CPU clock frequency.
+ */
+
+static cycle_t openrisc_timer_read(struct clocksource *cs)
+{
+ return (cycle_t) mfspr(SPR_TTCR);
+}
+
+static struct clocksource openrisc_timer = {
+ .name = "openrisc_timer",
+ .rating = 200,
+ .read = openrisc_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init openrisc_timer_init(void)
+{
+ if (clocksource_register_hz(&openrisc_timer, cpuinfo.clock_frequency))
+ panic("failed to register clocksource");
+
+ /* Enable the incrementer: 'continuous' mode with interrupt disabled */
+ mtspr(SPR_TTMR, SPR_TTMR_CR);
+
+ return 0;
+}
+
+void __init time_init(void)
+{
+ u32 upr;
+
+ upr = mfspr(SPR_UPR);
+ if (!(upr & SPR_UPR_TTP))
+ panic("Linux not supported on devices without tick timer");
+
+ openrisc_timer_init();
+ openrisc_clockevent_init();
+}
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
new file mode 100644
index 000000000000..a4ec44a052b2
--- /dev/null
+++ b/arch/openrisc/kernel/traps.c
@@ -0,0 +1,366 @@
+/*
+ * OpenRISC traps.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Here we handle the break vectors not used by the system call
+ * mechanism, as well as some general stack/register dumping
+ * things.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/kallsyms.h>
+#include <asm/uaccess.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+extern char _etext, _stext;
+
+int kstack_depth_to_print = 0x180;
+
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+{
+ return p > (void *)tinfo && p < (void *)tinfo + THREAD_SIZE - 3;
+}
+
+void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ struct thread_info *context;
+ unsigned long addr;
+
+ context = (struct thread_info *)
+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+
+ while (valid_stack_ptr(context, stack)) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ printk(" [<%08lx>]", addr);
+ print_symbol(" %s", addr);
+ printk("\n");
+ }
+ }
+ printk(" =======================\n");
+}
+
+/* displays a short stack trace */
+void show_stack(struct task_struct *task, unsigned long *esp)
+{
+ unsigned long addr, *stack;
+ int i;
+
+ if (esp == NULL)
+ esp = (unsigned long *)&esp;
+
+ stack = esp;
+
+ printk("Stack dump [0x%08lx]:\n", (unsigned long)esp);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (kstack_end(stack))
+ break;
+ if (__get_user(addr, stack)) {
+ /* This message matches "failing address" marked
+ s390 in ksymoops, so lines containing it will
+ not be filtered out by ksymoops. */
+ printk("Failing address 0x%lx\n", (unsigned long)stack);
+ break;
+ }
+ stack++;
+
+ printk("sp + %02d: 0x%08lx\n", i * 4, addr);
+ }
+ printk("\n");
+
+ show_trace(task, esp);
+
+ return;
+}
+
+void show_trace_task(struct task_struct *tsk)
+{
+ /*
+ * TODO: SysRq-T trace dump...
+ */
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_stack(current, &stack);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ int i;
+ int in_kernel = 1;
+ unsigned long esp;
+
+ esp = (unsigned long)(&regs->sp);
+ if (user_mode(regs))
+ in_kernel = 0;
+
+ printk("CPU #: %d\n"
+ " PC: %08lx SR: %08lx SP: %08lx\n",
+ smp_processor_id(), regs->pc, regs->sr, regs->sp);
+ printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
+ 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
+ printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
+ regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
+ printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
+ regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
+ printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
+ regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
+ printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
+ regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
+ printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
+ regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
+ printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
+ regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
+ printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
+ regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
+ printk(" RES: %08lx oGPR11: %08lx syscallno: %08lx\n",
+ regs->gpr[11], regs->orig_gpr11, regs->syscallno);
+
+ printk("Process %s (pid: %d, stackpage=%08lx)\n",
+ current->comm, current->pid, (unsigned long)current);
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+
+ printk("\nStack: ");
+ show_stack(NULL, (unsigned long *)esp);
+
+ printk("\nCode: ");
+ if (regs->pc < PAGE_OFFSET)
+ goto bad;
+
+ for (i = -24; i < 24; i++) {
+ unsigned char c;
+ if (__get_user(c, &((unsigned char *)regs->pc)[i])) {
+bad:
+ printk(" Bad PC value.");
+ break;
+ }
+
+ if (i == 0)
+ printk("(%02x) ", c);
+ else
+ printk("%02x ", c);
+ }
+ }
+ printk("\n");
+}
+
+void nommu_dump_state(struct pt_regs *regs,
+ unsigned long ea, unsigned long vector)
+{
+ int i;
+ unsigned long addr, stack = regs->sp;
+
+ printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector);
+
+ printk("CPU #: %d\n"
+ " PC: %08lx SR: %08lx SP: %08lx\n",
+ 0, regs->pc, regs->sr, regs->sp);
+ printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
+ 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
+ printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
+ regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
+ printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
+ regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
+ printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
+ regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
+ printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
+ regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
+ printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
+ regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
+ printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
+ regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
+ printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
+ regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
+ printk(" RES: %08lx oGPR11: %08lx syscallno: %08lx\n",
+ regs->gpr[11], regs->orig_gpr11, regs->syscallno);
+
+ printk("Process %s (pid: %d, stackpage=%08lx)\n",
+ ((struct task_struct *)(__pa(current)))->comm,
+ ((struct task_struct *)(__pa(current)))->pid,
+ (unsigned long)current);
+
+ printk("\nStack: ");
+ printk("Stack dump [0x%08lx]:\n", (unsigned long)stack);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((long)stack & (THREAD_SIZE - 1)) == 0)
+ break;
+ stack++;
+
+ printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4,
+ *((unsigned long *)(__pa(stack))));
+ }
+ printk("\n");
+
+ printk("Call Trace: ");
+ i = 1;
+ while (((long)stack & (THREAD_SIZE - 1)) != 0) {
+ addr = *((unsigned long *)__pa(stack));
+ stack++;
+
+ if (kernel_text_address(addr)) {
+ if (i && ((i % 6) == 0))
+ printk("\n ");
+ printk(" [<%08lx>]", addr);
+ i++;
+ }
+ }
+ printk("\n");
+
+ printk("\nCode: ");
+
+ for (i = -24; i < 24; i++) {
+ unsigned char c;
+ c = ((unsigned char *)(__pa(regs->pc)))[i];
+
+ if (i == 0)
+ printk("(%02x) ", c);
+ else
+ printk("%02x ", c);
+ }
+ printk("\n");
+}
+
+/* This is normally the 'Oops' routine */
+void die(const char *str, struct pt_regs *regs, long err)
+{
+
+ console_verbose();
+ printk("\n%s#: %04lx\n", str, err & 0xffff);
+ show_registers(regs);
+#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
+ printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n");
+
+ /* shut down interrupts */
+ local_irq_disable();
+
+ __asm__ __volatile__("l.nop 1");
+ do {} while (1);
+#endif
+ do_exit(SIGSEGV);
+}
+
+/* This is normally the 'Oops' routine */
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+ if (user_mode(regs))
+ return;
+
+ die(str, regs, err);
+}
+
+void unhandled_exception(struct pt_regs *regs, int ea, int vector)
+{
+ printk("Unable to handle exception at EA =0x%x, vector 0x%x",
+ ea, vector);
+ die("Oops", regs, 9);
+}
+
+void __init trap_init(void)
+{
+ /* Nothing needs to be done */
+}
+
+asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
+{
+ siginfo_t info;
+ memset(&info, 0, sizeof(info));
+ info.si_signo = SIGTRAP;
+ info.si_code = TRAP_TRACE;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGTRAP, &info, current);
+
+ regs->pc += 4;
+}
+
+asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
+{
+ siginfo_t info;
+
+ if (user_mode(regs)) {
+		/* Send a SIGSEGV */
+		memset(&info, 0, sizeof(info));
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, current);
+ } else {
+ printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
+ show_registers(regs);
+ die("Die:", regs, address);
+ }
+
+}
+
+asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address)
+{
+ siginfo_t info;
+
+ if (user_mode(regs)) {
+ /* Send a SIGBUS */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, current);
+ } else { /* Kernel mode */
+ printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address);
+ show_registers(regs);
+ die("Die:", regs, address);
+ }
+}
+
+asmlinkage void do_illegal_instruction(struct pt_regs *regs,
+ unsigned long address)
+{
+ siginfo_t info;
+
+ if (user_mode(regs)) {
+ /* Send a SIGILL */
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = (void *)address;
+		force_sig_info(SIGILL, &info, current);
+ } else { /* Kernel mode */
+ printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n",
+ address);
+ show_registers(regs);
+ die("Die:", regs, address);
+ }
+}
diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h
new file mode 100644
index 000000000000..ee842a2d3f36
--- /dev/null
+++ b/arch/openrisc/kernel/vmlinux.h
@@ -0,0 +1,12 @@
+#ifndef __OPENRISC_VMLINUX_H_
+#define __OPENRISC_VMLINUX_H_
+
+#include <linux/types.h>
+
+extern char _stext, _etext, _edata, _end;
+#ifdef CONFIG_BLK_DEV_INITRD
+extern char __initrd_start, __initrd_end;
+extern char __initramfs_start;
+#endif
+
+extern u32 __dtb_start[];
+
+#endif
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..2d69a853b742
--- /dev/null
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -0,0 +1,115 @@
+/*
+ * OpenRISC vmlinux.lds.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * ld script for OpenRISC architecture
+ */
+
+/* TODO
+ * - clean up __offset & stuff
+ * - change all 8192 alignment to PAGE !!!
+ * - recheck if all alignments are really needed
+ */
+
+# define LOAD_OFFSET PAGE_OFFSET
+# define LOAD_BASE PAGE_OFFSET
+
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-or32", "elf32-or32", "elf32-or32")
+jiffies = jiffies_64 + 4;
+
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = LOAD_BASE ;
+
+ /* _s_kernel_ro must be page aligned */
+ . = ALIGN(PAGE_SIZE);
+ _s_kernel_ro = .;
+
+ .text : AT(ADDR(.text) - LOAD_OFFSET)
+ {
+ _stext = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.text.__*)
+ _etext = .;
+ }
+ /* TODO: Check if fixup and text.__* are really necessary
+ * fixup is definitely necessary
+ */
+
+ _sdata = .;
+
+ /* Page alignment required for RO_DATA_SECTION */
+ RO_DATA_SECTION(PAGE_SIZE)
+ _e_kernel_ro = .;
+
+	/* Whatever comes after _e_kernel_ro had better be page-aligned, too */
+
+ /* 32 here is cacheline size... recheck this */
+ RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE)
+
+ _edata = .;
+
+ EXCEPTION_TABLE(4)
+ NOTES
+
+ /* Init code and data */
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+
+ HEAD_TEXT_SECTION
+
+ /* Page aligned */
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
+ /* Align __setup_start on 16 byte boundary */
+ INIT_DATA_SECTION(16)
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ __init_end = .;
+
+ . = ALIGN(PAGE_SIZE);
+ .initrd : AT(ADDR(.initrd) - LOAD_OFFSET)
+ {
+ __initrd_start = .;
+ *(.initrd)
+ __initrd_end = .;
+ FILL (0);
+ . = ALIGN (PAGE_SIZE);
+ }
+
+ __vmlinux_end = .; /* last address of the physical file */
+
+ BSS_SECTION(0, 0, 0x20)
+
+ _end = .;
+
+ /* Throw in the debugging sections */
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ /* Sections to be discarded -- must be last */
+ DISCARDS
+}
diff --git a/arch/openrisc/lib/Makefile b/arch/openrisc/lib/Makefile
new file mode 100644
index 000000000000..966f65dbc6f0
--- /dev/null
+++ b/arch/openrisc/lib/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for or32 specific library files..
+#
+
+obj-y = string.o delay.o
diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c
new file mode 100644
index 000000000000..01d9740ae6f3
--- /dev/null
+++ b/arch/openrisc/lib/delay.c
@@ -0,0 +1,60 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation
+ *
+ * Precise Delay Loops
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/delay.h>
+#include <asm/timex.h>
+#include <asm/processor.h>
+
+int __devinit read_current_timer(unsigned long *timer_value)
+{
+ *timer_value = mfspr(SPR_TTCR);
+ return 0;
+}
+
+void __delay(unsigned long cycles)
+{
+ cycles_t target = get_cycles() + cycles;
+
+ while (get_cycles() < target)
+ cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+ unsigned long long loops;
+
+ loops = xloops * loops_per_jiffy * HZ;
+
+ __delay(loops >> 32);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+ __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
new file mode 100644
index 000000000000..465f04bc7deb
--- /dev/null
+++ b/arch/openrisc/lib/string.S
@@ -0,0 +1,204 @@
+/*
+ * OpenRISC string.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+
+ /*
+	 * this can be optimized by doing gcc inline assembly with
+	 * proper constraints (no need to save args registers...)
+ *
+ */
+
+
+/*
+ *
+ * int __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ *
+ * NOTE: it returns number of bytes NOT copied !!!
+ *
+ */
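+/*
+ * Typical caller pattern (an illustrative sketch, not part of this patch):
+ * a nonzero return means the copy faulted part-way through, e.g.
+ *
+ *	if (__copy_tofrom_user(to, from, n))
+ *		return -EFAULT;
+ */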
+ .global __copy_tofrom_user
+__copy_tofrom_user:
+ l.addi r1,r1,-12
+ l.sw 0(r1),r6
+ l.sw 4(r1),r4
+ l.sw 8(r1),r3
+
+ l.addi r11,r5,0
+2: l.sfeq r11,r0
+ l.bf 1f
+ l.addi r11,r11,-1
+8: l.lbz r6,0(r4)
+9: l.sb 0(r3),r6
+ l.addi r3,r3,1
+ l.j 2b
+ l.addi r4,r4,1
+1:
+ l.addi r11,r11,1 // r11 holds the return value
+
+ l.lwz r6,0(r1)
+ l.lwz r4,4(r1)
+ l.lwz r3,8(r1)
+ l.jr r9
+ l.addi r1,r1,12
+
+ .section .fixup, "ax"
+99:
+ l.j 1b
+ l.nop
+ .previous
+
+ .section __ex_table, "a"
+ .long 8b, 99b // read fault
+ .long 9b, 99b // write fault
+ .previous
+
+/*
+ * unsigned long clear_user(void *addr, unsigned long size);
+ *
+ * NOTE: it returns number of bytes NOT cleared !!!
+ */
+ .global __clear_user
+__clear_user:
+ l.addi r1,r1,-8
+ l.sw 0(r1),r4
+ l.sw 4(r1),r3
+
+2: l.sfeq r4,r0
+ l.bf 1f
+ l.addi r4,r4,-1
+9: l.sb 0(r3),r0
+ l.j 2b
+ l.addi r3,r3,1
+
+1:
+ l.addi r11,r4,1
+
+ l.lwz r4,0(r1)
+ l.lwz r3,4(r1)
+ l.jr r9
+ l.addi r1,r1,8
+
+ .section .fixup, "ax"
+99:
+ l.j 1b
+ l.nop
+ .previous
+
+ .section __ex_table, "a"
+ .long 9b, 99b // write fault
+ .previous
+
+/*
+ * long strncpy_from_user(char *dst, const char *src, long count)
+ *
+ *
+ */
+ .global __strncpy_from_user
+__strncpy_from_user:
+ l.addi r1,r1,-16
+ l.sw 0(r1),r6
+ l.sw 4(r1),r5
+ l.sw 8(r1),r4
+ l.sw 12(r1),r3
+
+ l.addi r11,r5,0
+2: l.sfeq r5,r0
+ l.bf 1f
+ l.addi r5,r5,-1
+8: l.lbz r6,0(r4)
+ l.sfeq r6,r0
+ l.bf 1f
+9: l.sb 0(r3),r6
+ l.addi r3,r3,1
+ l.j 2b
+ l.addi r4,r4,1
+1:
+ l.addi r5,r5,1
+ l.sub r11,r11,r5 // r11 holds the return value
+
+ l.lwz r6,0(r1)
+ l.lwz r5,4(r1)
+ l.lwz r4,8(r1)
+ l.lwz r3,12(r1)
+ l.jr r9
+ l.addi r1,r1,16
+
+ .section .fixup, "ax"
+99:
+ l.movhi r11,hi(-EFAULT)
+ l.ori r11,r11,lo(-EFAULT)
+
+ l.lwz r6,0(r1)
+ l.lwz r5,4(r1)
+ l.lwz r4,8(r1)
+ l.lwz r3,12(r1)
+ l.jr r9
+ l.addi r1,r1,16
+ .previous
+
+ .section __ex_table, "a"
+ .long 8b, 99b // read fault
+ .previous
+
+/*
+ * extern int __strnlen_user(const char *str, long len, unsigned long top);
+ *
+ *
+ * Returns: - the length of the string including the NUL termination
+ *            character
+ *          - 0 on page fault
+ */
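+/*
+ * Illustrative example (not from the original patch): for the string "ab"
+ * plus its NUL terminator, with len >= 3 and top beyond the terminator,
+ * this returns 3; a fault while reading returns 0.
+ */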
+
+ .global __strnlen_user
+__strnlen_user:
+ l.addi r1,r1,-8
+ l.sw 0(r1),r6
+ l.sw 4(r1),r3
+
+ l.addi r11,r0,0
+2: l.sfeq r11,r4
+ l.bf 1f
+ l.addi r11,r11,1
+8: l.lbz r6,0(r3)
+ l.sfeq r6,r0
+ l.bf 1f
+ l.sfgeu r3,r5 // are we over the top ?
+ l.bf 99f
+ l.j 2b
+ l.addi r3,r3,1
+
+1:
+ l.lwz r6,0(r1)
+ l.lwz r3,4(r1)
+ l.jr r9
+ l.addi r1,r1,8
+
+ .section .fixup, "ax"
+99:
+ l.addi r11,r0,0
+
+ l.lwz r6,0(r1)
+ l.lwz r3,4(r1)
+ l.jr r9
+ l.addi r1,r1,8
+ .previous
+
+ .section __ex_table, "a"
+ .long 8b, 99b // read fault
+ .previous
diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile
new file mode 100644
index 000000000000..324ba2634529
--- /dev/null
+++ b/arch/openrisc/mm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux openrisc-specific parts of the memory manager.
+#
+
+obj-y := fault.o tlb.o init.o ioremap.o
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
new file mode 100644
index 000000000000..a5dce82f864b
--- /dev/null
+++ b/arch/openrisc/mm/fault.c
@@ -0,0 +1,338 @@
+/*
+ * OpenRISC fault.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/uaccess.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define NUM_TLB_ENTRIES 64
+#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
+
+unsigned long pte_misses; /* updated by do_page_fault() */
+unsigned long pte_errors; /* updated by do_page_fault() */
+
+/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
+ * - also look into include/asm-or32/mmu_context.h
+ */
+volatile pgd_t *current_pgd;
+
+extern void die(const char *, struct pt_regs *, long);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * On a bad access it delivers a signal (user mode) or attempts an
+ * exception-table fixup, dying if no fixup exists (kernel mode).
+ */
+
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long vector, int write_acc)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ siginfo_t info;
+ int fault;
+
+ tsk = current;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * NOTE2: This is done so that, when updating the vmalloc
+	 * mappings we don't have to walk all processes' pgdirs and
+ * add the high mappings all at once. Instead we do it as they
+ * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
+ * bit set so sometimes the TLB can use a lingering entry.
+ *
+ * This verifies that the fault happens in kernel space
+ * and that the fault was not a protection error.
+ */
+
+ if (address >= VMALLOC_START &&
+ (vector != 0x300 && vector != 0x400) &&
+ !user_mode(regs))
+ goto vmalloc_fault;
+
+ /* If exceptions were enabled, we can reenable them here */
+ if (user_mode(regs)) {
+ /* Exception was in userspace: reenable interrupts */
+ local_irq_enable();
+ } else {
+		/* If the exception was in a syscall, then IRQs may have
+		 * been enabled or disabled. If they were enabled,
+		 * reenable them.
+		 */
+		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
+ local_irq_enable();
+ }
+
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+
+ if (!vma)
+ goto bad_area;
+
+ if (vma->vm_start <= address)
+ goto good_area;
+
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+
+ if (user_mode(regs)) {
+ /*
+ * accessing the stack below usp is always a bug.
+ * we get page-aligned addresses so we can only check
+ * if we're within a page from usp, but that might be
+ * enough to catch brutal errors at least.
+ */
+ if (address + PAGE_SIZE < regs->sp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ /* first do some preliminary protection checks */
+
+ if (write_acc) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ /* not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+	/* are we trying to execute from a non-executable area? */
+ if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
+ goto bad_area;
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+
+	fault = handle_mm_fault(mm, vma, address,
+				write_acc ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ /*RGD modeled on Cris */
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ up_read(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+
+ /* User mode accesses just cause a SIGSEGV */
+
+ if (user_mode(regs)) {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+
+ /* Are we prepared to handle this kernel fault?
+ *
+ * (The kernel has valid exception-points in the source
+	 * when it accesses user-memory. When it fails in one
+ * of those points, we find it in a table and do a jump
+ * to some fixup code that loads an appropriate error
+ * code)
+ */
+
+ {
+ const struct exception_table_entry *entry;
+
+ __asm__ __volatile__("l.nop 42");
+
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+ /* Adjust the instruction pointer in the stackframe */
+ regs->pc = entry->fixup;
+ return;
+ }
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ if ((unsigned long)(address) < PAGE_SIZE)
+ printk(KERN_ALERT
+ "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel access");
+ printk(" at virtual address 0x%08lx\n", address);
+
+ die("Oops", regs, write_acc);
+
+ do_exit(SIGKILL);
+
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+
+out_of_memory:
+ __asm__ __volatile__("l.nop 42");
+ __asm__ __volatile__("l.nop 1");
+
+ up_read(&mm->mmap_sem);
+ printk("VM: killing process %s\n", tsk->comm);
+ if (user_mode(regs))
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Use current_pgd instead of tsk->active_mm->pgd
+ * since the latter might be unavailable if this
+ * code is executed in a misfortunately run irq
+ * (like inside schedule() between switch_mm and
+ * switch_to...).
+ */
+
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+/*
+ phx_warn("do_page_fault(): vmalloc_fault will not work, "
+ "since current_pgd assign a proper value somewhere\n"
+ "anyhow we don't need this at the moment\n");
+
+ phx_mmu("vmalloc_fault");
+*/
+ pgd = (pgd_t *)current_pgd + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ /* Since we're two-level, we don't need to do both
+ * set_pgd and set_pmd (they do the same thing). If
+ * we go three-level at some point, do the right thing
+ * with pgd_present and set_pgd here.
+ *
+ * Also, since the vmalloc area is global, we don't
+ * need to copy individual PTE's, it is enough to
+ * copy the pgd pointer into the pte page of the
+ * root task. If that is there, we'll find our pte if
+ * it exists.
+ */
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+
+ if (!pmd_present(*pmd_k))
+ goto bad_area_nosemaphore;
+
+ set_pmd(pmd, *pmd_k);
+
+ /* Make sure the actual PTE exists as well to
+ * catch kernel vmalloc-area accesses to non-mapped
+ * addresses. If we don't do this, this will just
+ * silently loop forever.
+ */
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+
+ return;
+ }
+}
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
new file mode 100644
index 000000000000..359dcb20fe85
--- /dev/null
+++ b/arch/openrisc/mm/init.c
@@ -0,0 +1,283 @@
+/*
+ * OpenRISC init.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h> /* for initrd_* */
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+int mem_init_done;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static void __init zone_sizes_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES];
+
+ /* Clear the zone sizes */
+ memset(zones_size, 0, sizeof(zones_size));
+
+ /*
+ * We use only ZONE_NORMAL
+ */
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(zones_size);
+}
+
+extern const char _s_kernel_ro[], _e_kernel_ro[];
+
+/*
+ * Map all physical memory into kernel's address space.
+ *
+ * This is explicitly coded for two-level page tables, so if you need
+ * something else then this needs to change.
+ */
+static void __init map_ram(void)
+{
+ unsigned long v, p, e;
+ pgprot_t prot;
+ pgd_t *pge;
+ pud_t *pue;
+ pmd_t *pme;
+ pte_t *pte;
+ /* These mark extents of read-only kernel pages...
+ * ...from vmlinux.lds.S
+ */
+ struct memblock_region *region;
+
+ v = PAGE_OFFSET;
+
+ for_each_memblock(memory, region) {
+ p = (u32) region->base & PAGE_MASK;
+ e = p + (u32) region->size;
+
+ v = (u32) __va(p);
+ pge = pgd_offset_k(v);
+
+ while (p < e) {
+ int j;
+ pue = pud_offset(pge, v);
+ pme = pmd_offset(pue, v);
+
+ if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
+ panic("%s: OR1K kernel hardcoded for "
+ "two-level page tables",
+ __func__);
+ }
+
+ /* Alloc one page for holding PTE's... */
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
+
+ /* Fill the newly allocated page with PTE'S */
+			for (j = 0; p < e && j < PTRS_PER_PTE;
+ v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
+ if (v >= (u32) _e_kernel_ro ||
+ v < (u32) _s_kernel_ro)
+ prot = PAGE_KERNEL;
+ else
+ prot = PAGE_KERNEL_RO;
+
+ set_pte(pte, mk_pte_phys(p, prot));
+ }
+
+ pge++;
+ }
+
+ printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+ region->base, region->base + region->size);
+ }
+}
+
+void __init paging_init(void)
+{
+ extern void tlb_init(void);
+
+ unsigned long end;
+ int i;
+
+ printk(KERN_INFO "Setting up paging and PTEs.\n");
+
+ /* clear out the init_mm.pgd that will contain the kernel's mappings */
+
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ swapper_pg_dir[i] = __pgd(0);
+
+ /* make sure the current pgd table points to something sane
+ * (even if it is most probably not used until the next
+ * switch_mm)
+ */
+ current_pgd = init_mm.pgd;
+
+ end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
+
+ map_ram();
+
+ zone_sizes_init();
+
+ /* self modifying code ;) */
+ /* Since the old TLB miss handler has been running up until now,
+ * the kernel pages are still all RW, so we can still modify the
+ * text directly... after this change and a TLB flush, the kernel
+ * pages will become RO.
+ */
+ {
+ extern unsigned long dtlb_miss_handler;
+ extern unsigned long itlb_miss_handler;
+
+ unsigned long *dtlb_vector = __va(0x900);
+ unsigned long *itlb_vector = __va(0xa00);
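+
+		/*
+		 * The OR1K l.j opcode is all zero bits, so a word holding
+		 * just the (positive) instruction-count offset to the
+		 * handler is itself an unconditional jump to that handler.
+		 */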
+
+ printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
+ *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
+ (unsigned long)dtlb_vector) >> 2;
+
+ printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
+ *itlb_vector = ((unsigned long)&itlb_miss_handler -
+ (unsigned long)itlb_vector) >> 2;
+ }
+
+ /* Invalidate instruction caches after code modification */
+ mtspr(SPR_ICBIR, 0x900);
+ mtspr(SPR_ICBIR, 0xa00);
+
+	/* New TLB miss handlers and kernel page tables are now in place.
+ * Make sure that page flags get updated for all pages in TLB by
+ * flushing the TLB and forcing all TLB entries to be recreated
+ * from their page table flags.
+ */
+ flush_tlb_all();
+}
+
+/* References to section boundaries */
+
+extern char _stext, _etext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+static int __init free_pages_init(void)
+{
+ int reservedpages, pfn;
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages = free_all_bootmem();
+
+ reservedpages = 0;
+ for (pfn = 0; pfn < max_low_pfn; pfn++) {
+ /*
+ * Only count reserved RAM pages
+ */
+ if (PageReserved(mem_map + pfn))
+ reservedpages++;
+ }
+
+ return reservedpages;
+}
+
+static void __init set_max_mapnr_init(void)
+{
+ max_mapnr = num_physpages = max_low_pfn;
+}
+
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+
+ if (!mem_map)
+ BUG();
+
+ set_max_mapnr_init();
+
+ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+ reservedpages = free_pages_init();
+
+ codesize = (unsigned long)&_etext - (unsigned long)&_stext;
+ datasize = (unsigned long)&_edata - (unsigned long)&_etext;
+ initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+
+ printk(KERN_INFO
+ "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
+ max_mapnr << (PAGE_SHIFT - 10), codesize >> 10,
+ reservedpages << (PAGE_SHIFT - 10), datasize >> 10,
+ initsize >> 10, (unsigned long)(0 << (PAGE_SHIFT - 10))
+ );
+
+ printk("mem_init_done ...........................................\n");
+ mem_init_done = 1;
+ return;
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
+ (end - start) >> 10);
+
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ init_page_count(virt_to_page(start));
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10);
+}
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
new file mode 100644
index 000000000000..62b08ef392be
--- /dev/null
+++ b/arch/openrisc/mm/ioremap.c
@@ -0,0 +1,137 @@
+/*
+ * OpenRISC ioremap.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/pgalloc.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/bug.h>
+#include <asm/pgtable.h>
+#include <linux/sched.h>
+#include <asm/tlbflush.h>
+
+extern int mem_init_done;
+
+static unsigned int fixmaps_used __initdata;
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *__init_refok
+__ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot)
+{
+ phys_addr_t p;
+ unsigned long v;
+ unsigned long offset, last_addr;
+ struct vm_struct *area = NULL;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = addr & ~PAGE_MASK;
+ p = addr & PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - p;
+
+ if (likely(mem_init_done)) {
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ v = (unsigned long)area->addr;
+ } else {
+ if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
+ return NULL;
+ v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
+ fixmaps_used += (size >> PAGE_SHIFT);
+ }
+
+ if (ioremap_page_range(v, v + size, p, prot)) {
+ if (likely(mem_init_done))
+ vfree(area->addr);
+ else
+ fixmaps_used -= (size >> PAGE_SHIFT);
+ return NULL;
+ }
+
+ return (void __iomem *)(offset + (char *)v);
+}
+
+void iounmap(void *addr)
+{
+ /* If the page is from the fixmap pool then we just clear out
+ * the fixmap mapping.
+ */
+ if (unlikely((unsigned long)addr > FIXADDR_START)) {
+ /* This is a bit broken... we don't really know
+ * how big the area is so it's difficult to know
+ * how many fixed pages to invalidate...
+ * just flush tlb and hope for the best...
+ * consider this a FIXME
+ *
+ * Really we should be clearing out one or more page
+ * table entries for these virtual addresses so that
+ * future references cause a page fault... for now, we
+ * rely on two things:
+ * i) this code never gets called on known boards
+ * ii) invalid accesses to the freed areas aren't made
+ */
+ flush_tlb_all();
+ return;
+ }
+
+	vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+/*
+ * OK, this one's a bit tricky... ioremap can get called before memory is
+ * initialized (early serial console does this) and will want to alloc a page
+ * for its mapping. No userspace pages will ever get allocated before memory
+ * is initialized so this applies only to kernel pages. In the event that
+ * this is called before memory is initialized we allocate the page using
+ * the bootmem allocator (switching this over to memblock is a FIXME below).
+ */
+
+pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ if (likely(mem_init_done)) {
+ pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ } else {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+#if 0
+ /* FIXME: use memblock... */
+ pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+#endif
+ }
+
+ if (pte)
+ clear_page(pte);
+ return pte;
+}
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
new file mode 100644
index 000000000000..56b0b89624af
--- /dev/null
+++ b/arch/openrisc/mm/tlb.c
@@ -0,0 +1,193 @@
+/*
+ * OpenRISC tlb.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/spr_defs.h>
+
+#define NO_CONTEXT -1
+
+#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
+			SPR_DMMUCFGR_NTS_OFF))
+#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
+			SPR_IMMUCFGR_NTS_OFF))
+#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
+#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
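+
+/*
+ * Example (illustrative): with 64 sets and 8 KiB pages (PAGE_SHIFT = 13),
+ * address 0x12345678 falls in set (0x12345678 >> 13) & 63.
+ */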
+/*
+ * Invalidate all TLB entries.
+ *
+ * This comes down to setting the 'valid' bit for all xTLBMR registers to 0.
+ * Easiest way to accomplish this is to just zero out the xTLBMR register
+ * completely.
+ *
+ */
+
+void flush_tlb_all(void)
+{
+ int i;
+ unsigned long num_tlb_sets;
+
+ /* Determine number of sets for IMMU. */
+ /* FIXME: Assumption is I & D nsets equal. */
+ num_tlb_sets = NUM_ITLB_SETS;
+
+ for (i = 0; i < num_tlb_sets; i++) {
+ mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
+ mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
+ }
+}
+
+#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
+#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)
+
+/*
+ * Invalidate a single page. This is what the xTLBEIR register is for.
+ *
+ * There's no point in checking the vma for PAGE_EXEC to determine whether it's
+ * the data or instruction TLB that should be flushed... that would take more
+ * than the few instructions that the following compiles down to!
+ *
+ * The case where we don't have the xTLBEIR register really only works for
+ * MMU's with a single way and is hard-coded that way.
+ */
+
+#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
+#define flush_dtlb_page_no_eir(addr) \
+	mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0)
+
+#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
+#define flush_itlb_page_no_eir(addr) \
+	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0)
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ if (have_dtlbeir)
+ flush_dtlb_page_eir(addr);
+ else
+ flush_dtlb_page_no_eir(addr);
+
+ if (have_itlbeir)
+ flush_itlb_page_eir(addr);
+ else
+ flush_itlb_page_no_eir(addr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+ bool dtlbeir;
+ bool itlbeir;
+
+ dtlbeir = have_dtlbeir;
+ itlbeir = have_itlbeir;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ if (dtlbeir)
+ flush_dtlb_page_eir(addr);
+ else
+ flush_dtlb_page_no_eir(addr);
+
+ if (itlbeir)
+ flush_itlb_page_eir(addr);
+ else
+ flush_itlb_page_no_eir(addr);
+ }
+}
+
+/*
+ * Invalidate the selected mm context only.
+ *
+ * FIXME: Due to some bug here, we're flushing everything for now.
+ * This should be changed to loop over the mm and call flush_tlb_range.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+
+ /* Was seeing bugs with the mm struct passed to us. Scrapped most of
+ this function. */
+	/* Several architectures do this */
+ flush_tlb_all();
+}
+
+/* called in schedule() just before actually doing the switch_to */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *next_tsk)
+{
+ /* remember the pgd for the fault handlers
+	 * this is similar to the pgd register in some other CPUs.
+	 * we need our own copy of it because current and active_mm
+	 * might be invalid at points where we still need to dereference
+	 * the pgd.
+ */
+ current_pgd = next->pgd;
+
+ /* We don't have context support implemented, so flush all
+ * entries belonging to previous map
+ */
+
+ if (prev != next)
+ flush_tlb_mm(prev);
+
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context = NO_CONTEXT;
+ return 0;
+}
+
+/* called by __exit_mm to destroy the used MMU context if any before
+ * destroying the mm itself. this is only called when the last user of the mm
+ * drops it.
+ */
+
+void destroy_context(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+
+}
+
+/* called once during VM initialization, from init.c */
+
+void __init tlb_init(void)
+{
+ /* Do nothing... */
+ /* invalidate the entire TLB */
+ /* flush_tlb_all(); */
+}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index cedbbb8b18d9..5e34ccf39a49 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -540,18 +540,6 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
return (Elf_Addr)stub;
}
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- /* parisc should not need this ... */
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
#ifndef CONFIG_64BIT
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2729c6663d8a..cdf7a0a64406 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,6 +134,7 @@ config PPC
select GENERIC_IRQ_SHOW_LEVEL
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_BPF_JIT if (PPC64 && NET)
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index b7212b619c52..b94740f36b1a 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -154,7 +154,8 @@ core-y += arch/powerpc/kernel/ \
arch/powerpc/lib/ \
arch/powerpc/sysdev/ \
arch/powerpc/platforms/ \
- arch/powerpc/math-emu/
+ arch/powerpc/math-emu/ \
+ arch/powerpc/net/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_KVM) += arch/powerpc/kvm/
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
deleted file mode 100644
index a71c9c1455a7..000000000000
--- a/arch/powerpc/include/asm/8253pit.h
+++ /dev/null
@@ -1,3 +0,0 @@
-/*
- * 8253/8254 Programmable Interval Timer
- */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c0d842cfd012..e30442c539ce 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -179,8 +179,9 @@ extern const char *powerpc_base_platform;
#define LONG_ASM_CONST(x) 0
#endif
-
-#define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
@@ -401,9 +402,10 @@ extern const char *powerpc_base_platform;
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
- CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_HVMODE)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -417,13 +419,13 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 45921672b97a..2cc41c715d2b 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
#define PPC_WARN_EMULATED(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
- 1, 0, regs, 0); \
+ 1, regs, 0); \
__PPC_WARN_EMULATED(type); \
} while (0)
#define PPC_WARN_ALIGNMENT(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
- 1, 0, regs, regs->dar); \
+ 1, regs, regs->dar); \
__PPC_WARN_EMULATED(type); \
} while (0)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index f5dfe3411f64..8057f4f6980f 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -61,19 +61,22 @@
#define EXC_HV H
#define EXC_STD
-#define EXCEPTION_PROLOG_1(area) \
+#define __EXCEPTION_PROLOG_1(area, extra, vec) \
GET_PACA(r13); \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
std r10,area+EX_R10(r13); \
- std r11,area+EX_R11(r13); \
- std r12,area+EX_R12(r13); \
BEGIN_FTR_SECTION_NESTED(66); \
mfspr r10,SPRN_CFAR; \
std r10,area+EX_CFAR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
- GET_SCRATCH0(r9); \
- std r9,area+EX_R13(r13); \
- mfcr r9
+ mfcr r9; \
+ extra(vec); \
+ std r11,area+EX_R11(r13); \
+ std r12,area+EX_R12(r13); \
+ GET_SCRATCH0(r10); \
+ std r10,area+EX_R13(r13)
+#define EXCEPTION_PROLOG_1(area, extra, vec) \
+ __EXCEPTION_PROLOG_1(area, extra, vec)
#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
@@ -85,13 +88,65 @@
mtspr SPRN_##h##SRR1,r10; \
h##rfid; \
b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
+#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
__EXCEPTION_PROLOG_PSERIES_1(label, h)
-#define EXCEPTION_PROLOG_PSERIES(area, label, h) \
- EXCEPTION_PROLOG_1(area); \
+#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
+#define __KVMTEST(n) \
+ lbz r10,HSTATE_IN_GUEST(r13); \
+ cmpwi r10,0; \
+ bne do_kvm_##n
+
+#define __KVM_HANDLER(area, h, n) \
+do_kvm_##n: \
+ ld r10,area+EX_R10(r13); \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt
+
+#define __KVM_HANDLER_SKIP(area, h, n) \
+do_kvm_##n: \
+ cmpwi r10,KVM_GUEST_MODE_SKIP; \
+ ld r10,area+EX_R10(r13); \
+ beq 89f; \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt; \
+89: mtocrf 0x80,r9; \
+ ld r9,area+EX_R9(r13); \
+ b kvmppc_skip_##h##interrupt
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#define KVMTEST(n) __KVMTEST(n)
+#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST(n)
+#define KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n)
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+#define KVMTEST_PR(n) __KVMTEST(n)
+#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST_PR(n)
+#define KVM_HANDLER_PR(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)
+#endif
+
+#define NOTEST(n)
+
/*
* The common exception prolog is used for all except a few exceptions
* such as a segment miss on a kernel address. We have to be prepared
@@ -164,57 +219,58 @@
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
- DO_KVM vec; \
SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_STD, KVMTEST_PR, vec)
#define STD_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
HMT_MEDIUM; \
- DO_KVM vec; \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV)
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_HV, KVMTEST, vec)
-#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
- HMT_MEDIUM; \
- DO_KVM vec; \
- SET_SCRATCH0(r13); /* save r13 */ \
- GET_PACA(r13); \
- std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
- std r10,PACA_EXGEN+EX_R10(r13); \
+#define __SOFTEN_TEST(h) \
lbz r10,PACASOFTIRQEN(r13); \
- mfcr r9; \
cmpwi r10,0; \
- beq masked_##h##interrupt; \
- GET_SCRATCH0(r10); \
- std r10,PACA_EXGEN+EX_R13(r13); \
- std r11,PACA_EXGEN+EX_R11(r13); \
- std r12,PACA_EXGEN+EX_R12(r13); \
- ld r12,PACAKBASE(r13); /* get high part of &label */ \
- ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label##_common) \
- mtspr SPRN_##h##SRR0,r12; \
- mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
- mtspr SPRN_##h##SRR1,r10; \
- h##rfid; \
- b . /* prevent speculative execution */
-#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
- __MASKABLE_EXCEPTION_PSERIES(vec, label, h)
+ beq masked_##h##interrupt
+#define _SOFTEN_TEST(h) __SOFTEN_TEST(h)
+
+#define SOFTEN_TEST_PR(vec) \
+ KVMTEST_PR(vec); \
+ _SOFTEN_TEST(EXC_STD)
+
+#define SOFTEN_TEST_HV(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_HV)
+
+#define SOFTEN_TEST_HV_201(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_STD)
+
+#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ HMT_MEDIUM; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
+#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD)
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_STD, SOFTEN_TEST_PR)
#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV)
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_HV, SOFTEN_TEST_HV)
#ifdef CONFIG_PPC_ISERIES
#define DISABLE_INTS \
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index fd8201dddd4b..1c324ff55ea8 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -29,6 +29,10 @@
#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
is a good time to retry */
#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+
+/* Internal value used in book3s_hv kvm support; not returned to guests */
+#define H_TOO_HARD 9999
+
#define H_HARDWARE -1 /* Hardware error */
#define H_FUNCTION -2 /* Function not supported */
#define H_PRIVILEGE -3 /* Caller not privileged */
@@ -100,6 +104,7 @@
#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
#define H_ANDCOND (1UL<<(63-33))
+#define H_LOCAL (1UL<<(63-35))
#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 1c33ec17ca36..80fd4d2b4a62 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
extern struct pmu perf_ops_bp;
-extern void ptrace_triggered(struct perf_event *bp, int nmi,
+extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index d2ca5ed3877b..a4f6c85431f8 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -22,6 +22,10 @@
#include <linux/types.h>
+/* Select powerpc specific features in <linux/kvm.h> */
+#define __KVM_HAVE_SPAPR_TCE
+#define __KVM_HAVE_PPC_SMT
+
struct kvm_regs {
__u64 pc;
__u64 cr;
@@ -272,4 +276,15 @@ struct kvm_guest_debug_arch {
#define KVM_INTERRUPT_UNSET -2U
#define KVM_INTERRUPT_SET_LEVEL -3U
+/* for KVM_CAP_SPAPR_TCE */
+struct kvm_create_spapr_tce {
+ __u64 liobn;
+ __u32 window_size;
+};
+
+/* for KVM_ALLOCATE_RMA */
+struct kvm_allocate_rma {
+ __u64 rma_size;
+};
+
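/*
 * A hedged userspace sketch of how these two structs are intended to
 * be passed to the VM file descriptor.  The ioctl names
 * (KVM_CREATE_SPAPR_TCE, KVM_ALLOCATE_RMA) are defined elsewhere in
 * this series; the exact calling details here are assumptions, and
 * the liobn/window values are illustrative only.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_setup_spapr(int vmfd)
{
	struct kvm_create_spapr_tce tce = {
		.liobn = 0x12345678,		/* example LIOBN */
		.window_size = 1 << 24,		/* 16MB DMA window */
	};
	struct kvm_allocate_rma rma = { 0 };
	int tce_fd, rma_fd;

	tce_fd = ioctl(vmfd, KVM_CREATE_SPAPR_TCE, &tce); /* fd backing the TCE table */
	rma_fd = ioctl(vmfd, KVM_ALLOCATE_RMA, &rma);	  /* kernel fills rma.rma_size */
	return (tce_fd < 0 || rma_fd < 0) ? -1 : 0;
}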
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 0951b17f4eb5..7b1f0e0fc653 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -64,8 +64,12 @@
#define BOOK3S_INTERRUPT_PROGRAM 0x700
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d62e703f1214..98da010252a3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -24,20 +24,6 @@
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
-struct kvmppc_slb {
- u64 esid;
- u64 vsid;
- u64 orige;
- u64 origv;
- bool valid : 1;
- bool Ks : 1;
- bool Kp : 1;
- bool nx : 1;
- bool large : 1; /* PTEs are 16MB */
- bool tb : 1; /* 1TB segment */
- bool class : 1;
-};
-
struct kvmppc_bat {
u64 raw;
u32 bepi;
@@ -67,11 +53,22 @@ struct kvmppc_sid_map {
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
#endif
+struct hpte_cache {
+ struct hlist_node list_pte;
+ struct hlist_node list_pte_long;
+ struct hlist_node list_vpte;
+ struct hlist_node list_vpte_long;
+ struct rcu_head rcu_head;
+ u64 host_va;
+ u64 pfn;
+ ulong slot;
+ struct kvmppc_pte pte;
+};
+
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
- struct kvmppc_slb slb[64];
struct {
u64 esid;
u64 vsid;
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s {
struct kvmppc_bat dbat[8];
u64 hid[6];
u64 gqr[8];
- int slb_nr;
u64 sdr1;
u64 hior;
u64 msr_mask;
@@ -93,7 +89,13 @@ struct kvmppc_vcpu_book3s {
u64 vsid_max;
#endif
int context_id[SID_CONTEXTS];
- ulong prog_flags; /* flags to inject when giving a 700 trap */
+
+ struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+ struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+ struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+ struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+ int hpte_cache_count;
+ spinlock_t mmu_lock;
};
#define CONTEXT_HOST 0
@@ -110,8 +112,10 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -123,19 +127,22 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-extern ulong kvmppc_trampoline_lowmem;
-extern ulong kvmppc_trampoline_enter;
+extern void kvmppc_handler_lowmem_trampoline(void);
+extern void kvmppc_handler_trampoline_enter(void);
extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
@@ -147,15 +154,32 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}
-static inline ulong dsisr(void)
+extern void kvm_return_point(void);
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
- ulong r;
- asm ( "mfdsisr %0 " : "=r" (r) );
- return r;
+ return to_book3s(vcpu)->hior;
}
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+ unsigned long pending_now, unsigned long old_pending)
+{
+ if (pending_now)
+ vcpu->arch.shared->int_pending = 1;
+ else if (old_pending)
+ vcpu->arch.shared->int_pending = 0;
+}
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
@@ -244,6 +268,120 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return to_svcpu(vcpu)->fault_dar;
}
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+ ulong crit_raw = vcpu->arch.shared->critical;
+ ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+ bool crit;
+
+ /* Truncate crit indicators in 32 bit mode */
+ if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ crit_raw &= 0xffffffff;
+ crit_r1 &= 0xffffffff;
+ }
+
+ /* Critical section when crit == r1 */
+ crit = (crit_raw == crit_r1);
+ /* ... and we're in supervisor mode */
+ crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+ return crit;
+}
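/*
 * A hedged usage sketch (not in this patch): the interrupt-delivery
 * path is expected to consult this helper and defer injection while
 * the guest is inside its paravirt critical section.
 */
static inline bool example_can_inject_irq(struct kvm_vcpu *vcpu)
{
	/* Inject only when the guest is outside a critical section. */
	return !kvmppc_critical_section(vcpu);
}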
+#else /* CONFIG_KVM_BOOK3S_PR */
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+ unsigned long pending_now, unsigned long old_pending)
+{
+}
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu);
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+ return vcpu->arch.last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault_dar;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif
+
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
@@ -251,12 +389,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
#define INS_DCBZ 0x7c0007ec
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
-
#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4cadd612d575..e43fe42b9875 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,9 +20,13 @@
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
+#ifdef CONFIG_KVM_BOOK3S_PR
static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
{
return &get_paca()->shadow_vcpu;
}
+#endif
+
+#define SPAPR_TCE_SHIFT 12
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d5a8a3861635..ef7b3688c3b6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -60,6 +60,36 @@ kvmppc_resume_\intno:
#else /*__ASSEMBLY__ */
+/*
+ * This struct goes in the PACA on 64-bit processors. It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu. It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+ ulong host_r1;
+ ulong host_r2;
+ ulong host_msr;
+ ulong vmhandler;
+ ulong scratch0;
+ ulong scratch1;
+ u8 in_guest;
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ struct kvm_vcpu *kvm_vcpu;
+ struct kvmppc_vcore *kvm_vcore;
+ unsigned long xics_phys;
+ u64 dabr;
+ u64 host_mmcr[3];
+ u32 host_pmc[8];
+ u64 host_purr;
+ u64 host_spurr;
+ u64 host_dscr;
+ u64 dec_expires;
+#endif
+};
+
struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
@@ -73,17 +103,12 @@ struct kvmppc_book3s_shadow_vcpu {
ulong shadow_srr1;
ulong fault_dar;
- ulong host_r1;
- ulong host_r2;
- ulong handler;
- ulong scratch0;
- ulong scratch1;
- ulong vmhandler;
- u8 in_guest;
-
#ifdef CONFIG_PPC_BOOK3S_32
u32 sr[16]; /* Guest SRs */
+
+ struct kvmppc_host_state hstate;
#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
u8 slb_max; /* highest used guest slb entry */
struct {
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 9c9ba3d59b1b..a90e09188777 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -93,4 +93,8 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dear;
}
+static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shared->msr;
+}
#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index 7a2a565f88c4..adbfca9dd100 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
@@ -29,17 +29,25 @@ struct tlbe{
u32 mas7;
};
+#define E500_TLB_VALID 1
+#define E500_TLB_DIRTY 2
+
+struct tlbe_priv {
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
+};
+
+struct vcpu_id_table;
+
struct kvmppc_vcpu_e500 {
/* Unmodified copy of the guest's TLB. */
- struct tlbe *guest_tlb[E500_TLB_NUM];
- /* TLB that's actually used when the guest is running. */
- struct tlbe *shadow_tlb[E500_TLB_NUM];
- /* Pages which are referenced in the shadow TLB. */
- struct page **shadow_pages[E500_TLB_NUM];
+ struct tlbe *gtlb_arch[E500_TLB_NUM];
- unsigned int guest_tlb_size[E500_TLB_NUM];
- unsigned int shadow_tlb_size[E500_TLB_NUM];
- unsigned int guest_tlb_nv[E500_TLB_NUM];
+ /* KVM internal information associated with each guest TLB entry */
+ struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
+
+ unsigned int gtlb_size[E500_TLB_NUM];
+ unsigned int gtlb_nv[E500_TLB_NUM];
u32 host_pid[E500_PID_NUM];
u32 pid[E500_PID_NUM];
@@ -53,6 +61,10 @@ struct kvmppc_vcpu_e500 {
u32 mas5;
u32 mas6;
u32 mas7;
+
+ /* vcpu id table */
+ struct vcpu_id_table *idt;
+
u32 l1csr0;
u32 l1csr1;
u32 hid0;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 186f150b9b89..cc22b282d755 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,15 +25,23 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
#include <linux/kvm_para.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
#include <asm/kvm_asm.h>
+#include <asm/processor.h>
-#define KVM_MAX_VCPUS 1
+#define KVM_MAX_VCPUS NR_CPUS
+#define KVM_MAX_VCORES NR_CPUS
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
+#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#endif
/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
@@ -57,6 +65,10 @@ struct kvm;
struct kvm_run;
struct kvm_vcpu;
+struct lppaca;
+struct slb_shadow;
+struct dtl;
+
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
@@ -133,9 +145,74 @@ struct kvmppc_exit_timing {
};
};
+struct kvmppc_pginfo {
+ unsigned long pfn;
+ atomic_t refcnt;
+};
+
+struct kvmppc_spapr_tce_table {
+ struct list_head list;
+ struct kvm *kvm;
+ u64 liobn;
+ u32 window_size;
+ struct page *pages[0];
+};
+
+struct kvmppc_rma_info {
+ void *base_virt;
+ unsigned long base_pfn;
+ unsigned long npages;
+ struct list_head list;
+ atomic_t use_count;
+};
+
struct kvm_arch {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ unsigned long hpt_virt;
+ unsigned long ram_npages;
+ unsigned long ram_psize;
+ unsigned long ram_porder;
+ struct kvmppc_pginfo *ram_pginfo;
+ unsigned int lpid;
+ unsigned int host_lpid;
+ unsigned long host_lpcr;
+ unsigned long sdr1;
+ unsigned long host_sdr1;
+ int tlbie_lock;
+ int n_rma_pages;
+ unsigned long lpcr;
+ unsigned long rmor;
+ struct kvmppc_rma_info *rma;
+ struct list_head spapr_tce_tables;
+ unsigned short last_vcpu[NR_CPUS];
+ struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
};
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_count combines an entry count in the bottom 8 bits
+ * and an exit count in the next 8 bits. This is so that we can
+ * atomically increment the entry count iff the exit count is 0
+ * without taking the lock.
+ */
+struct kvmppc_vcore {
+ int n_runnable;
+ int n_blocked;
+ int num_threads;
+ int entry_exit_count;
+ int n_woken;
+ int nap_count;
+ u16 pcpu;
+ u8 vcore_running;
+ u8 in_guest;
+ struct list_head runnable_threads;
+ spinlock_t lock;
+};
+
+#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
+#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
+
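/*
 * A C sketch of the lock-free protocol the comment above describes;
 * the low-level guest-entry code performs the equivalent with
 * lwarx/stwcx.  This is a hedged illustration, not code from this
 * patch: a thread may join the vcore only while no thread has exited.
 */
static int example_try_enter_vcore(struct kvmppc_vcore *vc)
{
	int old, new;

	do {
		old = vc->entry_exit_count;
		if (old >> 8)		/* some thread already exited */
			return 0;	/* too late to enter the guest */
		new = old + 1;		/* bump entry count in low 8 bits */
	} while (cmpxchg(&vc->entry_exit_count, old, new) != old);
	return 1;
}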
struct kvmppc_pte {
ulong eaddr;
u64 vpage;
@@ -163,16 +240,18 @@ struct kvmppc_mmu {
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
-struct hpte_cache {
- struct hlist_node list_pte;
- struct hlist_node list_pte_long;
- struct hlist_node list_vpte;
- struct hlist_node list_vpte_long;
- struct rcu_head rcu_head;
- u64 host_va;
- u64 pfn;
- ulong slot;
- struct kvmppc_pte pte;
+struct kvmppc_slb {
+ u64 esid;
+ u64 vsid;
+ u64 orige;
+ u64 origv;
+ bool valid : 1;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool large : 1; /* PTEs are 16MB */
+ bool tb : 1; /* 1TB segment */
+ bool class : 1;
};
struct kvm_vcpu_arch {
@@ -187,6 +266,9 @@ struct kvm_vcpu_arch {
ulong highmem_handler;
ulong rmcall;
ulong host_paca_phys;
+ struct kvmppc_slb slb[64];
+ int slb_max; /* 1 + index of last valid entry in slb[] */
+ int slb_nr; /* total number of entries in SLB */
struct kvmppc_mmu mmu;
#endif
@@ -195,13 +277,19 @@ struct kvm_vcpu_arch {
u64 fpr[32];
u64 fpscr;
+#ifdef CONFIG_SPE
+ ulong evr[32];
+ ulong spefscr;
+ ulong host_spefscr;
+ u64 acc;
+#endif
#ifdef CONFIG_ALTIVEC
vector128 vr[32];
vector128 vscr;
#endif
#ifdef CONFIG_VSX
- u64 vsr[32];
+ u64 vsr[64];
#endif
#ifdef CONFIG_PPC_BOOK3S
@@ -209,22 +297,27 @@ struct kvm_vcpu_arch {
u32 qpr[32];
#endif
-#ifdef CONFIG_BOOKE
ulong pc;
ulong ctr;
ulong lr;
ulong xer;
u32 cr;
-#endif
#ifdef CONFIG_PPC_BOOK3S
- ulong shadow_msr;
ulong hflags;
ulong guest_owned_ext;
+ ulong purr;
+ ulong spurr;
+ ulong dscr;
+ ulong amr;
+ ulong uamor;
+ u32 ctrl;
+ ulong dabr;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
+ ulong shadow_msr;
ulong sprg4;
ulong sprg5;
ulong sprg6;
@@ -249,6 +342,7 @@ struct kvm_vcpu_arch {
u32 pvr;
u32 shadow_pid;
+ u32 shadow_pid1;
u32 pid;
u32 swap_pid;
@@ -258,6 +352,9 @@ struct kvm_vcpu_arch {
u32 dbcr1;
u32 dbsr;
+ u64 mmcr[3];
+ u32 pmc[8];
+
#ifdef CONFIG_KVM_EXIT_TIMING
struct mutex exit_timing_lock;
struct kvmppc_exit_timing timing_exit;
@@ -272,8 +369,12 @@ struct kvm_vcpu_arch {
struct dentry *debugfs_exit_timing;
#endif
+#ifdef CONFIG_PPC_BOOK3S
+ ulong fault_dar;
+ u32 fault_dsisr;
+#endif
+
#ifdef CONFIG_BOOKE
- u32 last_inst;
ulong fault_dear;
ulong fault_esr;
ulong queued_dear;
@@ -288,25 +389,47 @@ struct kvm_vcpu_arch {
u8 dcr_is_write;
u8 osi_needed;
u8 osi_enabled;
+ u8 hcall_needed;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
struct hrtimer dec_timer;
struct tasklet_struct tasklet;
u64 dec_jiffies;
+ u64 dec_expires;
unsigned long pending_exceptions;
+ u16 last_cpu;
+ u8 ceded;
+ u8 prodded;
+ u32 last_inst;
+
+ struct lppaca *vpa;
+ struct slb_shadow *slb_shadow;
+ struct dtl *dtl;
+ struct dtl *dtl_end;
+
+ struct kvmppc_vcore *vcore;
+ int ret;
+ int trap;
+ int state;
+ int ptid;
+ wait_queue_head_t cpu_run;
+
struct kvm_vcpu_arch_shared *shared;
unsigned long magic_page_pa; /* phys addr to map the magic page to */
unsigned long magic_page_ea; /* effect. addr to map the magic page to */
-#ifdef CONFIG_PPC_BOOK3S
- struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
- struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
- struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
- struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
- int hpte_cache_count;
- spinlock_t mmu_lock;
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ struct kvm_vcpu_arch_shared shregs;
+
+ struct list_head run_list;
+ struct task_struct *run_task;
+ struct kvm_run *kvm_run;
#endif
};
+#define KVMPPC_VCPU_BUSY_IN_HOST 0
+#define KVMPPC_VCPU_BLOCKED 1
+#define KVMPPC_VCPU_RUNNABLE 2
+
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9345238edecf..d121f49d62b8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -33,6 +33,9 @@
#else
#include <asm/kvm_booke.h>
#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/paca.h>
+#endif
enum emulation_result {
EMULATE_DONE, /* no further processing */
@@ -42,6 +45,7 @@ enum emulation_result {
EMULATE_AGAIN, /* something went wrong. go again */
};
+extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern char kvmppc_handlers_start[];
extern unsigned long kvmppc_handler_len;
@@ -109,6 +113,27 @@ extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
+
+extern long kvmppc_alloc_hpt(struct kvm *kvm);
+extern void kvmppc_free_hpt(struct kvm *kvm);
+extern long kvmppc_prepare_vrma(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args);
+extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
+ struct kvm_allocate_rma *rma);
+extern struct kvmppc_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvmppc_rma_info *ri);
+extern int kvmppc_core_init_vm(struct kvm *kvm);
+extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
/*
* Cuts out inst bits with ordering according to spec.
@@ -151,4 +176,20 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{
+ paca[cpu].kvm_hstate.xics_phys = addr;
+}
+
+extern void kvm_rma_init(void);
+
+#else
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{}
+
+static inline void kvm_rma_init(void)
+{}
+#endif
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d865bd909c7d..b445e0af4c2b 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -90,13 +90,19 @@ extern char initial_stab[];
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
-#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
+#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
+#define HPTE_R_G ASM_CONST(0x0000000000000008)
+#define HPTE_R_M ASM_CONST(0x0000000000000010)
+#define HPTE_R_I ASM_CONST(0x0000000000000020)
+#define HPTE_R_W ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
#define HPTE_R_C ASM_CONST(0x0000000000000080)
#define HPTE_R_R ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
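/*
 * A small decoding sketch using the masks above, to make the second
 * doubleword's field layout concrete.  Illustrative helpers only, not
 * part of this patch.
 */
static inline unsigned long example_hpte_r_rpn(unsigned long hpte_r)
{
	/* Real page number: bits covered by HPTE_R_RPN, shifted down 12. */
	return (hpte_r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
}

static inline unsigned long example_hpte_r_wimg(unsigned long hpte_r)
{
	/* Storage attributes: W, I, M and G gathered in one mask. */
	return hpte_r & HPTE_R_WIMG;
}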
#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 74126765106a..a6da12859959 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -147,9 +147,12 @@ struct paca_struct {
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
+ struct kvmppc_host_state kvm_hstate;
+#endif
};
extern struct paca_struct *paca;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index b90dbf8e5cd9..90bd3ed48165 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -171,15 +171,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
#ifndef CONFIG_PPC64
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- struct pci_controller *host;
-
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- host = pci_bus_to_host(bus);
- return host ? host->dn : NULL;
-}
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8 *bus, u8 *devfn);
+extern void pci_create_OF_bus_map(void);
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
@@ -223,17 +217,8 @@ struct pci_dn {
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
-extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
extern void * update_dn_pci_info(struct device_node *dn, void *data);
-/* Get a device_node from a pci_dev. This code must be fast except
- * in the case where the sysdata is incorrect and needs to be fixed
- * up (this will only happen once). */
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
-{
- return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev);
-}
-
static inline int pci_device_from_OF_node(struct device_node *np,
u8 *bus, u8 *devfn)
{
@@ -244,14 +229,6 @@ static inline int pci_device_from_OF_node(struct device_node *np,
return 0;
}
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- else
- return bus->dev.of_node; /* Must be root bus (PHB) */
-}
-
/** Find the bus corresponding to the indicated device node */
extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 7d7790954e02..1f522680ea17 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn);
-extern void of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev);
+extern void of_scan_pci_bridge(struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e472659d906c..e980faae4225 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -71,6 +71,42 @@
#define PPC_INST_ERATSX 0x7c000126
#define PPC_INST_ERATSX_DOT 0x7c000127
+/* Misc instructions for BPF compiler */
+#define PPC_INST_LD 0xe8000000
+#define PPC_INST_LHZ 0xa0000000
+#define PPC_INST_LWZ 0x80000000
+#define PPC_INST_STD 0xf8000000
+#define PPC_INST_STDU 0xf8000001
+#define PPC_INST_MFLR 0x7c0802a6
+#define PPC_INST_MTLR 0x7c0803a6
+#define PPC_INST_CMPWI 0x2c000000
+#define PPC_INST_CMPDI 0x2c200000
+#define PPC_INST_CMPLW 0x7c000040
+#define PPC_INST_CMPLWI 0x28000000
+#define PPC_INST_ADDI 0x38000000
+#define PPC_INST_ADDIS 0x3c000000
+#define PPC_INST_ADD 0x7c000214
+#define PPC_INST_SUB 0x7c000050
+#define PPC_INST_BLR 0x4e800020
+#define PPC_INST_BLRL 0x4e800021
+#define PPC_INST_MULLW 0x7c0001d6
+#define PPC_INST_MULHWU 0x7c000016
+#define PPC_INST_MULLI 0x1c000000
+#define PPC_INST_DIVWU 0x7c0003d6
+#define PPC_INST_RLWINM 0x54000000
+#define PPC_INST_RLDICR 0x78000004
+#define PPC_INST_SLW 0x7c000030
+#define PPC_INST_SRW 0x7c000430
+#define PPC_INST_AND 0x7c000038
+#define PPC_INST_ANDDOT 0x7c000039
+#define PPC_INST_OR 0x7c000378
+#define PPC_INST_ANDI 0x70000000
+#define PPC_INST_ORI 0x60000000
+#define PPC_INST_ORIS 0x64000000
+#define PPC_INST_NEG 0x7c0000d0
+#define PPC_INST_BRANCH 0x48000000
+#define PPC_INST_BRANCH_COND 0x40800000
+
/* macros to insert fields into opcodes */
#define __PPC_RA(a) (((a) & 0x1f) << 16)
#define __PPC_RB(b) (((b) & 0x1f) << 11)
@@ -83,6 +119,10 @@
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
#define __PPC_WC(w) (((w) & 0x3) << 21)
#define __PPC_WS(w) (((w) & 0x1f) << 11)
+#define __PPC_SH(s) __PPC_WS(s)
+#define __PPC_MB(s) (((s) & 0x1f) << 6)
+#define __PPC_ME(s) (((s) & 0x1f) << 1)
+#define __PPC_BI(s) (((s) & 0x1f) << 16)
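/*
 * An illustration (not in this patch) of how a JIT is expected to
 * assemble instruction words from these constants.  __PPC_RA/__PPC_RB
 * are the existing field helpers above; the RT field (bits 6-10 of an
 * X-form instruction, i.e. shift 21) is open-coded here because no
 * helper for it appears in this hunk.
 */
static inline u32 example_emit_add(int rt, int ra, int rb)
{
	/* add rt,ra,rb */
	return PPC_INST_ADD | (((u32)rt & 0x1f) << 21) |
	       __PPC_RA(ra) | __PPC_RB(rb);
}

static inline u32 example_emit_bcc_false(int cr_bit, int off)
{
	/* branch to pc+off if the CR bit selected by BI is 0 (BO = 0b00100) */
	return PPC_INST_BRANCH_COND | __PPC_BI(cr_bit) | (off & 0xfffc);
}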
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 1b422381fc16..368f72f79808 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -150,18 +150,22 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
-#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
-#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
-#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
-#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
-#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
-#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
-#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
-#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
-#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
-#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
-#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
-#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
+/*
+ * b = base register for addressing, o = base offset from register of 1st EVR
+ * n = first EVR, s = scratch
+ */
+#define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b)
+#define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
+#define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
+#define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
+#define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
+#define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
+#define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n
+#define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
+#define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
+#define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
+#define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
+#define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW or 31,31,31 # very low priority
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index c189aa5fe1f4..b823536375dc 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -22,20 +22,6 @@
#define HAVE_ARCH_DEVTREE_FIXUPS
-#ifdef CONFIG_PPC32
-/*
- * PCI <-> OF matching functions
- * (XXX should these be here?)
- */
-struct pci_bus;
-struct pci_dev;
-extern int pci_device_from_OF_node(struct device_node *node,
- u8* bus, u8* devfn);
-extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
-extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
-extern void pci_create_OF_bus_map(void);
-#endif
-
/*
 * OF address retrieval & translation
*/
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c5cae0dd176c..ddbe57ae8584 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -189,6 +189,9 @@
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DSCR 0x11
#define SPRN_CFAR 0x1c /* Come From Address Register */
+#define SPRN_AMR 0x1d /* Authority Mask Register */
+#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
+#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
@@ -232,22 +235,28 @@
#define LPCR_VPM0 (1ul << (63-0))
#define LPCR_VPM1 (1ul << (63-1))
#define LPCR_ISL (1ul << (63-2))
+#define LPCR_VC_SH (63-2)
#define LPCR_DPFD_SH (63-11)
#define LPCR_VRMA_L (1ul << (63-12))
#define LPCR_VRMA_LP0 (1ul << (63-15))
#define LPCR_VRMA_LP1 (1ul << (63-16))
+#define LPCR_VRMASD_SH (63-16)
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
+#define LPCR_RMLS_SH (63-37)
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
#define LPCR_MER 0x00000800 /* Mediated External Exception */
+#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
+#define LPCR_LPES_SH 2
#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
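/*
 * The *_SH shift values above exist so fields can be inserted with
 * rldimi; the equivalent in C, shown as a hedged sketch, mirrors the
 * __init_LPCR change later in this patch, which sets LPES = 0b01
 * (LPES0 = 0, LPES1 = 1, i.e. HSRR0/1 used for 0x500).
 */
static inline unsigned long example_lpcr_set_lpes01(unsigned long lpcr)
{
	/* Clear the two-bit LPES field, then set it to 0b01 (LPCR_LPES1). */
	return (lpcr & ~(unsigned long)LPCR_LPES) | (1ul << LPCR_LPES_SH);
}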
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hardware m? error recovery */
#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
@@ -298,6 +307,7 @@
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */
#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
@@ -353,6 +363,13 @@
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
+#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
+#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
+#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
+#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
+#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
+#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
@@ -802,28 +819,28 @@
mfspr rX,SPRN_SPRG_PACA; \
FTR_SECTION_ELSE_NESTED(66); \
mfspr rX,SPRN_SPRG_HPACA; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define SET_PACA(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mtspr SPRN_SPRG_PACA,rX; \
FTR_SECTION_ELSE_NESTED(66); \
mtspr SPRN_SPRG_HPACA,rX; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define GET_SCRATCH0(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mfspr rX,SPRN_SPRG_SCRATCH0; \
FTR_SECTION_ELSE_NESTED(66); \
mfspr rX,SPRN_SPRG_HSCRATCH0; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define SET_SCRATCH0(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mtspr SPRN_SPRG_SCRATCH0,rX; \
FTR_SECTION_ELSE_NESTED(66); \
mtspr SPRN_SPRG_HSCRATCH0,rX; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#else /* CONFIG_PPC_BOOK3S_64 */
#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 0f0ad9fa01c1..9ec0b39f9ddc 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -318,6 +318,7 @@
#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
#define ESR_BO 0x00020000 /* Byte Ordering */
+#define ESR_SPV 0x00000080 /* Signal Processing operation */
/* Bit definitions related to the DBCR0. */
#if defined(CONFIG_40x)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 36e1c8a29be8..54b935f2f5de 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -128,6 +128,7 @@ int main(void)
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+ DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -187,7 +188,9 @@ int main(void)
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+ DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
+ DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -198,11 +201,6 @@ int main(void)
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
- DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
- DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
-#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -397,67 +395,160 @@ int main(void)
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+ DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+ DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+ DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+ DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+ DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+ DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+ DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+ DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+ DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+ DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+ DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+ DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+ DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+ DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+ DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+ DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+ DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+ DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+ DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+ DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+ DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
+ DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
+ DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+ DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
#ifdef CONFIG_PPC_BOOK3S
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
- DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+ DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+ DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+ DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+ DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+ DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+ DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+ DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+ DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+ DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+ DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
+ DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
+ DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+ DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+ DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+ DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+ DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+ DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+ DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+ DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+ DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+ DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
+ DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
+ DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
offsetof(struct kvmppc_vcpu_book3s, vcpu));
- DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
- DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
- DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
- DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
- DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
- DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
- DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
- DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
- DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
- DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
- DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
- DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
- DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
- DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
- DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
- DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
- DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
- DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
- DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
- DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
- DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
- DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
- vmhandler));
- DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
- scratch0));
- DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
- scratch1));
- DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
- in_guest));
- DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
- fault_dsisr));
- DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
- fault_dar));
- DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
- last_inst));
- DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
- shadow_srr1));
+ DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+ DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+ DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
+#else /* 32-bit */
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
+#endif
+
+ SVCPU_FIELD(SVCPU_CR, cr);
+ SVCPU_FIELD(SVCPU_XER, xer);
+ SVCPU_FIELD(SVCPU_CTR, ctr);
+ SVCPU_FIELD(SVCPU_LR, lr);
+ SVCPU_FIELD(SVCPU_PC, pc);
+ SVCPU_FIELD(SVCPU_R0, gpr[0]);
+ SVCPU_FIELD(SVCPU_R1, gpr[1]);
+ SVCPU_FIELD(SVCPU_R2, gpr[2]);
+ SVCPU_FIELD(SVCPU_R3, gpr[3]);
+ SVCPU_FIELD(SVCPU_R4, gpr[4]);
+ SVCPU_FIELD(SVCPU_R5, gpr[5]);
+ SVCPU_FIELD(SVCPU_R6, gpr[6]);
+ SVCPU_FIELD(SVCPU_R7, gpr[7]);
+ SVCPU_FIELD(SVCPU_R8, gpr[8]);
+ SVCPU_FIELD(SVCPU_R9, gpr[9]);
+ SVCPU_FIELD(SVCPU_R10, gpr[10]);
+ SVCPU_FIELD(SVCPU_R11, gpr[11]);
+ SVCPU_FIELD(SVCPU_R12, gpr[12]);
+ SVCPU_FIELD(SVCPU_R13, gpr[13]);
+ SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
+ SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
+ SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
+ SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
- DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+ SVCPU_FIELD(SVCPU_SR, sr);
#endif
-#else
+#ifdef CONFIG_PPC64
+ SVCPU_FIELD(SVCPU_SLB, slb);
+ SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+#endif
+
+ HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
+ HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+ HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
+ HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
+ HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
+ HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+ HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+ HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
+ HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+ HSTATE_FIELD(HSTATE_PMC, host_pmc);
+ HSTATE_FIELD(HSTATE_PURR, host_purr);
+ HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+ HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+ HSTATE_FIELD(HSTATE_DABR, dabr);
+ HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
+#else /* CONFIG_PPC_BOOK3S */
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -467,7 +558,7 @@ int main(void)
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif /* CONFIG_PPC_BOOK3S */
-#endif
+#endif /* CONFIG_KVM */
#ifdef CONFIG_KVM_GUEST
DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
@@ -497,6 +588,13 @@ int main(void)
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
#endif
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+ DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+ DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+ DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+ DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
index 4f9a93fcfe07..76797c5105d6 100644
--- a/arch/powerpc/kernel/cpu_setup_power7.S
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -45,12 +45,12 @@ _GLOBAL(__restore_cpu_power7)
blr
__init_hvmode_206:
- /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
+ /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
mfmsr r3
rldicl. r0,r3,4,63
bnelr
ld r5,CPU_SPEC_FEATURES(r4)
- LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
xor r5,r5,r6
std r5,CPU_SPEC_FEATURES(r4)
blr
@@ -61,19 +61,23 @@ __init_LPCR:
* LPES = 0b01 (HSRR0/1 used for 0x500)
* PECE = 0b111
* DPFD = 4
+ * HDICE = 0
+ * VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
+ * VRMASD = 0b10000 (L=1, LP=00)
*
* Other bits untouched for now
*/
mfspr r3,SPRN_LPCR
- ori r3,r3,(LPCR_LPES0|LPCR_LPES1)
- xori r3,r3, LPCR_LPES0
+ li r5,1
+ rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
- li r5,7
- sldi r5,r5,LPCR_DPFD_SH
- andc r3,r3,r5
li r5,4
- sldi r5,r5,LPCR_DPFD_SH
- or r3,r3,r5
+ rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
+ clrrdi r3,r3,1 /* clear HDICE */
+ li r5,4
+ rldimi r3,r5, LPCR_VC_SH, 0
+ li r5,0x10
+ rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
mtspr SPRN_LPCR,r3
isync
blr
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 27f2507279d8..12fac8df01c5 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -76,7 +76,7 @@ _GLOBAL(__setup_cpu_ppc970)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
- beqlr
+ beq no_hv_mode
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
@@ -90,7 +90,7 @@ _GLOBAL(__setup_cpu_ppc970MP)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
- beqlr
+ beq no_hv_mode
mfspr r0,SPRN_HID0
li r11,0x15 /* clear DOZE and SLEEP */
@@ -109,6 +109,14 @@ load_hids:
sync
isync
+ /* Try to set LPES = 01 in HID4 */
+ mfspr r0,SPRN_HID4
+ clrldi r0,r0,1 /* clear LPES0 */
+ ori r0,r0,HID4_LPES1 /* set LPES1 */
+ sync
+ mtspr SPRN_HID4,r0
+ isync
+
/* Save away cpu state */
LOAD_REG_ADDR(r5,cpu_state_storage)
@@ -117,11 +125,21 @@ load_hids:
std r3,CS_HID0(r5)
mfspr r3,SPRN_HID1
std r3,CS_HID1(r5)
- mfspr r3,SPRN_HID4
- std r3,CS_HID4(r5)
+ mfspr r4,SPRN_HID4
+ std r4,CS_HID4(r5)
mfspr r3,SPRN_HID5
std r3,CS_HID5(r5)
+ /* See if we successfully set LPES1 to 1; if not we are in Apple mode */
+ andi. r4,r4,HID4_LPES1
+ bnelr
+
+no_hv_mode:
+ /* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */
+ ld r5,CPU_SPEC_FEATURES(r4)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
+ andc r5,r5,r6
+ std r5,CPU_SPEC_FEATURES(r4)
blr
/* Called with no MMU context (typically MSR:IR/DR off) to
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
index b150b510510f..cb2e2949c8d1 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/kernel/e500-pmu.c
@@ -75,6 +75,11 @@ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static int num_events = 128;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a85f4874cba7..41b02c792aa3 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -40,7 +40,6 @@ __start_interrupts:
.globl system_reset_pSeries;
system_reset_pSeries:
HMT_MEDIUM;
- DO_KVM 0x100;
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -50,82 +49,73 @@ BEGIN_FTR_SECTION
* state loss at this time.
*/
mfspr r13,SPRN_SRR1
- rlwinm r13,r13,47-31,30,31
- cmpwi cr0,r13,1
- bne 1f
- b .power7_wakeup_noloss
-1: cmpwi cr0,r13,2
- bne 1f
- b .power7_wakeup_loss
+ rlwinm. r13,r13,47-31,30,31
+ beq 9f
+
+ /* waking up from powersave (nap) state */
+ cmpwi cr1,r13,2
/* Total loss of HV state is fatal, we could try to use the
* PIR to locate a PACA, then use an emergency stack etc...
* but for now, let's just stay stuck here
*/
-1: cmpwi cr0,r13,3
- beq .
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
+ bgt cr1,.
+ GET_PACA(r13)
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ lbz r0,PACAPROCSTART(r13)
+ cmpwi r0,0x80
+ bne 1f
+ li r0,0
+ stb r0,PACAPROCSTART(r13)
+ b kvm_start_guest
+1:
+#endif
+
+ beq cr1,2f
+ b .power7_wakeup_noloss
+2: b .power7_wakeup_loss
+9:
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
. = 0x200
-_machine_check_pSeries:
- HMT_MEDIUM
- DO_KVM 0x200
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+machine_check_pSeries_1:
+ /* This is moved out of line as it can be patched by FW, but
+ * some code path might still want to branch into the original
+ * vector
+ */
+ b machine_check_pSeries
. = 0x300
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
- DO_KVM 0x300
SET_SCRATCH0(r13)
+#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
- GET_PACA(r13)
- std r9,PACA_EXSLB+EX_R9(r13)
- std r10,PACA_EXSLB+EX_R10(r13)
- mfspr r10,SPRN_DAR
- mfspr r9,SPRN_DSISR
- srdi r10,r10,60
- rlwimi r10,r9,16,0x20
- mfcr r9
- cmpwi r10,0x2c
- beq do_stab_bolted_pSeries
- ld r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R11(r13)
- ld r11,PACA_EXSLB+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R12(r13)
- GET_SCRATCH0(r12)
- std r10,PACA_EXGEN+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
-FTR_SECTION_ELSE
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
+ b data_access_check_stab
+data_access_not_stab:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
+#endif
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
+ KVMTEST_PR, 0x300)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
- DO_KVM 0x380
SET_SCRATCH0(r13)
- GET_PACA(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
+ mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -147,24 +137,16 @@ data_access_slb_pSeries:
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
- DO_KVM 0x480
SET_SCRATCH0(r13)
- GET_PACA(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
+ mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -184,26 +166,46 @@ instruction_access_slb_pSeries:
hardware_interrupt_pSeries:
hardware_interrupt_hv:
BEGIN_FTR_SECTION
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
+ _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
+ EXC_HV, SOFTEN_TEST_HV)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
FTR_SECTION_ELSE
- _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
- ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+ EXC_STD, SOFTEN_TEST_HV_201)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+
STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
+ MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+
STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
. = 0xc00
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
- DO_KVM 0xc00
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r10,PACA_EXGEN+EX_R10(r13)
+ mfcr r9
+ KVMTEST(0xc00)
+ GET_SCRATCH0(r13)
+#endif
BEGIN_FTR_SECTION
cmpdi r0,0x1ebe
beq- 1f
@@ -220,6 +222,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
rfid
b . /* prevent speculative execution */
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
+
/* Fast LE/BE switch system call */
1: mfspr r12,SPRN_SRR1
xori r12,r12,MSR_LE
@@ -228,6 +232,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
b .
STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
	/* At 0xe??? we have a bunch of hypervisor exceptions; we branch
* out of line to handle them
@@ -262,30 +267,93 @@ vsx_unavailable_pSeries_1:
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */
+
STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */
+
STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */
. = 0x3000
/*** Out of line interrupts support ***/
+ /* moved from 0x200 */
+machine_check_pSeries:
+ .globl machine_check_fwnmi
+machine_check_fwnmi:
+ HMT_MEDIUM
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
+ EXC_STD, KVMTEST, 0x200)
+ KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
+
+#ifndef CONFIG_POWER4_ONLY
+ /* moved from 0x300 */
+data_access_check_stab:
+ GET_PACA(r13)
+ std r9,PACA_EXSLB+EX_R9(r13)
+ std r10,PACA_EXSLB+EX_R10(r13)
+ mfspr r10,SPRN_DAR
+ mfspr r9,SPRN_DSISR
+ srdi r10,r10,60
+ rlwimi r10,r9,16,0x20
+#ifdef CONFIG_KVM_BOOK3S_PR
+ lbz r9,HSTATE_IN_GUEST(r13)
+ rlwimi r10,r9,8,0x300
+#endif
+ mfcr r9
+ cmpwi r10,0x2c
+ beq do_stab_bolted_pSeries
+ mtcrf 0x80,r9
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ b data_access_not_stab
+do_stab_bolted_pSeries:
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ GET_SCRATCH0(r10)
+ std r10,PACA_EXSLB+EX_R13(r13)
+ EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
+#endif /* CONFIG_POWER4_ONLY */
+
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+ KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
+ KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
+
+ .align 7
/* moved from 0xe00 */
- STD_EXCEPTION_HV(., 0xe00, h_data_storage)
- STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
- STD_EXCEPTION_HV(., 0xe40, emulation_assist)
- STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
+ STD_EXCEPTION_HV(., 0xe02, h_data_storage)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
+ STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
+ STD_EXCEPTION_HV(., 0xe42, emulation_assist)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
+	STD_EXCEPTION_HV(., 0xe62, hmi_exception)	/* need to flush cache? */
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -317,14 +385,6 @@ masked_Hinterrupt:
hrfid
b .
- .align 7
-do_stab_bolted_pSeries:
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
-
#ifdef CONFIG_PPC_PSERIES
/*
* Vectors for the FWNMI option. Share common code.
@@ -334,14 +394,8 @@ do_stab_bolted_pSeries:
system_reset_fwnmi:
HMT_MEDIUM
SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
-
- .globl machine_check_fwnmi
- .align 7
-machine_check_fwnmi:
- HMT_MEDIUM
- SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */
@@ -376,7 +430,11 @@ slb_miss_user_pseries:
/* KVM's trampoline code needs to be close to the interrupt handlers */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
#include "../kvm/book3s_rmhandlers.S"
+#else
+#include "../kvm/book3s_hv_rmhandlers.S"
+#endif
#endif
.align 7
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 5ecf54cfa7d4..fe37dd0dfd17 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -656,7 +656,7 @@ load_up_spe:
cmpi 0,r4,0
beq 1f
addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
- SAVE_32EVRS(0,r10,r4)
+ SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
evxor evr10, evr10, evr10 /* clear out evr10 */
evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
li r5,THREAD_ACC
@@ -676,7 +676,7 @@ load_up_spe:
stw r4,THREAD_USED_SPE(r5)
evlddx evr4,r10,r5
evmra evr4,evr4
- REST_32EVRS(0,r10,r5)
+ REST_32EVRS(0,r10,r5,THREAD_EVR0)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
@@ -787,13 +787,11 @@ _GLOBAL(giveup_spe)
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
- SAVE_32EVRS(0, r4, r3)
+ SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
evxor evr6, evr6, evr6 /* clear out evr6 */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
- mfspr r6,SPRN_SPEFSCR
- stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index f8f0bc7f1d4f..3a70845a51c7 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -73,7 +73,6 @@ _GLOBAL(power7_idle)
b .
_GLOBAL(power7_wakeup_loss)
- GET_PACA(r13)
ld r1,PACAR1(r13)
REST_NVGPRS(r1)
REST_GPR(2, r1)
@@ -87,7 +86,6 @@ _GLOBAL(power7_wakeup_loss)
rfid
_GLOBAL(power7_wakeup_noloss)
- GET_PACA(r13)
ld r1,PACAR1(r13)
ld r4,_MSR(r1)
ld r5,_NIP(r1)
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 49cee9df225b..a1cd701b5753 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -31,20 +31,6 @@
LIST_HEAD(module_bug_list);
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-
- return vmalloc_exec(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
@@ -93,7 +79,3 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index f832773fc28e..0b6d79617d7b 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -174,17 +174,6 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *module)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 8fbb12508bf3..9f44a775a106 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -243,16 +243,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf64_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
- return -ENOEXEC;
-}
-
/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
   gives the value the maximum span in an instruction which uses a signed
offset) */
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index 2cc5e0301d0b..845a58478890 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -388,6 +388,11 @@ static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
struct power_pmu mpc7450_pmu = {
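
	NB: the [C(NODE)] hunk above, like the matching hunks in the power4,
	power5, power5+, power6, power7 and ppc970 PMU tables further down,
	fills in the new perf NODE cache level with -1 entries, i.e. "not
	counted" on these PMUs. A minimal sketch of the table shape, assuming
	the usual C(x) -> PERF_COUNT_HW_CACHE_##x shorthand these files use:

	/* Sketch only: a sparse cache-events table that marks every
	 * node-level op as unsupported (-1).  C(x) is assumed to expand
	 * to PERF_COUNT_HW_CACHE_##x as in the *-pmu.c files. */
	static int example_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
		[C(NODE)] = {		/* RESULT_ACCESS  RESULT_MISS */
			[C(OP_READ)]	 = { -1, -1 },
			[C(OP_WRITE)]	 = { -1, -1 },
			[C(OP_PREFETCH)] = { -1, -1 },
		},
	};
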
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index efeb88184182..0a5a899846bb 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -167,7 +167,7 @@ void setup_paca(struct paca_struct *new_paca)
* if we do a GET_PACA() before the feature fixups have been
* applied
*/
- if (cpu_has_feature(CPU_FTR_HVMODE_206))
+ if (cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 893af2a9cd03..a3c92770e422 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1097,9 +1097,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
if (dev->is_added)
continue;
- /* Setup OF node pointer in the device */
- dev->dev.of_node = pci_device_to_OF_node(dev);
-
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
*/
@@ -1685,6 +1682,13 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->dn);
+}
+
/**
* pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
* @hose: Pointer to the PCI host controller instance structure
@@ -1705,7 +1709,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
hose->global_number);
return;
}
- bus->dev.of_node = of_node_get(node);
bus->secondary = hose->first_busno;
hose->bus = bus;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index bedb370459f2..86585508e9c1 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -167,150 +167,26 @@ pcibios_make_OF_bus_map(void)
#endif
}
-typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
-
-static struct device_node*
-scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
-{
- struct device_node *node;
- struct device_node* sub_node;
-
- for_each_child_of_node(parent, node) {
- const unsigned int *class_code;
-
- if (filter(node, data)) {
- of_node_put(node);
- return node;
- }
-
- /* For PCI<->PCI bridges or CardBus bridges, we go down
- * Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well.
- */
- class_code = of_get_property(node, "class-code", NULL);
- if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
- strcmp(node->name, "multifunc-device"))
- continue;
- sub_node = scan_OF_pci_childs(node, filter, data);
- if (sub_node) {
- of_node_put(node);
- return sub_node;
- }
- }
- return NULL;
-}
-
-static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
- unsigned int devfn)
-{
- struct device_node *np, *cnp;
- const u32 *reg;
- unsigned int psize;
-
- for_each_child_of_node(parent, np) {
- reg = of_get_property(np, "reg", &psize);
- if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
- return np;
-
- /* Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well. */
- if (!strcmp(np->name, "multifunc-device")) {
- cnp = scan_OF_for_pci_dev(np, devfn);
- if (cnp)
- return cnp;
- }
- }
- return NULL;
-}
-
-
-static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
-{
- struct device_node *parent, *np;
-
- /* Are we a root bus ? */
- if (bus->self == NULL || bus->parent == NULL) {
- struct pci_controller *hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return NULL;
- return of_node_get(hose->dn);
- }
-
- /* not a root bus, we need to get our parent */
- parent = scan_OF_for_pci_bus(bus->parent);
- if (parent == NULL)
- return NULL;
-
- /* now iterate for children for a match */
- np = scan_OF_for_pci_dev(parent, bus->self->devfn);
- of_node_put(parent);
-
- return np;
-}
-
-/*
- * Scans the OF tree for a device node matching a PCI device
- */
-struct device_node *
-pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
-{
- struct device_node *parent, *np;
-
- pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
- parent = scan_OF_for_pci_bus(bus);
- if (parent == NULL)
- return NULL;
- pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
- np = scan_OF_for_pci_dev(parent, devfn);
- of_node_put(parent);
- pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
-
- /* XXX most callers don't release the returned node
- * mostly because ppc64 doesn't increase the refcount,
- * we need to fix that.
- */
- return np;
-}
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-
-struct device_node*
-pci_device_to_OF_node(struct pci_dev *dev)
-{
- return pci_busdev_to_OF_node(dev->bus, dev->devfn);
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
-static int
-find_OF_pci_device_filter(struct device_node* node, void* data)
-{
- return ((void *)node == data);
-}
/*
* Returns the PCI device matching a given OF node
*/
-int
-pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
+int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
{
- const unsigned int *reg;
- struct pci_controller* hose;
- struct pci_dev* dev = NULL;
-
- /* Make sure it's really a PCI device */
- hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->dn)
- return -ENODEV;
- if (!scan_OF_pci_childs(hose->dn,
- find_OF_pci_device_filter, (void *)node))
+ struct pci_dev *dev = NULL;
+ const __be32 *reg;
+ int size;
+
+	/* Check whether this node could plausibly be a PCI device */
+ if (!pci_find_hose_for_OF_device(node))
return -ENODEV;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
+
+ reg = of_get_property(node, "reg", &size);
+ if (!reg || size < 5 * sizeof(u32))
return -ENODEV;
- *bus = (reg[0] >> 16) & 0xff;
- *devfn = ((reg[0] >> 8) & 0xff);
+
+ *bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff;
+ *devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;
/* Ok, here we need some tweak. If we have already renumbered
* all busses, we can't rely on the OF bus number any more.
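
	NB: the rewritten pci_device_from_OF_node() above only needs the first
	cell (phys.hi) of the OF "reg" property: bits 23:16 carry the bus
	number, bits 15:8 the devfn, and one PCI address entry is five 32-bit
	cells, which is what the size check guards. A stand-alone sketch of
	that decode (the example_ name is hypothetical; the calls match the
	hunk):

	/* Hypothetical helper mirroring the decode in the hunk above:
	 * pull bus/devfn out of the first cell of an OF "reg" property. */
	static int example_of_reg_to_busdevfn(const __be32 *reg, int size,
					      u8 *bus, u8 *devfn)
	{
		/* one PCI address entry is 5 cells: phys.hi/mid/lo + size */
		if (!reg || size < 5 * sizeof(u32))
			return -ENODEV;
		*bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff;
		*devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;
		return 0;
	}
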
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 6baabc13306a..478f8d78716b 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -142,53 +142,6 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
traverse_pci_devices(dn, update_dn_pci_info, phb);
}
-/*
- * Traversal func that looks for a <busno,devfcn> value.
- * If found, the pci_dn is returned (thus terminating the traversal).
- */
-static void *is_devfn_node(struct device_node *dn, void *data)
-{
- int busno = ((unsigned long)data >> 8) & 0xff;
- int devfn = ((unsigned long)data) & 0xff;
- struct pci_dn *pci = dn->data;
-
- if (pci && (devfn == pci->devfn) && (busno == pci->busno))
- return dn;
- return NULL;
-}
-
-/*
- * This is the "slow" path for looking up a device_node from a
- * pci_dev. It will hunt for the device under its parent's
- * phb and then update of_node pointer.
- *
- * It may also do fixups on the actual device since this happens
- * on the first read/write.
- *
- * Note that it also must deal with devices that don't exist.
- * In this case it may probe for real hardware ("just in case")
- * and add a device_node to the device tree if necessary.
- *
- * Is this function necessary anymore now that dev->dev.of_node is
- * used to store the node pointer?
- *
- */
-struct device_node *fetch_dev_dn(struct pci_dev *dev)
-{
- struct pci_controller *phb = dev->sysdata;
- struct device_node *dn;
- unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
-
- if (WARN_ON(!phb))
- return NULL;
-
- dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval);
- if (dn)
- dev->dev.of_node = dn;
- return dn;
-}
-EXPORT_SYMBOL(fetch_dev_dn);
-
/**
* pci_devs_phb_init - Initialize phbs and pci devs under them.
*
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 1e89a72fd030..fe0a5ad6f73e 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -202,9 +202,9 @@ EXPORT_SYMBOL(of_create_pci_dev);
 * this routine in turn calls of_scan_bus() recursively to scan for more child
* devices.
*/
-void __devinit of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev)
+void __devinit of_scan_pci_bridge(struct pci_dev *dev)
{
+ struct device_node *node = dev->dev.of_node;
struct pci_bus *bus;
const u32 *busrange, *ranges;
int len, i, mode;
@@ -238,7 +238,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
bus->primary = dev->bus->number;
bus->subordinate = busrange[1];
bus->bridge_ctl = 0;
- bus->dev.of_node = of_node_get(node);
/* parse ranges property */
/* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -335,9 +334,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
- struct device_node *child = pci_device_to_OF_node(dev);
- if (child)
- of_scan_pci_bridge(child, dev);
+ of_scan_pci_bridge(dev);
}
}
}
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 822f63008ae1..14967de98876 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
- struct pt_regs *regs, int nmi)
+ struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_event_overflow(event, nmi, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
}
}
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if ((int)val < 0) {
/* event has overflowed */
found = 1;
- record_and_restart(event, val, regs, nmi);
+ record_and_restart(event, val, regs);
}
}
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index b0dc8f7069cd..0a6d2a9d569c 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
- struct pt_regs *regs, int nmi)
+ struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
- if (perf_event_overflow(event, nmi, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
}
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if (event) {
/* event has overflowed */
found = 1;
- record_and_restart(event, val, regs, nmi);
+ record_and_restart(event, val, regs);
} else {
/*
* Disabled counter is negative,
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index ead8b3c2649e..e9dbc2d35c9c 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -587,6 +587,11 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power4_pmu = {
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index eca0ac595cb6..f58a2bd41b59 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -653,6 +653,11 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power5p_pmu = {
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index d5ff0f64a5e6..b1acab684142 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -595,6 +595,11 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power5_pmu = {
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 31603927e376..b24a3a23d073 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -516,6 +516,11 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power6_pmu = {
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 593740fcb799..6d9dccb2ea59 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -342,6 +342,11 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power7_pmu = {
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 9a6e093858fe..b121de9658eb 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -467,6 +467,11 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu ppc970_pmu = {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91e52df3d81d..ec2d0edeb134 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -213,6 +216,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
+ tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
giveup_spe(tsk);
}
preempt_enable();
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cb22024f2b42..05b7dd217f60 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
@@ -973,7 +973,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
&attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
- ptrace_triggered, task);
+ ptrace_triggered, NULL, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
ptrace_put_breakpoints(task);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b65..22051ef04bd9 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
int threads_per_core, threads_shift;
cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
static void __init cpu_init_thread_core_maps(int tpc)
{
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a88bf2713d41..532054f24ecb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -63,6 +63,7 @@
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
+#include <asm/kvm_ppc.h>
#include "setup.h"
@@ -580,6 +581,8 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();
+ kvm_rma_init();
+
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98d..09a85a9045d6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,6 +243,7 @@ void smp_send_reschedule(int cpu)
if (likely(smp_ops))
smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f33acfd872ad..03b29a6759ab 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -544,7 +544,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
#endif /* 32 vs 64 bit */
-void set_irq_work_pending(void)
+void arch_irq_work_raise(void)
{
preempt_disable();
set_irq_work_pending_flag();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1a0141426cda..f19d9777d3c1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1387,10 +1387,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
int code = 0;
int err;
- preempt_disable();
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
- preempt_enable();
+ flush_spe_to_thread(current);
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
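
	NB: the process.c and traps.c hunks are two halves of one change:
	giveup_spe() (head_fsl_booke.S above) no longer stores SPEFSCR, so
	flush_spe_to_thread() saves it while the unit is still live, and the
	SPE exception path calls that helper instead of open-coding
	giveup_spe(). A sketch of the resulting helper; the outer regs and
	MSR_SPE checks are assumed from the unchanged context rather than
	shown in the hunks:

	/* Assumed overall shape of flush_spe_to_thread() after this patch;
	 * only the mfspr line and the giveup_spe() call are visible in the
	 * hunks above. */
	void flush_spe_to_thread(struct task_struct *tsk)
	{
		if (tsk->thread.regs) {
			preempt_disable();
			if (tsk->thread.regs->msr & MSR_SPE) {
	#ifdef CONFIG_SMP
				BUG_ON(tsk != current);
	#endif
				/* save SPEFSCR before giving the unit up */
				tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
				giveup_spe(tsk);
			}
			preempt_enable();
		}
	}
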
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5f3cff83e089..33aa715dab28 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -387,8 +387,10 @@ static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
}
}
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
+ int usermode = vcpu->arch.shared->msr & MSR_PR;
+
vcpu->arch.shadow_pid = !usermode;
}
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b7baff78f90c..78133deb4b64 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
bool
select PREEMPT_NOTIFIERS
select ANON_INODES
- select KVM_MMIO
config KVM_BOOK3S_HANDLER
bool
@@ -28,16 +27,22 @@ config KVM_BOOK3S_HANDLER
config KVM_BOOK3S_32_HANDLER
bool
select KVM_BOOK3S_HANDLER
+ select KVM_MMIO
config KVM_BOOK3S_64_HANDLER
bool
select KVM_BOOK3S_HANDLER
+config KVM_BOOK3S_PR
+ bool
+ select KVM_MMIO
+
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
select KVM
select KVM_BOOK3S_32_HANDLER
+ select KVM_BOOK3S_PR
---help---
Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors.
@@ -50,8 +55,8 @@ config KVM_BOOK3S_32
config KVM_BOOK3S_64
tristate "KVM support for PowerPC book3s_64 processors"
depends on EXPERIMENTAL && PPC_BOOK3S_64
- select KVM
select KVM_BOOK3S_64_HANDLER
+ select KVM
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -61,10 +66,34 @@ config KVM_BOOK3S_64
If unsure, say N.
+config KVM_BOOK3S_64_HV
+ bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+ depends on KVM_BOOK3S_64
+ ---help---
+ Support running unmodified book3s_64 guest kernels in
+ virtual machines on POWER7 and PPC970 processors that have
+ hypervisor mode available to the host.
+
+ If you say Y here, KVM will use the hardware virtualization
+ facilities of POWER7 (and later) processors, meaning that
+ guest operating systems will run at full hardware speed
+ using supervisor and user modes. However, this also means
+ that KVM is not usable under PowerVM (pHyp), is only usable
+ on POWER7 (or later) processors and PPC970-family processors,
+ and cannot emulate a different processor from the host processor.
+
+ If unsure, say N.
+
+config KVM_BOOK3S_64_PR
+ def_bool y
+ depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
+ select KVM_BOOK3S_PR
+
config KVM_440
bool "KVM support for PowerPC 440 processors"
depends on EXPERIMENTAL && 44x
select KVM
+ select KVM_MMIO
---help---
Support running unmodified 440 guest kernels in virtual machines on
440 host processors.
@@ -89,6 +118,7 @@ config KVM_E500
bool "KVM support for PowerPC E500 processors"
depends on EXPERIMENTAL && E500
select KVM
+ select KVM_MMIO
---help---
Support running unmodified E500 guest kernels in virtual machines on
E500 host processors.
@@ -99,6 +129,5 @@ config KVM_E500
If unsure, say N.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4d6863823f69..08428e2c188d 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -38,24 +38,42 @@ kvm-e500-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
-kvm-book3s_64-objs := \
- $(common-objs-y) \
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+ ../../../virt/kvm/coalesced_mmio.o \
fpu.o \
book3s_paired_singles.o \
- book3s.o \
+ book3s_pr.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
-kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ book3s_hv.o \
+ book3s_hv_interrupts.o \
+ book3s_64_mmu_hv.o
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ book3s_hv_rm_mmu.o \
+ book3s_64_vio_hv.o \
+ book3s_hv_builtin.o
+
+kvm-book3s_64-module-objs := \
+ ../../../virt/kvm/kvm_main.o \
+ powerpc.o \
+ emulate.o \
+ book3s.o \
+ $(kvm-book3s_64-objs-y)
+
+kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
kvm-book3s_32-objs := \
$(common-objs-y) \
fpu.o \
book3s_paired_singles.o \
book3s.o \
+ book3s_pr.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
@@ -70,3 +88,4 @@ obj-$(CONFIG_KVM_E500) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
+obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 0f95b5cce033..f68a34d16035 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,7 +17,6 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include "trace.h"
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -28,25 +27,17 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
+#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include "trace.h"
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* #define EXIT_DEBUG */
-/* #define DEBUG_EXT */
-
-static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
- ulong msr);
-
-/* Some compatibility defines */
-#ifdef CONFIG_PPC_BOOK3S_32
-#define MSR_USER32 MSR_USER
-#define MSR_USER64 MSR_USER
-#define HW_PAGE_SIZE PAGE_SIZE
-#endif
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
@@ -77,100 +68,11 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
- memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
- to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
-#endif
-
-#ifdef CONFIG_PPC_BOOK3S_32
- current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
-#endif
-}
-
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
- memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
- to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
-#endif
-
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
-}
-
-static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
-{
- ulong smsr = vcpu->arch.shared->msr;
-
- /* Guest MSR values */
- smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
- /* Process MSR values */
- smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
- /* External providers the guest reserved */
- smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
- /* 64-bit Process MSR values */
-#ifdef CONFIG_PPC_BOOK3S_64
- smsr |= MSR_ISF | MSR_HV;
-#endif
- vcpu->arch.shadow_msr = smsr;
-}
-
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
-{
- ulong old_msr = vcpu->arch.shared->msr;
-
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
-#endif
-
- msr &= to_book3s(vcpu)->msr_mask;
- vcpu->arch.shared->msr = msr;
- kvmppc_recalc_shadow_msr(vcpu);
-
- if (msr & MSR_POW) {
- if (!vcpu->arch.pending_exceptions) {
- kvm_vcpu_block(vcpu);
- vcpu->stat.halt_wakeup++;
-
- /* Unset POW bit after we woke up */
- msr &= ~MSR_POW;
- vcpu->arch.shared->msr = msr;
- }
- }
-
- if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
- (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
- kvmppc_mmu_flush_segments(vcpu);
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
-
- /* Preload magic page segment when in kernel mode */
- if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
- struct kvm_vcpu_arch *a = &vcpu->arch;
-
- if (msr & MSR_DR)
- kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
- else
- kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
- }
- }
-
- /* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
- kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
-}
-
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
- kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
+ kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -204,11 +106,13 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec)
{
+ unsigned long old_pending = vcpu->arch.pending_exceptions;
+
clear_bit(kvmppc_book3s_vec2irqprio(vec),
&vcpu->arch.pending_exceptions);
- if (!vcpu->arch.pending_exceptions)
- vcpu->arch.shared->int_pending = 0;
+ kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
+ old_pending);
}
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
@@ -225,8 +129,8 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
- to_book3s(vcpu)->prog_flags = flags;
- kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
+ /* might as well deliver this straight away */
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -266,21 +170,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
int vec = 0;
- ulong flags = 0ULL;
- ulong crit_raw = vcpu->arch.shared->critical;
- ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
- bool crit;
-
- /* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
- crit_raw &= 0xffffffff;
- crit_r1 &= 0xffffffff;
- }
-
- /* Critical section when crit == r1 */
- crit = (crit_raw == crit_r1);
- /* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+ bool crit = kvmppc_critical_section(vcpu);
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
@@ -315,7 +205,6 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
break;
case BOOK3S_IRQPRIO_PROGRAM:
vec = BOOK3S_INTERRUPT_PROGRAM;
- flags = to_book3s(vcpu)->prog_flags;
break;
case BOOK3S_IRQPRIO_VSX:
vec = BOOK3S_INTERRUPT_VSX;
@@ -346,7 +235,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
#endif
if (deliver)
- kvmppc_inject_interrupt(vcpu, vec, flags);
+ kvmppc_inject_interrupt(vcpu, vec, 0);
return deliver;
}
@@ -392,64 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
}
/* Tell the guest about our interrupt status */
- if (*pending)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
-}
-
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
-{
- u32 host_pvr;
-
- vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
- vcpu->arch.pvr = pvr;
-#ifdef CONFIG_PPC_BOOK3S_64
- if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
- kvmppc_mmu_book3s_64_init(vcpu);
- to_book3s(vcpu)->hior = 0xfff00000;
- to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
- } else
-#endif
- {
- kvmppc_mmu_book3s_32_init(vcpu);
- to_book3s(vcpu)->hior = 0;
- to_book3s(vcpu)->msr_mask = 0xffffffffULL;
- }
-
- /* If we are in hypervisor level on 970, we can tell the CPU to
- * treat DCBZ as 32 bytes store */
- vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
- if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
- !strcmp(cur_cpu_spec->platform, "ppc970"))
- vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
-
- /* Cell performs badly if MSR_FEx are set. So let's hope nobody
- really needs them in a VM on Cell and force disable them. */
- if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
- to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* 32 bit Book3S always has 32 byte dcbz */
- vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
-#endif
-
- /* On some CPUs we can execute paired single operations natively */
- asm ( "mfpvr %0" : "=r"(host_pvr));
- switch (host_pvr) {
- case 0x00080200: /* lonestar 2.0 */
- case 0x00088202: /* lonestar 2.2 */
- case 0x70000100: /* gekko 1.0 */
- case 0x00080100: /* gekko 2.0 */
- case 0x00083203: /* gekko 2.3a */
- case 0x00083213: /* gekko 2.3b */
- case 0x00083204: /* gekko 2.4 */
- case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
- case 0x00087200: /* broadway */
- vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
- /* Enable HID2.PSE - in case we need it later */
- mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
- }
+ kvmppc_update_int_pending(vcpu, *pending, old_pending);
}
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
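
	NB: both hunks in this file that touched int_pending now funnel
	through kvmppc_update_int_pending(). The helper itself is defined in
	a header outside this diff; from the open-coded logic removed above
	it presumably reduces to something like:

	/* Sketch reconstructed from the removed lines; the real helper
	 * lives in a header not part of this diff and may differ (e.g.
	 * extra handling for the HV case). */
	static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
						     unsigned long pending,
						     unsigned long old_pending)
	{
		if (pending)
			vcpu->arch.shared->int_pending = 1;
		else if (old_pending)
			vcpu->arch.shared->int_pending = 0;
	}
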
@@ -471,44 +303,6 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
return gfn_to_pfn(vcpu->kvm, gfn);
}
-/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
- * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
- * emulate 32 bytes dcbz length.
- *
- * The Book3s_64 inventors also realized this case and implemented a special bit
- * in the HID5 register, which is a hypervisor ressource. Thus we can't use it.
- *
- * My approach here is to patch the dcbz instruction on executing pages.
- */
-static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
-{
- struct page *hpage;
- u64 hpage_offset;
- u32 *page;
- int i;
-
- hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
- if (is_error_page(hpage)) {
- kvm_release_page_clean(hpage);
- return;
- }
-
- hpage_offset = pte->raddr & ~PAGE_MASK;
- hpage_offset &= ~0xFFFULL;
- hpage_offset /= 4;
-
- get_page(hpage);
- page = kmap_atomic(hpage, KM_USER0);
-
- /* patch dcbz into reserved instruction, so we trap */
- for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
- if ((page[i] & 0xff0007ff) == INS_DCBZ)
- page[i] &= 0xfffffff7;
-
- kunmap_atomic(page, KM_USER0);
- put_page(hpage);
-}
-
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
struct kvmppc_pte *pte)
{
@@ -606,519 +400,6 @@ mmio:
return EMULATE_DO_MMIO;
}
-static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
- ulong mp_pa = vcpu->arch.magic_page_pa;
-
- if (unlikely(mp_pa) &&
- unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
- return 1;
- }
-
- return kvm_is_visible_gfn(vcpu->kvm, gfn);
-}
-
-int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
- ulong eaddr, int vec)
-{
- bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
- int r = RESUME_GUEST;
- int relocated;
- int page_found = 0;
- struct kvmppc_pte pte;
- bool is_mmio = false;
- bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
- bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
- u64 vsid;
-
- relocated = data ? dr : ir;
-
- /* Resolve real address if translation turned on */
- if (relocated) {
- page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
- } else {
- pte.may_execute = true;
- pte.may_read = true;
- pte.may_write = true;
- pte.raddr = eaddr & KVM_PAM;
- pte.eaddr = eaddr;
- pte.vpage = eaddr >> 12;
- }
-
- switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
- case 0:
- pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
- break;
- case MSR_DR:
- case MSR_IR:
- vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
-
- if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
- pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
- else
- pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
- pte.vpage |= vsid;
-
- if (vsid == -1)
- page_found = -EINVAL;
- break;
- }
-
- if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
- /*
- * If we do the dcbz hack, we have to NX on every execution,
- * so we can patch the executing code. This renders our guest
- * NX-less.
- */
- pte.may_execute = !data;
- }
-
- if (page_found == -ENOENT) {
- /* Page not found in guest PTE entries */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
- vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
- kvmppc_book3s_queue_irqprio(vcpu, vec);
- } else if (page_found == -EPERM) {
- /* Storage protection */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr =
- to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
- vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
- vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
- kvmppc_book3s_queue_irqprio(vcpu, vec);
- } else if (page_found == -EINVAL) {
- /* Page not found in guest SLB */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
- } else if (!is_mmio &&
- kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
- /* The guest's PTE is not mapped yet. Map on the host */
- kvmppc_mmu_map_page(vcpu, &pte);
- if (data)
- vcpu->stat.sp_storage++;
- else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
- kvmppc_patch_dcbz(vcpu, &pte);
- } else {
- /* MMIO */
- vcpu->stat.mmio_exits++;
- vcpu->arch.paddr_accessed = pte.raddr;
- r = kvmppc_emulate_mmio(run, vcpu);
- if ( r == RESUME_HOST_NV )
- r = RESUME_HOST;
- }
-
- return r;
-}
-
-static inline int get_fpr_index(int i)
-{
-#ifdef CONFIG_VSX
- i *= 2;
-#endif
- return i;
-}
-
-/* Give up external provider (FPU, Altivec, VSX) */
-void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
-{
- struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = (u64*)t->fpr;
- int i;
-
- if (!(vcpu->arch.guest_owned_ext & msr))
- return;
-
-#ifdef DEBUG_EXT
- printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
-#endif
-
- switch (msr) {
- case MSR_FP:
- giveup_fpu(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-
- vcpu->arch.fpscr = t->fpscr.val;
- break;
- case MSR_VEC:
-#ifdef CONFIG_ALTIVEC
- giveup_altivec(current);
- memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
- vcpu->arch.vscr = t->vscr;
-#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- __giveup_vsx(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
- break;
- default:
- BUG();
- }
-
- vcpu->arch.guest_owned_ext &= ~msr;
- current->thread.regs->msr &= ~msr;
- kvmppc_recalc_shadow_msr(vcpu);
-}
-
-static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
-{
- ulong srr0 = kvmppc_get_pc(vcpu);
- u32 last_inst = kvmppc_get_last_inst(vcpu);
- int ret;
-
- ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
- if (ret == -ENOENT) {
- ulong msr = vcpu->arch.shared->msr;
-
- msr = kvmppc_set_field(msr, 33, 33, 1);
- msr = kvmppc_set_field(msr, 34, 36, 0);
- vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
- kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
- return EMULATE_AGAIN;
- }
-
- return EMULATE_DONE;
-}
-
-static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
-{
-
- /* Need to do paired single emulation? */
- if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
- return EMULATE_DONE;
-
- /* Read out the instruction */
- if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
- /* Need to emulate */
- return EMULATE_FAIL;
-
- return EMULATE_AGAIN;
-}
-
-/* Handle external providers (FPU, Altivec, VSX) */
-static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
- ulong msr)
-{
- struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = (u64*)t->fpr;
- int i;
-
- /* When we have paired singles, we emulate in software */
- if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
- return RESUME_GUEST;
-
- if (!(vcpu->arch.shared->msr & msr)) {
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- return RESUME_GUEST;
- }
-
- /* We already own the ext */
- if (vcpu->arch.guest_owned_ext & msr) {
- return RESUME_GUEST;
- }
-
-#ifdef DEBUG_EXT
- printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
-#endif
-
- current->thread.regs->msr |= msr;
-
- switch (msr) {
- case MSR_FP:
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-
- t->fpscr.val = vcpu->arch.fpscr;
- t->fpexc_mode = 0;
- kvmppc_load_up_fpu();
- break;
- case MSR_VEC:
-#ifdef CONFIG_ALTIVEC
- memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
- t->vscr = vcpu->arch.vscr;
- t->vrsave = -1;
- kvmppc_load_up_altivec();
-#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
- kvmppc_load_up_vsx();
-#endif
- break;
- default:
- BUG();
- }
-
- vcpu->arch.guest_owned_ext |= msr;
-
- kvmppc_recalc_shadow_msr(vcpu);
-
- return RESUME_GUEST;
-}
-
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
-{
- int r = RESUME_HOST;
-
- vcpu->stat.sum_exits++;
-
- run->exit_reason = KVM_EXIT_UNKNOWN;
- run->ready_for_interrupt_injection = 1;
-
- trace_kvm_book3s_exit(exit_nr, vcpu);
- kvm_resched(vcpu);
- switch (exit_nr) {
- case BOOK3S_INTERRUPT_INST_STORAGE:
- vcpu->stat.pf_instruc++;
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We set segments as unused segments when invalidating them. So
- * treat the respective fault as segment fault. */
- if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
- == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
- r = RESUME_GUEST;
- break;
- }
-#endif
-
- /* only care about PTEG not found errors, but leave NX alone */
- if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
- r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
- vcpu->stat.sp_instruc++;
- } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
- /*
- * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
- * so we can't use the NX bit inside the guest. Let's cross our fingers,
- * that no guest that needs the dcbz hack does NX.
- */
- kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
- r = RESUME_GUEST;
- } else {
- vcpu->arch.shared->msr |=
- to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- case BOOK3S_INTERRUPT_DATA_STORAGE:
- {
- ulong dar = kvmppc_get_fault_dar(vcpu);
- vcpu->stat.pf_storage++;
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We set segments as unused segments when invalidating them. So
- * treat the respective fault as segment fault. */
- if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, dar);
- r = RESUME_GUEST;
- break;
- }
-#endif
-
- /* The only case we need to handle is missing shadow PTEs */
- if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
- r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
- } else {
- vcpu->arch.shared->dar = dar;
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- }
- case BOOK3S_INTERRUPT_DATA_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- kvmppc_book3s_queue_irqprio(vcpu,
- BOOK3S_INTERRUPT_DATA_SEGMENT);
- }
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_INST_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
- kvmppc_book3s_queue_irqprio(vcpu,
- BOOK3S_INTERRUPT_INST_SEGMENT);
- }
- r = RESUME_GUEST;
- break;
- /* We're good on these - the host merely wanted to get our attention */
- case BOOK3S_INTERRUPT_DECREMENTER:
- vcpu->stat.dec_exits++;
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_EXTERNAL:
- vcpu->stat.ext_intr_exits++;
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_PERFMON:
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_PROGRAM:
- {
- enum emulation_result er;
- ulong flags;
-
-program_interrupt:
- flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
-
- if (vcpu->arch.shared->msr & MSR_PR) {
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
-#endif
- if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
- (INS_DCBZ & 0xfffffff7)) {
- kvmppc_core_queue_program(vcpu, flags);
- r = RESUME_GUEST;
- break;
- }
- }
-
- vcpu->stat.emulated_inst_exits++;
- er = kvmppc_emulate_instruction(run, vcpu);
- switch (er) {
- case EMULATE_DONE:
- r = RESUME_GUEST_NV;
- break;
- case EMULATE_AGAIN:
- r = RESUME_GUEST;
- break;
- case EMULATE_FAIL:
- printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
- __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
- kvmppc_core_queue_program(vcpu, flags);
- r = RESUME_GUEST;
- break;
- case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
- r = RESUME_HOST_NV;
- break;
- default:
- BUG();
- }
- break;
- }
- case BOOK3S_INTERRUPT_SYSCALL:
- if (vcpu->arch.osi_enabled &&
- (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
- (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
- /* MOL hypercalls */
- u64 *gprs = run->osi.gprs;
- int i;
-
- run->exit_reason = KVM_EXIT_OSI;
- for (i = 0; i < 32; i++)
- gprs[i] = kvmppc_get_gpr(vcpu, i);
- vcpu->arch.osi_needed = 1;
- r = RESUME_HOST_NV;
- } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
- (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
- /* KVM PV hypercalls */
- kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
- r = RESUME_GUEST;
- } else {
- /* Guest syscalls */
- vcpu->stat.syscall_exits++;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- case BOOK3S_INTERRUPT_FP_UNAVAIL:
- case BOOK3S_INTERRUPT_ALTIVEC:
- case BOOK3S_INTERRUPT_VSX:
- {
- int ext_msr = 0;
-
- switch (exit_nr) {
- case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
- case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
- case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
- }
-
- switch (kvmppc_check_ext(vcpu, exit_nr)) {
- case EMULATE_DONE:
- /* everything ok - let's enable the ext */
- r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
- break;
- case EMULATE_FAIL:
- /* we need to emulate this instruction */
- goto program_interrupt;
- break;
- default:
- /* nothing to worry about - go again */
- break;
- }
- break;
- }
- case BOOK3S_INTERRUPT_ALIGNMENT:
- if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
- vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
- kvmppc_get_last_inst(vcpu));
- vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
- kvmppc_get_last_inst(vcpu));
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- }
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_MACHINE_CHECK:
- case BOOK3S_INTERRUPT_TRACE:
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- break;
- default:
- /* Ugh - bork here! What did we get? */
- printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
- exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
- r = RESUME_HOST;
- BUG();
- break;
- }
-
-
- if (!(r & RESUME_HOST)) {
- /* To avoid clobbering exit_reason, only check for signals if
- * we aren't already exiting to userspace for some other
- * reason. */
- if (signal_pending(current)) {
-#ifdef EXIT_DEBUG
- printk(KERN_EMERG "KVM: Going back to host\n");
-#endif
- vcpu->stat.signal_exits++;
- run->exit_reason = KVM_EXIT_INTR;
- r = -EINTR;
- } else {
- /* In case an interrupt came in that was triggered
- * from userspace (like DEC), we need to check what
- * to inject now! */
- kvmppc_core_deliver_interrupts(vcpu);
- }
- }
-
- trace_kvm_book3s_reenter(r, vcpu);
-
- return r;
-}
-
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
return 0;
@@ -1179,69 +460,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- int i;
-
- sregs->pvr = vcpu->arch.pvr;
-
- sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
- if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
- for (i = 0; i < 64; i++) {
- sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
- sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
- }
- } else {
- for (i = 0; i < 16; i++)
- sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
-
- for (i = 0; i < 8; i++) {
- sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
- sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
- }
- }
-
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- int i;
-
- kvmppc_set_pvr(vcpu, sregs->pvr);
-
- vcpu3s->sdr1 = sregs->u.s.sdr1;
- if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
- for (i = 0; i < 64; i++) {
- vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
- sregs->u.s.ppc64.slb[i].slbe);
- }
- } else {
- for (i = 0; i < 16; i++) {
- vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
- }
- for (i = 0; i < 8; i++) {
- kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
- (u32)sregs->u.s.ppc32.ibat[i]);
- kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
- (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
- kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
- (u32)sregs->u.s.ppc32.dbat[i]);
- kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
- (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
- }
- }
-
- /* Flush the MMU after messing with the segments */
- kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
- return 0;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOTSUPP;
@@ -1296,202 +514,3 @@ out:
mutex_unlock(&kvm->slots_lock);
return r;
}
-
-int kvmppc_core_check_processor_compat(void)
-{
- return 0;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
-{
- struct kvmppc_vcpu_book3s *vcpu_book3s;
- struct kvm_vcpu *vcpu;
- int err = -ENOMEM;
- unsigned long p;
-
- vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
- if (!vcpu_book3s)
- goto out;
-
- vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
- kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
- if (!vcpu_book3s->shadow_vcpu)
- goto free_vcpu;
-
- vcpu = &vcpu_book3s->vcpu;
- err = kvm_vcpu_init(vcpu, kvm, id);
- if (err)
- goto free_shadow_vcpu;
-
- p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- /* the real shared page fills the last 4k of our page */
- vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
- if (!p)
- goto uninit_vcpu;
-
- vcpu->arch.host_retip = kvm_return_point;
- vcpu->arch.host_msr = mfmsr();
-#ifdef CONFIG_PPC_BOOK3S_64
- /* default to book3s_64 (970fx) */
- vcpu->arch.pvr = 0x3C0301;
-#else
- /* default to book3s_32 (750) */
- vcpu->arch.pvr = 0x84202;
-#endif
- kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
- vcpu_book3s->slb_nr = 64;
-
- /* remember where some real-mode handlers are */
- vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
- vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
- vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
-#ifdef CONFIG_PPC_BOOK3S_64
- vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
-#else
- vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
-#endif
-
- vcpu->arch.shadow_msr = MSR_USER64;
-
- err = kvmppc_mmu_init(vcpu);
- if (err < 0)
- goto uninit_vcpu;
-
- return vcpu;
-
-uninit_vcpu:
- kvm_vcpu_uninit(vcpu);
-free_shadow_vcpu:
- kfree(vcpu_book3s->shadow_vcpu);
-free_vcpu:
- vfree(vcpu_book3s);
-out:
- return ERR_PTR(err);
-}
-
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
-
- free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
- kvm_vcpu_uninit(vcpu);
- kfree(vcpu_book3s->shadow_vcpu);
- vfree(vcpu_book3s);
-}
-
-extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
-{
- int ret;
- double fpr[32][TS_FPRWIDTH];
- unsigned int fpscr;
- int fpexc_mode;
-#ifdef CONFIG_ALTIVEC
- vector128 vr[32];
- vector128 vscr;
- unsigned long uninitialized_var(vrsave);
- int used_vr;
-#endif
-#ifdef CONFIG_VSX
- int used_vsr;
-#endif
- ulong ext_msr;
-
- /* No need to go into the guest when all we do is going out */
- if (signal_pending(current)) {
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- /* Save FPU state in stack */
- if (current->thread.regs->msr & MSR_FP)
- giveup_fpu(current);
- memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
- fpscr = current->thread.fpscr.val;
- fpexc_mode = current->thread.fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
- /* Save Altivec state in stack */
- used_vr = current->thread.used_vr;
- if (used_vr) {
- if (current->thread.regs->msr & MSR_VEC)
- giveup_altivec(current);
- memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
- vscr = current->thread.vscr;
- vrsave = current->thread.vrsave;
- }
-#endif
-
-#ifdef CONFIG_VSX
- /* Save VSX state in stack */
- used_vsr = current->thread.used_vsr;
- if (used_vsr && (current->thread.regs->msr & MSR_VSX))
- __giveup_vsx(current);
-#endif
-
- /* Remember the MSR with disabled extensions */
- ext_msr = current->thread.regs->msr;
-
- /* XXX we get called with irq disabled - change that! */
- local_irq_enable();
-
- /* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
- kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
-
- ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
-
- local_irq_disable();
-
- current->thread.regs->msr = ext_msr;
-
- /* Make sure we save the guest FPU/Altivec/VSX state */
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
-
- /* Restore FPU state from stack */
- memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
- current->thread.fpscr.val = fpscr;
- current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
- /* Restore Altivec state from stack */
- if (used_vr && current->thread.used_vr) {
- memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
- current->thread.vscr = vscr;
- current->thread.vrsave = vrsave;
- }
- current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
- current->thread.used_vsr = used_vsr;
-#endif
-
- return ret;
-}
-
-static int kvmppc_book3s_init(void)
-{
- int r;
-
- r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
- THIS_MODULE);
-
- if (r)
- return r;
-
- r = kvmppc_mmu_hpte_sysinit();
-
- return r;
-}
-
-static void kvmppc_book3s_exit(void)
-{
- kvmppc_mmu_hpte_sysexit();
- kvm_exit();
-}
-
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index d7889ef3211e..c6d3e194b6b4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -41,36 +41,36 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
}
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
- struct kvmppc_vcpu_book3s *vcpu_book3s,
+ struct kvm_vcpu *vcpu,
gva_t eaddr)
{
int i;
u64 esid = GET_ESID(eaddr);
u64 esid_1t = GET_ESID_1T(eaddr);
- for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
u64 cmp_esid = esid;
- if (!vcpu_book3s->slb[i].valid)
+ if (!vcpu->arch.slb[i].valid)
continue;
- if (vcpu_book3s->slb[i].tb)
+ if (vcpu->arch.slb[i].tb)
cmp_esid = esid_1t;
- if (vcpu_book3s->slb[i].esid == cmp_esid)
- return &vcpu_book3s->slb[i];
+ if (vcpu->arch.slb[i].esid == cmp_esid)
+ return &vcpu->arch.slb[i];
}
dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
eaddr, esid, esid_1t);
- for (i = 0; i < vcpu_book3s->slb_nr; i++) {
- if (vcpu_book3s->slb[i].vsid)
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (vcpu->arch.slb[i].vsid)
dprintk(" %d: %c%c%c %llx %llx\n", i,
- vcpu_book3s->slb[i].valid ? 'v' : ' ',
- vcpu_book3s->slb[i].large ? 'l' : ' ',
- vcpu_book3s->slb[i].tb ? 't' : ' ',
- vcpu_book3s->slb[i].esid,
- vcpu_book3s->slb[i].vsid);
+ vcpu->arch.slb[i].valid ? 'v' : ' ',
+ vcpu->arch.slb[i].large ? 'l' : ' ',
+ vcpu->arch.slb[i].tb ? 't' : ' ',
+ vcpu->arch.slb[i].esid,
+ vcpu->arch.slb[i].vsid);
}
return NULL;
@@ -81,7 +81,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
{
struct kvmppc_slb *slb;
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
+ slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
if (!slb)
return 0;
@@ -180,7 +180,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0;
}
- slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
if (!slbe)
goto no_seg_found;
@@ -320,10 +320,10 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
esid_1t = GET_ESID_1T(rb);
slb_nr = rb & 0xfff;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
@@ -344,38 +344,35 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return 0;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
return slbe->orige;
}
static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return 0;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
return slbe->origv;
}
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
dprintk("KVM MMU: slbie(0x%llx)\n", ea);
- slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (!slbe)
return;
@@ -389,13 +386,12 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
int i;
dprintk("KVM MMU: slbia()\n");
- for (i = 1; i < vcpu_book3s->slb_nr; i++)
- vcpu_book3s->slb[i].valid = false;
+ for (i = 1; i < vcpu->arch.slb_nr; i++)
+ vcpu->arch.slb[i].valid = false;
if (vcpu->arch.shared->msr & MSR_IR) {
kvmppc_mmu_flush_segments(vcpu);
@@ -464,7 +460,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
ulong mp_ea = vcpu->arch.magic_page_ea;
if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (slb)
gvsid = slb->vsid;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
new file mode 100644
index 000000000000..bc3a2ea94217
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -0,0 +1,180 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/cputable.h>
+
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER 24
+#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
+#define HPT_HASH_MASK (HPT_NPTEG - 1)
+
+/* Pages in the VRMA are 16MB pages */
+#define VRMA_PAGE_ORDER 24
+#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
+
+/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
+#define MAX_LPID_970 63
+#define NR_LPIDS (LPID_RSVD + 1)
+unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
+
+long kvmppc_alloc_hpt(struct kvm *kvm)
+{
+ unsigned long hpt;
+ unsigned long lpid;
+
+ hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
+ HPT_ORDER - PAGE_SHIFT);
+ if (!hpt) {
+ pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
+ return -ENOMEM;
+ }
+ kvm->arch.hpt_virt = hpt;
+
+ do {
+ lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
+ if (lpid >= NR_LPIDS) {
+ pr_err("kvm_alloc_hpt: No LPIDs free\n");
+ free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+ return -ENOMEM;
+ }
+ } while (test_and_set_bit(lpid, lpid_inuse));
+
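+ /* SDR1 = HPT physical base | HTABSIZE, where HTABSIZE = log2(HPT size) - 18 */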
+ kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
+ kvm->arch.lpid = lpid;
+
+ pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
+ return 0;
+}
+
+void kvmppc_free_hpt(struct kvm *kvm)
+{
+ clear_bit(kvm->arch.lpid, lpid_inuse);
+ free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+}
+
+void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
+{
+ unsigned long i;
+ unsigned long npages = kvm->arch.ram_npages;
+ unsigned long pfn;
+ unsigned long *hpte;
+ unsigned long hash;
+ struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;
+
+ if (!pginfo)
+ return;
+
+ /* VRMA can't be > 1TB */
+ if (npages > 1ul << (40 - kvm->arch.ram_porder))
+ npages = 1ul << (40 - kvm->arch.ram_porder);
+ /* Can't use more than 1 HPTE per HPTEG */
+ if (npages > HPT_NPTEG)
+ npages = HPT_NPTEG;
+
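+ /* Create one bolted 16MB HPTE for each large page of the VRMA */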
+ for (i = 0; i < npages; ++i) {
+ pfn = pginfo[i].pfn;
+ if (!pfn)
+ break;
+ /* can't use hpt_hash since va > 64 bits */
+ hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
+ /*
+ * We assume that the hash table is empty and no
+ * vcpus are using it at this stage. Since we create
+ * at most one HPTE per HPTEG, we just assume entry 7
+ * is available and use it.
+ */
+ hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
+ hpte += 7 * 2;
+ /* HPTE low word - RPN, protection, etc. */
+ hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
+ HPTE_R_M | PP_RWXX;
+ wmb();
+ hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
+ (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
+ HPTE_V_LARGE | HPTE_V_VALID;
+ }
+}
+
+int kvmppc_mmu_hv_init(void)
+{
+ unsigned long host_lpid, rsvd_lpid;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return -EINVAL;
+
+ memset(lpid_inuse, 0, sizeof(lpid_inuse));
+
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ host_lpid = mfspr(SPRN_LPID); /* POWER7 */
+ rsvd_lpid = LPID_RSVD;
+ } else {
+ host_lpid = 0; /* PPC970 */
+ rsvd_lpid = MAX_LPID_970;
+ }
+
+ set_bit(host_lpid, lpid_inuse);
+ /* rsvd_lpid is reserved for use in partition switching */
+ set_bit(rsvd_lpid, lpid_inuse);
+
+ return 0;
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+}
+
+static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
+{
+ kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+}
+
+static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data)
+{
+ return -ENOENT;
+}
+
+void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ vcpu->arch.slb_nr = 32; /* POWER7 */
+ else
+ vcpu->arch.slb_nr = 64;
+
+ mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
+ mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
+
+ vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
+}
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
new file mode 100644
index 000000000000..ea0f8c537c28
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -0,0 +1,73 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
+long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_spapr_tce_table *stt;
+
+ /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
+ /* liobn, ioba, tce); */
+
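+ /* Find the TCE table registered for this LIOBN, if any */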
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == liobn) {
+ unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
+ struct page *page;
+ u64 *tbl;
+
+ /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p window_size=0x%x\n", */
+ /* liobn, stt, stt->window_size); */
+ if (ioba >= stt->window_size)
+ return H_PARAMETER;
+
+ page = stt->pages[idx / TCES_PER_PAGE];
+ tbl = (u64 *)page_address(page);
+
+ /* FIXME: Need to validate the TCE itself */
+ /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
+ tbl[idx % TCES_PER_PAGE] = tce;
+ return H_SUCCESS;
+ }
+ }
+
+ /* Didn't find the liobn, punt it to userspace */
+ return H_TOO_HARD;
+}
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0d..88c8f26add02 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,8 +20,11 @@
#include <linux/module.h>
#include <asm/kvm_book3s.h>
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
+#else
+EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter);
+EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_rmcall);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
@@ -30,3 +33,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
#ifdef CONFIG_VSX
EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
#endif
+#endif
+
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
new file mode 100644
index 000000000000..cc0d7f1b19ab
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Paul Mackerras <paulus@au1.ibm.com>
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ *
+ * Description: KVM functions specific to running on Book 3S
+ * processors in hypervisor mode (specifically POWER7 and later).
+ *
+ * This file is derived from arch/powerpc/kvm/book3s.c,
+ * by Alexander Graf <agraf@suse.de>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <linux/page-flags.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu_context.h>
+#include <asm/lppaca.h>
+#include <asm/processor.h>
+#include <asm/cputhreads.h>
+#include <asm/page.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+
+/*
+ * For now, limit memory to 64GB and require it to be large pages.
+ * This value is chosen because it makes the ram_pginfo array be
+ * 64kB in size, which is about as large as we want to be trying
+ * to allocate with kmalloc.
+ */
+#define MAX_MEM_ORDER 36
+
+#define LARGE_PAGE_ORDER 24 /* 16MB pages */
+
+/* #define EXIT_DEBUG */
+/* #define EXIT_DEBUG_SIMPLE */
+/* #define EXIT_DEBUG_INT */
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ local_paca->kvm_hstate.kvm_vcpu = vcpu;
+ local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu);
+static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu);
+
+void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
+{
+ u64 now;
+ unsigned long dec_nsec;
+
+ now = get_tb();
+ if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_queue_dec(vcpu);
+ if (vcpu->arch.pending_exceptions)
+ return;
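+ /* Arm an hrtimer to wake us when the guest decrementer is due to expire */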
+ if (vcpu->arch.dec_expires != ~(u64)0) {
+ dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
+ tb_ticks_per_sec;
+ hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
+ HRTIMER_MODE_REL);
+ }
+
+ kvmppc_vcpu_blocked(vcpu);
+
+ kvm_vcpu_block(vcpu);
+ vcpu->stat.halt_wakeup++;
+
+ if (vcpu->arch.dec_expires != ~(u64)0)
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+
+ kvmppc_vcpu_unblocked(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ vcpu->arch.shregs.msr = msr;
+}
+
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+{
+ vcpu->arch.pvr = pvr;
+}
+
+void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
+ pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
+ vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
+ for (r = 0; r < 16; ++r)
+ pr_err("r%2d = %.16lx r%d = %.16lx\n",
+ r, kvmppc_get_gpr(vcpu, r),
+ r+16, kvmppc_get_gpr(vcpu, r+16));
+ pr_err("ctr = %.16lx lr = %.16lx\n",
+ vcpu->arch.ctr, vcpu->arch.lr);
+ pr_err("srr0 = %.16llx srr1 = %.16llx\n",
+ vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
+ pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
+ vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
+ pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
+ vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
+ pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
+ vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
+ pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
+ pr_err("fault dar = %.16lx dsisr = %.8x\n",
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
+ for (r = 0; r < vcpu->arch.slb_max; ++r)
+ pr_err(" ESID = %.16llx VSID = %.16llx\n",
+ vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
+ pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
+ vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+ vcpu->arch.last_inst);
+}
+
+struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+{
+ int r;
+ struct kvm_vcpu *v, *ret = NULL;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(r, v, kvm) {
+ if (v->vcpu_id == id) {
+ ret = v;
+ break;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+{
+ vpa->shared_proc = 1;
+ vpa->yield_count = 1;
+}
+
+static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
+ unsigned long flags,
+ unsigned long vcpuid, unsigned long vpa)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pg_index, ra, len;
+ unsigned long pg_offset;
+ void *va;
+ struct kvm_vcpu *tvcpu;
+
+ tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
+ if (!tvcpu)
+ return H_PARAMETER;
+
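+ /* Extract the 3-bit subfunction code (IBM bits 16-18 of the flags word) */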
+ flags >>= 63 - 18;
+ flags &= 7;
+ if (flags == 0 || flags == 4)
+ return H_PARAMETER;
+ if (flags < 4) {
+ if (vpa & 0x7f)
+ return H_PARAMETER;
+ /* registering new area; convert logical addr to real */
+ pg_index = vpa >> kvm->arch.ram_porder;
+ pg_offset = vpa & (kvm->arch.ram_psize - 1);
+ if (pg_index >= kvm->arch.ram_npages)
+ return H_PARAMETER;
+ if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
+ return H_PARAMETER;
+ ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
+ ra |= pg_offset;
+ va = __va(ra);
+ if (flags <= 1)
+ len = *(unsigned short *)(va + 4);
+ else
+ len = *(unsigned int *)(va + 4);
+ if (pg_offset + len > kvm->arch.ram_psize)
+ return H_PARAMETER;
+ switch (flags) {
+ case 1: /* register VPA */
+ if (len < 640)
+ return H_PARAMETER;
+ tvcpu->arch.vpa = va;
+ init_vpa(vcpu, va);
+ break;
+ case 2: /* register DTL */
+ if (len < 48)
+ return H_PARAMETER;
+ if (!tvcpu->arch.vpa)
+ return H_RESOURCE;
+ len -= len % 48;
+ tvcpu->arch.dtl = va;
+ tvcpu->arch.dtl_end = va + len;
+ break;
+ case 3: /* register SLB shadow buffer */
+ if (len < 8)
+ return H_PARAMETER;
+ if (!tvcpu->arch.vpa)
+ return H_RESOURCE;
+ len = (len - 16) / 16;
+ tvcpu->arch.slb_shadow = va;
+ break;
+ }
+ } else {
+ switch (flags) {
+ case 5: /* unregister VPA */
+ if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
+ return H_RESOURCE;
+ tvcpu->arch.vpa = NULL;
+ break;
+ case 6: /* unregister DTL */
+ tvcpu->arch.dtl = NULL;
+ break;
+ case 7: /* unregister SLB shadow buffer */
+ tvcpu->arch.slb_shadow = NULL;
+ break;
+ }
+ }
+ return H_SUCCESS;
+}
+
+int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+{
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+ unsigned long target, ret = H_SUCCESS;
+ struct kvm_vcpu *tvcpu;
+
+ switch (req) {
+ case H_CEDE:
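+ /* Guest cedes this vcpu: block until it is prodded or has a pending exception */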
+ vcpu->arch.shregs.msr |= MSR_EE;
+ vcpu->arch.ceded = 1;
+ smp_mb();
+ if (!vcpu->arch.prodded)
+ kvmppc_vcpu_block(vcpu);
+ else
+ vcpu->arch.prodded = 0;
+ smp_mb();
+ vcpu->arch.ceded = 0;
+ break;
+ case H_PROD:
+ target = kvmppc_get_gpr(vcpu, 4);
+ tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ if (!tvcpu) {
+ ret = H_PARAMETER;
+ break;
+ }
+ tvcpu->arch.prodded = 1;
+ smp_mb();
+ if (vcpu->arch.ceded) {
+ if (waitqueue_active(&vcpu->wq)) {
+ wake_up_interruptible(&vcpu->wq);
+ vcpu->stat.halt_wakeup++;
+ }
+ }
+ break;
+ case H_CONFER:
+ break;
+ case H_REGISTER_VPA:
+ ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ break;
+ default:
+ return RESUME_HOST;
+ }
+ kvmppc_set_gpr(vcpu, 3, ret);
+ vcpu->arch.hcall_needed = 0;
+ return RESUME_GUEST;
+}
+
+static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ struct task_struct *tsk)
+{
+ int r = RESUME_HOST;
+
+ vcpu->stat.sum_exits++;
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+ switch (vcpu->arch.trap) {
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_HV_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PROGRAM:
+ {
+ ulong flags;
+ /*
+ * Normally program interrupts are delivered directly
+ * to the guest by the hardware, but we can get here
+ * as a result of a hypervisor emulation interrupt
+ * (e40) getting turned into a 700 by BML RTAS.
+ */
+ flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ }
+ case BOOK3S_INTERRUPT_SYSCALL:
+ {
+ /* hcall - punt to userspace */
+ int i;
+
+ if (vcpu->arch.shregs.msr & MSR_PR) {
+ /* sc 1 from userspace - reflect to guest syscall */
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
+ r = RESUME_GUEST;
+ break;
+ }
+ run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
+ for (i = 0; i < 9; ++i)
+ run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
+ run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->arch.hcall_needed = 1;
+ r = RESUME_HOST;
+ break;
+ }
+ /*
+ * We get these next two if the guest does a bad real-mode access,
+ * as we have enabled VRMA (virtualized real mode area) mode in the
+ * LPCR. We just generate an appropriate DSI/ISI to the guest.
+ */
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+ vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
+ vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
+ 0x08000000);
+ r = RESUME_GUEST;
+ break;
+ /*
+ * This occurs if the guest executes an illegal instruction.
+ * We just generate a program interrupt to the guest, since
+ * we don't emulate any guest instructions at this stage.
+ */
+ case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+ kvmppc_core_queue_program(vcpu, 0x80000);
+ r = RESUME_GUEST;
+ break;
+ default:
+ kvmppc_dump_regs(vcpu);
+ printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+ vcpu->arch.shregs.msr);
+ r = RESUME_HOST;
+ BUG();
+ break;
+ }
+
+
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(tsk)) {
+ vcpu->stat.signal_exits++;
+ run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ } else {
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ }
+
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ int i;
+
+ memset(sregs, 0, sizeof(struct kvm_sregs));
+ sregs->pvr = vcpu->arch.pvr;
+
+ for (i = 0; i < vcpu->arch.slb_max; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
+ }
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ int i, j;
+
+ kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ j = 0;
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
+ vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
+ vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
+ ++j;
+ }
+ }
+ vcpu->arch.slb_max = j;
+
+ return 0;
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+ return -EIO;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ int err = -EINVAL;
+ int core;
+ struct kvmppc_vcore *vcore;
+
+ core = id / threads_per_core;
+ if (core >= KVM_MAX_VCORES)
+ goto out;
+
+ err = -ENOMEM;
+ vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+ if (!vcpu)
+ goto out;
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ vcpu->arch.shared = &vcpu->arch.shregs;
+ vcpu->arch.last_cpu = -1;
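+ /* Start with the PMU counters frozen and the runlatch bit set */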
+ vcpu->arch.mmcr[0] = MMCR0_FC;
+ vcpu->arch.ctrl = CTRL_RUNLATCH;
+ /* default to host PVR, since we can't spoof it */
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+
+ kvmppc_mmu_book3s_hv_init(vcpu);
+
+ /*
+ * Some vcpus may start out in stopped state. If we initialize
+ * them to busy-in-host state they will stop other vcpus in the
+ * vcore from running. Instead we initialize them to blocked
+ * state, effectively considering them to be stopped until we
+ * see the first run ioctl for them.
+ */
+ vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
+
+ init_waitqueue_head(&vcpu->arch.cpu_run);
+
+ mutex_lock(&kvm->lock);
+ vcore = kvm->arch.vcores[core];
+ if (!vcore) {
+ vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
+ if (vcore) {
+ INIT_LIST_HEAD(&vcore->runnable_threads);
+ spin_lock_init(&vcore->lock);
+ }
+ kvm->arch.vcores[core] = vcore;
+ }
+ mutex_unlock(&kvm->lock);
+
+ if (!vcore)
+ goto free_vcpu;
+
+ spin_lock(&vcore->lock);
+ ++vcore->num_threads;
+ ++vcore->n_blocked;
+ spin_unlock(&vcore->lock);
+ vcpu->arch.vcore = vcore;
+
+ return vcpu;
+
+free_vcpu:
+ kfree(vcpu);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+ kfree(vcpu);
+}
+
+static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ spin_lock(&vc->lock);
+ vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
+ ++vc->n_blocked;
+ if (vc->n_runnable > 0 &&
+ vc->n_runnable + vc->n_blocked == vc->num_threads) {
+ vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
+ arch.run_list);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ spin_unlock(&vc->lock);
+}
+
+static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ spin_lock(&vc->lock);
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ --vc->n_blocked;
+ spin_unlock(&vc->lock);
+}
+
+extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void xics_wake_cpu(int cpu);
+
+static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *v;
+
+ if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
+ return;
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ --vc->n_runnable;
+ /* decrement the physical thread id of each following vcpu */
+ v = vcpu;
+ list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
+ --v->arch.ptid;
+ list_del(&vcpu->arch.run_list);
+}
+
+static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
+{
+ int cpu;
+ struct paca_struct *tpaca;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ cpu = vc->pcpu + vcpu->arch.ptid;
+ tpaca = &paca[cpu];
+ tpaca->kvm_hstate.kvm_vcpu = vcpu;
+ tpaca->kvm_hstate.kvm_vcore = vc;
+ smp_wmb();
+#ifdef CONFIG_PPC_ICP_NATIVE
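+ /* For secondary threads (ptid != 0), kick the hardware thread out of nap */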
+ if (vcpu->arch.ptid) {
+ tpaca->cpu_start = 0x80;
+ tpaca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST;
+ wmb();
+ xics_wake_cpu(cpu);
+ ++vc->n_woken;
+ }
+#endif
+}
+
+static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
+{
+ int i;
+
+ HMT_low();
+ i = 0;
+ while (vc->nap_count < vc->n_woken) {
+ if (++i >= 1000000) {
+ pr_err("kvmppc_wait_for_nap timeout %d %d\n",
+ vc->nap_count, vc->n_woken);
+ break;
+ }
+ cpu_relax();
+ }
+ HMT_medium();
+}
+
+/*
+ * Check that we are on thread 0 and that any other threads in
+ * this core are off-line.
+ */
+static int on_primary_thread(void)
+{
+ int cpu = smp_processor_id();
+ int thr = cpu_thread_in_core(cpu);
+
+ if (thr)
+ return 0;
+ while (++thr < threads_per_core)
+ if (cpu_online(cpu + thr))
+ return 0;
+ return 1;
+}
+
+/*
+ * Run a set of guest threads on a physical core.
+ * Called with vc->lock held.
+ */
+static int kvmppc_run_core(struct kvmppc_vcore *vc)
+{
+ struct kvm_vcpu *vcpu, *vnext;
+ long ret;
+ u64 now;
+
+ /* don't start if any threads have a signal pending */
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ if (signal_pending(vcpu->arch.run_task))
+ return 0;
+
+ /*
+ * Make sure we are running on thread 0, and that
+ * secondary threads are offline.
+ * XXX we should also block attempts to bring any
+ * secondary threads online.
+ */
+ if (threads_per_core > 1 && !on_primary_thread()) {
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ vcpu->arch.ret = -EBUSY;
+ goto out;
+ }
+
+ vc->n_woken = 0;
+ vc->nap_count = 0;
+ vc->entry_exit_count = 0;
+ vc->vcore_running = 1;
+ vc->in_guest = 0;
+ vc->pcpu = smp_processor_id();
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ kvmppc_start_thread(vcpu);
+ vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
+ arch.run_list);
+
+ spin_unlock(&vc->lock);
+
+ preempt_disable();
+ kvm_guest_enter();
+ __kvmppc_vcore_entry(NULL, vcpu);
+
+ /* wait for secondary threads to finish writing their state to memory */
+ spin_lock(&vc->lock);
+ if (vc->nap_count < vc->n_woken)
+ kvmppc_wait_for_nap(vc);
+ /* prevent other vcpu threads from doing kvmppc_start_thread() now */
+ vc->vcore_running = 2;
+ spin_unlock(&vc->lock);
+
+ /* make sure updates to secondary vcpu structs are visible now */
+ smp_mb();
+ kvm_guest_exit();
+
+ preempt_enable();
+ kvm_resched(vcpu);
+
+ now = get_tb();
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+ /* cancel pending dec exception if dec is positive */
+ if (now < vcpu->arch.dec_expires &&
+ kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_dequeue_dec(vcpu);
+ if (!vcpu->arch.trap) {
+ if (signal_pending(vcpu->arch.run_task)) {
+ vcpu->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ }
+ continue; /* didn't get to run */
+ }
+ ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
+ vcpu->arch.run_task);
+ vcpu->arch.ret = ret;
+ vcpu->arch.trap = 0;
+ }
+
+ spin_lock(&vc->lock);
+ out:
+ vc->vcore_running = 0;
+ list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+ arch.run_list) {
+ if (vcpu->arch.ret != RESUME_GUEST) {
+ kvmppc_remove_runnable(vc, vcpu);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ }
+
+ return 1;
+}
+
+static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ptid;
+ int wait_state;
+ struct kvmppc_vcore *vc;
+ DEFINE_WAIT(wait);
+
+ /* No need to go into the guest when all we do is going out */
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+ /* On PPC970, check that we have an RMA region */
+ if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
+ return -EPERM;
+
+ kvm_run->exit_reason = 0;
+ vcpu->arch.ret = RESUME_GUEST;
+ vcpu->arch.trap = 0;
+
+ flush_fp_to_thread(current);
+ flush_altivec_to_thread(current);
+ flush_vsx_to_thread(current);
+
+ /*
+ * Synchronize with other threads in this virtual core
+ */
+ vc = vcpu->arch.vcore;
+ spin_lock(&vc->lock);
+ /* This happens the first time this is called for a vcpu */
+ if (vcpu->arch.state == KVMPPC_VCPU_BLOCKED)
+ --vc->n_blocked;
+ vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+ ptid = vc->n_runnable;
+ vcpu->arch.run_task = current;
+ vcpu->arch.kvm_run = kvm_run;
+ vcpu->arch.ptid = ptid;
+ list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
+ ++vc->n_runnable;
+
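+ /* Loop, sleeping as needed, until this vcpu has run or a signal arrives */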
+ wait_state = TASK_INTERRUPTIBLE;
+ while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
+ if (signal_pending(current)) {
+ if (!vc->vcore_running) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ break;
+ }
+ /* have to wait for vcore to stop executing guest */
+ wait_state = TASK_UNINTERRUPTIBLE;
+ smp_send_reschedule(vc->pcpu);
+ }
+
+ if (!vc->vcore_running &&
+ vc->n_runnable + vc->n_blocked == vc->num_threads) {
+ /* we can run now */
+ if (kvmppc_run_core(vc))
+ continue;
+ }
+
+ if (vc->vcore_running == 1 && VCORE_EXIT_COUNT(vc) == 0)
+ kvmppc_start_thread(vcpu);
+
+ /* wait for other threads to come in, or wait for vcore */
+ prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
+ spin_unlock(&vc->lock);
+ schedule();
+ finish_wait(&vcpu->arch.cpu_run, &wait);
+ spin_lock(&vc->lock);
+ }
+
+ if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
+ kvmppc_remove_runnable(vc, vcpu);
+ spin_unlock(&vc->lock);
+
+ return vcpu->arch.ret;
+}
+
+int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ do {
+ r = kvmppc_run_vcpu(run, vcpu);
+
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
+ !(vcpu->arch.shregs.msr & MSR_PR)) {
+ r = kvmppc_pseries_do_hcall(vcpu);
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ } while (r == RESUME_GUEST);
+ return r;
+}
+
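+/* Number of pages needed to hold the TCE table covering window_size bytes */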
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+ return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+ * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+ struct kvm *kvm = stt->kvm;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ list_del(&stt->list);
+ for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+ __free_page(stt->pages[i]);
+ kfree(stt);
+ mutex_unlock(&kvm->lock);
+
+ kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+ return VM_FAULT_SIGBUS;
+
+ page = stt->pages[vmf->pgoff];
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+ .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &kvm_spapr_tce_vm_ops;
+ return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+ release_spapr_tce_table(stt);
+ return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+ .mmap = kvm_spapr_tce_mmap,
+ .release = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args)
+{
+ struct kvmppc_spapr_tce_table *stt = NULL;
+ long npages;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Check this LIOBN hasn't been previously allocated */
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == args->liobn)
+ return -EBUSY;
+ }
+
+ npages = kvmppc_stt_npages(args->window_size);
+
+ stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!stt)
+ goto fail;
+
+ stt->liobn = args->liobn;
+ stt->window_size = args->window_size;
+ stt->kvm = kvm;
+
+ for (i = 0; i < npages; i++) {
+ stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!stt->pages[i])
+ goto fail;
+ }
+
+ kvm_get_kvm(kvm);
+
+ mutex_lock(&kvm->lock);
+ list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ mutex_unlock(&kvm->lock);
+
+ return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR);
+
+fail:
+ if (stt) {
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
+
+ kfree(stt);
+ }
+ return ret;
+}
+
+/* Work out RMLS (real mode limit selector) field value for a given RMA size.
+ Assumes POWER7 or PPC970. */
+static inline int lpcr_rmls(unsigned long rma_size)
+{
+ switch (rma_size) {
+ case 32ul << 20: /* 32 MB */
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ return 8; /* only supported on POWER7 */
+ return -1;
+ case 64ul << 20: /* 64 MB */
+ return 3;
+ case 128ul << 20: /* 128 MB */
+ return 7;
+ case 256ul << 20: /* 256 MB */
+ return 4;
+ case 1ul << 30: /* 1 GB */
+ return 2;
+ case 16ul << 30: /* 16 GB */
+ return 1;
+ case 256ul << 30: /* 256 GB */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_rma_info *ri = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= ri->npages)
+ return VM_FAULT_SIGBUS;
+
+ page = pfn_to_page(ri->base_pfn + vmf->pgoff);
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_rma_vm_ops = {
+ .fault = kvm_rma_fault,
+};
+
+static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &kvm_rma_vm_ops;
+ return 0;
+}
+
+static int kvm_rma_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_rma_info *ri = filp->private_data;
+
+ kvm_release_rma(ri);
+ return 0;
+}
+
+static struct file_operations kvm_rma_fops = {
+ .mmap = kvm_rma_mmap,
+ .release = kvm_rma_release,
+};
+
+long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
+{
+ struct kvmppc_rma_info *ri;
+ long fd;
+
+ ri = kvm_alloc_rma();
+ if (!ri)
+ return -ENOMEM;
+
+ fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
+ if (fd < 0) {
+ kvm_release_rma(ri);
+ return fd;
+ }
+
+ ret->rma_size = ri->npages << PAGE_SHIFT;
+ return fd;
+}
+
+static struct page *hva_to_page(unsigned long addr)
+{
+ struct page *page[1];
+ int npages;
+
+ might_sleep();
+
+ npages = get_user_pages_fast(addr, 1, 1, page);
+
+ if (unlikely(npages != 1))
+ return NULL;
+
+ return page[0];
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ unsigned long psize, porder;
+ unsigned long i, npages, totalpages;
+ unsigned long pg_ix;
+ struct kvmppc_pginfo *pginfo;
+ unsigned long hva;
+ struct kvmppc_rma_info *ri = NULL;
+ struct page *page;
+
+ /* For now, only allow 16MB pages */
+ porder = LARGE_PAGE_ORDER;
+ psize = 1ul << porder;
+ if ((mem->memory_size & (psize - 1)) ||
+ (mem->guest_phys_addr & (psize - 1))) {
+ pr_err("bad memory_size=%llx @ %llx\n",
+ mem->memory_size, mem->guest_phys_addr);
+ return -EINVAL;
+ }
+
+ npages = mem->memory_size >> porder;
+ totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder;
+
+ /* More memory than we have space to track? */
+ if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER)))
+ return -EINVAL;
+
+ /* Do we already have an RMA registered? */
+ if (mem->guest_phys_addr == 0 && kvm->arch.rma)
+ return -EINVAL;
+
+ if (totalpages > kvm->arch.ram_npages)
+ kvm->arch.ram_npages = totalpages;
+
+ /* Is this one of our preallocated RMAs? */
+ if (mem->guest_phys_addr == 0) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, mem->userspace_addr);
+ if (vma && vma->vm_file &&
+ vma->vm_file->f_op == &kvm_rma_fops &&
+ mem->userspace_addr == vma->vm_start)
+ ri = vma->vm_file->private_data;
+ up_read(&current->mm->mmap_sem);
+ if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
+ pr_err("CPU requires an RMO\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ri) {
+ unsigned long rma_size;
+ unsigned long lpcr;
+ long rmls;
+
+ rma_size = ri->npages << PAGE_SHIFT;
+ if (rma_size > mem->memory_size)
+ rma_size = mem->memory_size;
+ rmls = lpcr_rmls(rma_size);
+ if (rmls < 0) {
+ pr_err("Can't use RMA of 0x%lx bytes\n", rma_size);
+ return -EINVAL;
+ }
+ atomic_inc(&ri->use_count);
+ kvm->arch.rma = ri;
+ kvm->arch.n_rma_pages = rma_size >> porder;
+
+ /* Update LPCR and RMOR */
+ lpcr = kvm->arch.lpcr;
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970; insert RMLS value (split field) in HID4 */
+ lpcr &= ~((1ul << HID4_RMLS0_SH) |
+ (3ul << HID4_RMLS2_SH));
+ lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+ ((rmls & 3) << HID4_RMLS2_SH);
+ /* RMOR is also in HID4 */
+ lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
+ << HID4_RMOR_SH;
+ } else {
+ /* POWER7 */
+ lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
+ lpcr |= rmls << LPCR_RMLS_SH;
+ kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+ }
+ kvm->arch.lpcr = lpcr;
+ pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
+ ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
+ }
+
+ pg_ix = mem->guest_phys_addr >> porder;
+ pginfo = kvm->arch.ram_pginfo + pg_ix;
+ for (i = 0; i < npages; ++i, ++pg_ix) {
+ if (ri && pg_ix < kvm->arch.n_rma_pages) {
+ pginfo[i].pfn = ri->base_pfn +
+ (pg_ix << (porder - PAGE_SHIFT));
+ continue;
+ }
+ hva = mem->userspace_addr + (i << porder);
+ page = hva_to_page(hva);
+ if (!page) {
+ pr_err("oops, no pfn for hva %lx\n", hva);
+ goto err;
+ }
+ /* Check it's a 16MB page */
+ if (!PageHead(page) ||
+ compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
+ pr_err("page at %lx isn't 16MB (o=%d)\n",
+ hva, compound_order(page));
+ goto err;
+ }
+ pginfo[i].pfn = page_to_pfn(page);
+ }
+
+ return 0;
+
+ err:
+ return -EINVAL;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
+ !kvm->arch.rma)
+ kvmppc_map_vrma(kvm, mem);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ long r;
+ unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
+ long err = -ENOMEM;
+ unsigned long lpcr;
+
+ /* Allocate hashed page table */
+ r = kvmppc_alloc_hpt(kvm);
+ if (r)
+ return r;
+
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+
+ kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
+ GFP_KERNEL);
+ if (!kvm->arch.ram_pginfo) {
+ pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
+ npages * sizeof(struct kvmppc_pginfo));
+ goto out_free;
+ }
+
+ kvm->arch.ram_npages = 0;
+ kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
+ kvm->arch.ram_porder = LARGE_PAGE_ORDER;
+ kvm->arch.rma = NULL;
+ kvm->arch.n_rma_pages = 0;
+
+ kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970; HID4 is effectively the LPCR */
+ unsigned long lpid = kvm->arch.lpid;
+ kvm->arch.host_lpid = 0;
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
+ lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
+ lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
+ ((lpid & 0xf) << HID4_LPID5_SH);
+ } else {
+ /* POWER7; init LPCR for virtual RMA mode */
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+ LPCR_VPM0 | LPCR_VRMA_L;
+ }
+ kvm->arch.lpcr = lpcr;
+
+ return 0;
+
+ out_free:
+ kvmppc_free_hpt(kvm);
+ return err;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+ struct kvmppc_pginfo *pginfo;
+ unsigned long i;
+
+ if (kvm->arch.ram_pginfo) {
+ pginfo = kvm->arch.ram_pginfo;
+ kvm->arch.ram_pginfo = NULL;
+ for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i)
+ if (pginfo[i].pfn)
+ put_page(pfn_to_page(pginfo[i].pfn));
+ kfree(pginfo);
+ }
+ if (kvm->arch.rma) {
+ kvm_release_rma(kvm->arch.rma);
+ kvm->arch.rma = NULL;
+ }
+
+ kvmppc_free_hpt(kvm);
+ WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+}
+
+/* These are stubs for now */
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+}
+
+/* We don't need to emulate any privileged instructions or dcbz */
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
+{
+ return EMULATE_FAIL;
+}
+
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+ return EMULATE_FAIL;
+}
+
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+ return EMULATE_FAIL;
+}
+
+static int kvmppc_book3s_hv_init(void)
+{
+ int r;
+
+ r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+ if (r)
+ return r;
+
+ r = kvmppc_mmu_hv_init();
+
+ return r;
+}
+
+static void kvmppc_book3s_hv_exit(void)
+{
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_hv_init);
+module_exit(kvmppc_book3s_hv_exit);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
new file mode 100644
index 000000000000..d43120355eec
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+
+#include <asm/cputable.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+
+/*
+ * This maintains a list of RMAs (real mode areas) for KVM guests to use.
+ * Each RMA has to be physically contiguous and of a size that the
+ * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
+ * and other larger sizes. Since we are unlikely to be able to allocate
+ * that much physically contiguous memory after the system is up and running,
+ * we preallocate a set of RMAs in early boot for KVM to use.
+ */
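+/*
+ * For example, booting with "kvm_rma_size=64M kvm_rma_count=4" on the
+ * kernel command line preallocates four 64MB RMAs at boot.
+ */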
+static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
+static unsigned long kvm_rma_count;
+
+static int __init early_parse_rma_size(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_size = memparse(p, &p);
+
+ return 0;
+}
+early_param("kvm_rma_size", early_parse_rma_size);
+
+static int __init early_parse_rma_count(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_count = simple_strtoul(p, NULL, 0);
+
+ return 0;
+}
+early_param("kvm_rma_count", early_parse_rma_count);
+
+static struct kvmppc_rma_info *rma_info;
+static LIST_HEAD(free_rmas);
+static DEFINE_SPINLOCK(rma_lock);
+
+/* Work out RMLS (real mode limit selector) field value for a given RMA size.
+ Assumes POWER7 or PPC970. */
+static inline int lpcr_rmls(unsigned long rma_size)
+{
+ switch (rma_size) {
+ case 32ul << 20: /* 32 MB */
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ return 8; /* only supported on POWER7 */
+ return -1;
+ case 64ul << 20: /* 64 MB */
+ return 3;
+ case 128ul << 20: /* 128 MB */
+ return 7;
+ case 256ul << 20: /* 256 MB */
+ return 4;
+ case 1ul << 30: /* 1 GB */
+ return 2;
+ case 16ul << 30: /* 16 GB */
+ return 1;
+ case 256ul << 30: /* 256 GB */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Called at boot time while the bootmem allocator is active,
+ * to allocate contiguous physical memory for the real memory
+ * areas for guests.
+ */
+void kvm_rma_init(void)
+{
+ unsigned long i;
+ unsigned long j, npages;
+ void *rma;
+ struct page *pg;
+
+ /* Only do this on PPC970 in HV mode */
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_201))
+ return;
+
+ if (!kvm_rma_size || !kvm_rma_count)
+ return;
+
+ /* Check that the requested size is one supported in hardware */
+ if (lpcr_rmls(kvm_rma_size) < 0) {
+ pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+ return;
+ }
+
+ npages = kvm_rma_size >> PAGE_SHIFT;
+ rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
+ for (i = 0; i < kvm_rma_count; ++i) {
+ rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
+ pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
+ kvm_rma_size >> 20);
+ rma_info[i].base_virt = rma;
+ rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
+ rma_info[i].npages = npages;
+ list_add_tail(&rma_info[i].list, &free_rmas);
+ atomic_set(&rma_info[i].use_count, 0);
+
+ pg = pfn_to_page(rma_info[i].base_pfn);
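+ /* Take a reference on every page so the RMA memory is never freed */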
+ for (j = 0; j < npages; ++j) {
+ atomic_inc(&pg->_count);
+ ++pg;
+ }
+ }
+}
+
+struct kvmppc_rma_info *kvm_alloc_rma(void)
+{
+ struct kvmppc_rma_info *ri;
+
+ ri = NULL;
+ spin_lock(&rma_lock);
+ if (!list_empty(&free_rmas)) {
+ ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list);
+ list_del(&ri->list);
+ atomic_inc(&ri->use_count);
+ }
+ spin_unlock(&rma_lock);
+ return ri;
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_rma);
+
+void kvm_release_rma(struct kvmppc_rma_info *ri)
+{
+ if (atomic_dec_and_test(&ri->use_count)) {
+ spin_lock(&rma_lock);
+ list_add_tail(&ri->list, &free_rmas);
+ spin_unlock(&rma_lock);
+
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_release_rma);
+
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
new file mode 100644
index 000000000000..3f7b674dd4bf
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -0,0 +1,166 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * Derived from book3s_interrupts.S, which is:
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+#include <asm/ppc-opcode.h>
+
+/*****************************************************************************
+ * *
+ * Guest entry / exit code that is in kernel module memory (vmalloc) *
+ * *
+ ****************************************************************************/
+
+/* Registers:
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcore_entry)
+
+ /* Write correct stack frame */
+ mflr r0
+ std r0,PPC_LR_STKOFF(r1)
+
+ /* Save host state to the stack */
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+
+ /* Save non-volatile registers (r14 - r31) */
+ SAVE_NVGPRS(r1)
+
+ /* Save host DSCR */
+BEGIN_FTR_SECTION
+ mfspr r3, SPRN_DSCR
+ std r3, HSTATE_DSCR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save host DABR */
+ mfspr r3, SPRN_DABR
+ std r3, HSTATE_DABR(r13)
+
+ /* Hard-disable interrupts */
+ mfmsr r10
+ std r10, HSTATE_HOST_MSR(r13)
+ rldicl r10,r10,48,1
+ rotldi r10,r10,16
+ mtmsrd r10,1
+
+ /* Save host PMU registers and load guest PMU registers */
+ /* R4 is live here (vcpu pointer) but not r3 or r5 */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r7, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
+ isync
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r5, LPPACA_PMCINUSE(r3)
+ cmpwi r5, 0
+ beq 31f /* skip if not */
+ mfspr r5, SPRN_MMCR1
+ mfspr r6, SPRN_MMCRA
+ std r7, HSTATE_MMCR(r13)
+ std r5, HSTATE_MMCR + 8(r13)
+ std r6, HSTATE_MMCR + 16(r13)
+ mfspr r3, SPRN_PMC1
+ mfspr r5, SPRN_PMC2
+ mfspr r6, SPRN_PMC3
+ mfspr r7, SPRN_PMC4
+ mfspr r8, SPRN_PMC5
+ mfspr r9, SPRN_PMC6
+BEGIN_FTR_SECTION
+ mfspr r10, SPRN_PMC7
+ mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ stw r3, HSTATE_PMC(r13)
+ stw r5, HSTATE_PMC + 4(r13)
+ stw r6, HSTATE_PMC + 8(r13)
+ stw r7, HSTATE_PMC + 12(r13)
+ stw r8, HSTATE_PMC + 16(r13)
+ stw r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ stw r10, HSTATE_PMC + 24(r13)
+ stw r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+31:
+
+ /*
+ * Put whatever is in the decrementer into the
+ * hypervisor decrementer.
+ */
+ mfspr r8,SPRN_DEC
+ mftb r7
+ mtspr SPRN_HDEC,r8
+ extsw r8,r8
+ add r8,r8,r7
+ std r8,HSTATE_DECEXP(r13)
+
+ /*
+ * On PPC970, if the guest vcpu has an external interrupt pending,
+ * send ourselves an IPI so as to interrupt the guest once it
+ * enables interrupts. (It must have interrupts disabled,
+ * otherwise we would already have delivered the interrupt.)
+ */
+BEGIN_FTR_SECTION
+ ld r0, VCPU_PENDING_EXC(r4)
+ li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
+ oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ and. r0, r0, r7
+ beq 32f
+ mr r31, r4
+ lhz r3, PACAPACAINDEX(r13)
+ bl smp_send_reschedule
+ nop
+ mr r4, r31
+32:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+
+ /* Jump to partition switch code */
+ bl .kvmppc_hv_entry_trampoline
+ nop
+
+/*
+ * We return here in virtual mode after the guest exits
+ * with something that we can't handle in real mode.
+ * Interrupts are enabled again at this point.
+ */
+
+.global kvmppc_handler_highmem
+kvmppc_handler_highmem:
+
+ /*
+ * Register usage at this point:
+ *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = PACA
+ */
+
+ /* Restore non-volatile host registers (r14 - r31) */
+ REST_NVGPRS(r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
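The DEC/HDEC handoff in __kvmppc_vcore_entry above records the host decrementer
as an absolute timebase expiry, which the exit path in book3s_hv_rmhandlers.S
converts back. Roughly, in C (a sketch):

        /* entry: the guest may run at most until the host DEC would fire */
        s32 dec = mfspr(SPRN_DEC);
        mtspr(SPRN_HDEC, dec);
        hstate->decexp = get_tb() + dec;        /* sign-extended, as extsw does */

        /* exit (see "Reload DEC" in book3s_hv_rmhandlers.S): */
        mtspr(SPRN_DEC, hstate->decexp - get_tb());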
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
new file mode 100644
index 000000000000..fcfe6b055558
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -0,0 +1,370 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/hugetlb.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER 24
+#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
+#define HPT_HASH_MASK (HPT_NPTEG - 1)
+
+#define HPTE_V_HVLOCK 0x40UL
+
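A quick sanity check on the geometry these constants encode:

        /* HPT_ORDER 24      -> table size  1ul << 24       = 16 MB
         * 128 B per PTEG    -> HPT_NPTEG   1ul << (24 - 7) = 131072
         * 8 HPTEs per PTEG  -> HPT_NPTEG << 3              = 1048576 entries,
         *                      the pte_index bound checked in each handler below
         */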
+static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
+{
+ unsigned long tmp, old;
+
+ asm volatile(" ldarx %0,0,%2\n"
+ " and. %1,%0,%3\n"
+ " bne 2f\n"
+ " ori %0,%0,%4\n"
+ " stdcx. %0,0,%2\n"
+ " beq+ 2f\n"
+ " li %1,%3\n"
+ "2: isync"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+ : "cc", "memory");
+ return old == 0;
+}
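Note that the asm above is a single ldarx/stdcx. attempt, not a spin loop;
callers retry with cpu_relax(). A rough C rendering using GCC atomics (a sketch
only: LL/SC can also fail spuriously where a compare-exchange would succeed,
and the trailing isync is what provides the acquire ordering):

        static inline long lock_hpte_sketch(unsigned long *hpte, unsigned long bits)
        {
                unsigned long old = __atomic_load_n(hpte, __ATOMIC_RELAXED);

                if (old & bits)         /* lock (or valid) bit already set */
                        return 0;
                return __atomic_compare_exchange_n(hpte, &old,
                                                   old | HPTE_V_HVLOCK, false,
                                                   __ATOMIC_ACQUIRE,
                                                   __ATOMIC_RELAXED);
        }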
+
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel)
+{
+ unsigned long porder;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long i, lpn, pa;
+ unsigned long *hpte;
+
+ /* only handle 4k, 64k and 16M pages for now */
+ porder = 12;
+ if (pteh & HPTE_V_LARGE) {
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (ptel & 0xf000) == 0x1000) {
+ /* 64k page */
+ porder = 16;
+ } else if ((ptel & 0xff000) == 0) {
+ /* 16M page */
+ porder = 24;
+ /* lowest AVA bit must be 0 for 16M pages */
+ if (pteh & 0x80)
+ return H_PARAMETER;
+ } else
+ return H_PARAMETER;
+ }
+ lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
+ if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
+ return H_PARAMETER;
+ pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
+ if (!pa)
+ return H_PARAMETER;
+ /* Check WIMG */
+ if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
+ (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
+ return H_PARAMETER;
+ pteh &= ~0x60UL;
+ ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
+ ptel |= pa;
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ if (likely((flags & H_EXACT) == 0)) {
+ pte_index &= ~7UL;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ for (i = 0; ; ++i) {
+ if (i == 8)
+ return H_PTEG_FULL;
+ if ((*hpte & HPTE_V_VALID) == 0 &&
+ lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+ break;
+ hpte += 2;
+ }
+ } else {
+ i = 0;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+ return H_PTEG_FULL;
+ }
+ hpte[1] = ptel;
+ eieio();
+ hpte[0] = pteh;
+ asm volatile("ptesync" : : : "memory");
+ atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
+ vcpu->arch.gpr[4] = pte_index + i;
+ return H_SUCCESS;
+}
+
+static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ unsigned long rb, va_low;
+
+ rb = (v & ~0x7fUL) << 16; /* AVA field */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+ /* xor vsid from AVA */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> 12;
+ else
+ va_low ^= v >> 24;
+ va_low &= 0x7ff;
+ if (v & HPTE_V_LARGE) {
+ rb |= 1; /* L field */
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (r & 0xff000)) {
+ /* non-16MB large page, must be 64k */
+ /* (masks depend on page size) */
+ rb |= 0x1000; /* page encoding in LP field */
+ rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+ rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ }
+ } else {
+ /* 4kB page */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+
+static inline int try_lock_tlbie(unsigned int *lock)
+{
+ unsigned int tmp, old;
+ unsigned int token = LOCK_TOKEN;
+
+ asm volatile("1:lwarx %1,0,%2\n"
+ " cmpwi cr0,%1,0\n"
+ " bne 2f\n"
+ " stwcx. %3,0,%2\n"
+ " bne- 1b\n"
+ " isync\n"
+ "2:"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (lock), "r" (token)
+ : "cc", "memory");
+ return old == 0;
+}
+
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte;
+ unsigned long v, r, rb;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
+ ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
+ hpte[0] &= ~HPTE_V_HVLOCK;
+ return H_NOT_FOUND;
+ }
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ flags |= H_LOCAL;
+ vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
+ vcpu->arch.gpr[5] = r = hpte[1];
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = 0;
+ if (!(flags & H_LOCAL)) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
+ return H_SUCCESS;
+}
+
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *args = &vcpu->arch.gpr[4];
+ unsigned long *hp, tlbrb[4];
+ long int i, found;
+ long int n_inval = 0;
+ unsigned long flags, req, pte_index;
+ long int local = 0;
+ long int ret = H_SUCCESS;
+
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ local = 1;
+ for (i = 0; i < 4; ++i) {
+ pte_index = args[i * 2];
+ flags = pte_index >> 56;
+ pte_index &= ((1ul << 56) - 1);
+ req = flags >> 6;
+ flags &= 3;
+ if (req == 3)
+ break;
+ if (req != 1 || flags == 3 ||
+ pte_index >= (HPT_NPTEG << 3)) {
+ /* parameter error */
+ args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
+ ret = H_PARAMETER;
+ break;
+ }
+ hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hp, HPTE_V_HVLOCK))
+ cpu_relax();
+ found = 0;
+ if (hp[0] & HPTE_V_VALID) {
+ switch (flags & 3) {
+ case 0: /* absolute */
+ found = 1;
+ break;
+ case 1: /* andcond */
+ if (!(hp[0] & args[i * 2 + 1]))
+ found = 1;
+ break;
+ case 2: /* AVPN */
+ if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ hp[0] &= ~HPTE_V_HVLOCK;
+ args[i * 2] = ((0x90 | flags) << 56) + pte_index;
+ continue;
+ }
+ /* insert R and C bits from PTE */
+ flags |= (hp[1] >> 5) & 0x0c;
+ args[i * 2] = ((0x80 | flags) << 56) + pte_index;
+ tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+ hp[0] = 0;
+ }
+ if (n_inval == 0)
+ return ret;
+
+ if (!local) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < n_inval; ++i)
+ asm volatile(PPC_TLBIE(%1,%0)
+ : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < n_inval; ++i)
+ asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
+ asm volatile("ptesync" : : : "memory");
+ }
+ return ret;
+}
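For readers following the shifts above, this is the layout of each request
doubleword as the loop interprets it (field positions inferred from the code,
not checked against the PAPR spec):

        static inline void bulk_remove_decode(unsigned long w, unsigned long *req,
                                              unsigned long *flags,
                                              unsigned long *idx)
        {
                *req   = w >> 62;                /* 1 = valid, 3 = end of list */
                *flags = (w >> 56) & 3;          /* 0 absolute, 1 andcond, 2 AVPN */
                *idx   = w & ((1ul << 56) - 1);  /* pte_index */
        }

On return the top byte is rewritten in place: 0x80|flags for success (with the
R and C bits folded into bits 59:58), 0x90|flags for not found, and 0xa0|flags
for a parameter error.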
+
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte;
+ unsigned long v, r, rb;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
+ hpte[0] &= ~HPTE_V_HVLOCK;
+ return H_NOT_FOUND;
+ }
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ flags |= H_LOCAL;
+ v = hpte[0];
+ r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
+ HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+ r |= (flags << 55) & HPTE_R_PP0;
+ r |= (flags << 48) & HPTE_R_KEY_HI;
+ r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = v & ~HPTE_V_VALID;
+ if (!(flags & H_LOCAL)) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
+ hpte[1] = r;
+ eieio();
+ hpte[0] = v & ~HPTE_V_HVLOCK;
+ asm volatile("ptesync" : : : "memory");
+ return H_SUCCESS;
+}
+
+static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
+{
+ long int i;
+ unsigned long offset, rpn;
+
+ offset = realaddr & (kvm->arch.ram_psize - 1);
+ rpn = (realaddr - offset) >> PAGE_SHIFT;
+ for (i = 0; i < kvm->arch.ram_npages; ++i)
+ if (rpn == kvm->arch.ram_pginfo[i].pfn)
+ return (i << PAGE_SHIFT) + offset;
+ return HPTE_R_RPN; /* all 1s in the RPN field */
+}
+
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte, r;
+ int i, n = 1;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ if (flags & H_READ_4) {
+ pte_index &= ~3;
+ n = 4;
+ }
+ for (i = 0; i < n; ++i, ++pte_index) {
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ r = hpte[1];
+ if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
+ r = reverse_xlate(kvm, r & HPTE_R_RPN) |
+ (r & ~HPTE_R_RPN);
+ vcpu->arch.gpr[4 + i * 2] = hpte[0];
+ vcpu->arch.gpr[5 + i * 2] = r;
+ }
+ return H_SUCCESS;
+}
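Two things worth noting about H_READ as implemented above: results are returned
in-register, and the optional reverse translation is a linear scan. In outline
(sketch):

        /* returns HPTE pairs in gpr[4]/gpr[5], gpr[6]/gpr[7], ... (n = 1 or 4);
         * with H_R_XLATE, reverse_xlate() walks all of ram_pginfo[] --
         * O(ram_npages) per PTE -- and yields an all-ones RPN (HPTE_R_RPN)
         * when the real page does not belong to guest RAM at all.
         */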
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
new file mode 100644
index 000000000000..6dd33581a228
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -0,0 +1,1345 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * Derived from book3s_rmhandlers.S and other files, which are:
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+
+/*****************************************************************************
+ * *
+ * Real Mode handlers that need to be in the linear mapping *
+ * *
+ ****************************************************************************/
+
+ .globl kvmppc_skip_interrupt
+kvmppc_skip_interrupt:
+ mfspr r13,SPRN_SRR0
+ addi r13,r13,4
+ mtspr SPRN_SRR0,r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+ .globl kvmppc_skip_Hinterrupt
+kvmppc_skip_Hinterrupt:
+ mfspr r13,SPRN_HSRR0
+ addi r13,r13,4
+ mtspr SPRN_HSRR0,r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
+/*
+ * Call kvmppc_handler_trampoline_enter in real mode.
+ * Must be called with interrupts hard-disabled.
+ *
+ * Input Registers:
+ *
+ * LR = return address to continue at after eventually re-enabling MMU
+ */
+_GLOBAL(kvmppc_hv_entry_trampoline)
+ mfmsr r10
+ LOAD_REG_ADDR(r5, kvmppc_hv_entry)
+ li r0,MSR_RI
+ andc r0,r10,r0
+ li r6,MSR_IR | MSR_DR
+ andc r6,r10,r6
+ mtmsrd r0,1 /* clear RI in MSR */
+ mtsrr0 r5
+ mtsrr1 r6
+ RFI
+
+#define ULONG_SIZE 8
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+#define XICS_XIRR 4
+#define XICS_QIRR 0xc
+
+/*
+ * We come in here when we are woken from nap mode on a secondary hw thread.
+ * Relocation is off and most register values are lost.
+ * r13 points to the PACA.
+ */
+ .globl kvm_start_guest
+kvm_start_guest:
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
+ /* get vcpu pointer */
+ ld r4, HSTATE_KVM_VCPU(r13)
+
+ /* We got here with an IPI; clear it */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r0, 0xff
+ li r6, XICS_QIRR
+ li r7, XICS_XIRR
+ lwzcix r8, r5, r7 /* ack the interrupt */
+ sync
+ stbcix r0, r5, r6 /* clear it */
+ stwcix r8, r5, r7 /* EOI it */
+
+.global kvmppc_hv_entry
+kvmppc_hv_entry:
+
+ /* Required state:
+ *
+ * R4 = vcpu pointer
+ * MSR = ~IR|DR
+ * R13 = PACA
+ * R1 = host R1
+ * all other volatile GPRS = free
+ */
+ mflr r0
+ std r0, HSTATE_VMHANDLER(r13)
+
+ ld r14, VCPU_GPR(r14)(r4)
+ ld r15, VCPU_GPR(r15)(r4)
+ ld r16, VCPU_GPR(r16)(r4)
+ ld r17, VCPU_GPR(r17)(r4)
+ ld r18, VCPU_GPR(r18)(r4)
+ ld r19, VCPU_GPR(r19)(r4)
+ ld r20, VCPU_GPR(r20)(r4)
+ ld r21, VCPU_GPR(r21)(r4)
+ ld r22, VCPU_GPR(r22)(r4)
+ ld r23, VCPU_GPR(r23)(r4)
+ ld r24, VCPU_GPR(r24)(r4)
+ ld r25, VCPU_GPR(r25)(r4)
+ ld r26, VCPU_GPR(r26)(r4)
+ ld r27, VCPU_GPR(r27)(r4)
+ ld r28, VCPU_GPR(r28)(r4)
+ ld r29, VCPU_GPR(r29)(r4)
+ ld r30, VCPU_GPR(r30)(r4)
+ ld r31, VCPU_GPR(r31)(r4)
+
+ /* Load guest PMU registers */
+ /* R4 is live here (vcpu pointer) */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+ lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
+ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
+ lwz r6, VCPU_PMC + 8(r4)
+ lwz r7, VCPU_PMC + 12(r4)
+ lwz r8, VCPU_PMC + 16(r4)
+ lwz r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+ lwz r10, VCPU_PMC + 24(r4)
+ lwz r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r5
+ mtspr SPRN_PMC3, r6
+ mtspr SPRN_PMC4, r7
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, VCPU_MMCR(r4)
+ ld r5, VCPU_MMCR + 8(r4)
+ ld r6, VCPU_MMCR + 16(r4)
+ mtspr SPRN_MMCR1, r5
+ mtspr SPRN_MMCRA, r6
+ mtspr SPRN_MMCR0, r3
+ isync
+
+ /* Load up FP, VMX and VSX registers */
+ bl kvmppc_load_fp
+
+BEGIN_FTR_SECTION
+ /* Switch DSCR to guest value */
+ ld r5, VCPU_DSCR(r4)
+ mtspr SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /*
+ * Set the decrementer to the guest decrementer.
+ */
+ ld r8,VCPU_DEC_EXPIRES(r4)
+ mftb r7
+ subf r3,r7,r8
+ mtspr SPRN_DEC,r3
+ stw r3,VCPU_DEC(r4)
+
+ ld r5, VCPU_SPRG0(r4)
+ ld r6, VCPU_SPRG1(r4)
+ ld r7, VCPU_SPRG2(r4)
+ ld r8, VCPU_SPRG3(r4)
+ mtspr SPRN_SPRG0, r5
+ mtspr SPRN_SPRG1, r6
+ mtspr SPRN_SPRG2, r7
+ mtspr SPRN_SPRG3, r8
+
+ /* Save R1 in the PACA */
+ std r1, HSTATE_HOST_R1(r13)
+
+ /* Increment yield count if they have a VPA */
+ ld r3, VCPU_VPA(r4)
+ cmpdi r3, 0
+ beq 25f
+ lwz r5, LPPACA_YIELDCOUNT(r3)
+ addi r5, r5, 1
+ stw r5, LPPACA_YIELDCOUNT(r3)
+25:
+ /* Load up DAR and DSISR */
+ ld r5, VCPU_DAR(r4)
+ lwz r6, VCPU_DSISR(r4)
+ mtspr SPRN_DAR, r5
+ mtspr SPRN_DSISR, r6
+
+ /* Set partition DABR */
+ li r5,3
+ ld r6,VCPU_DABR(r4)
+ mtspr SPRN_DABRX,r5
+ mtspr SPRN_DABR,r6
+
+BEGIN_FTR_SECTION
+ /* Restore AMR and UAMOR, set AMOR to all 1s */
+ ld r5,VCPU_AMR(r4)
+ ld r6,VCPU_UAMOR(r4)
+ li r7,-1
+ mtspr SPRN_AMR,r5
+ mtspr SPRN_UAMOR,r6
+ mtspr SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Clear out SLB */
+ li r6,0
+ slbmte r6,r6
+ slbia
+ ptesync
+
+BEGIN_FTR_SECTION
+ b 30f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ /*
+ * POWER7 host -> guest partition switch code.
+ * We don't have to lock against concurrent tlbies,
+ * but we do have to coordinate across hardware threads.
+ */
+ /* Increment entry count iff exit count is zero. */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ addi r9,r5,VCORE_ENTRY_EXIT
+21: lwarx r3,0,r9
+ cmpwi r3,0x100 /* any threads starting to exit? */
+ bge secondary_too_late /* if so we're too late to the party */
+ addi r3,r3,1
+ stwcx. r3,0,r9
+ bne 21b
+
+ /* Primary thread switches to guest partition. */
+ ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+ lwz r6,VCPU_PTID(r4)
+ cmpwi r6,0
+ bne 20f
+ ld r6,KVM_SDR1(r9)
+ lwz r7,KVM_LPID(r9)
+ li r0,LPID_RSVD /* switch to reserved LPID */
+ mtspr SPRN_LPID,r0
+ ptesync
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_LPID,r7
+ isync
+ li r0,1
+ stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
+ b 10f
+
+ /* Secondary threads wait for primary to have done partition switch */
+20: lbz r0,VCORE_IN_GUEST(r5)
+ cmpwi r0,0
+ beq 20b
+
+ /* Set LPCR. Set the MER bit if there is a pending external irq. */
+10: ld r8,KVM_LPCR(r9)
+ ld r0,VCPU_PENDING_EXC(r4)
+ li r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
+ oris r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ and. r0,r0,r7
+ beq 11f
+ ori r8,r8,LPCR_MER
+11: mtspr SPRN_LPCR,r8
+ ld r8,KVM_RMOR(r9)
+ mtspr SPRN_RMOR,r8
+ isync
+
+ /* Check if HDEC expires soon */
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,10
+ li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ mr r9,r4
+ blt hdec_soon
+
+ /*
+ * Invalidate the TLB if we could possibly have stale TLB
+ * entries for this partition on this core due to the use
+ * of tlbiel.
+ * XXX maybe only need this on primary thread?
+ */
+ ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+ lwz r5,VCPU_VCPUID(r4)
+ lhz r6,PACAPACAINDEX(r13)
+ rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */
+ lhz r8,VCPU_LAST_CPU(r4)
+ sldi r7,r6,1 /* see if this is the same vcpu */
+ add r7,r7,r9 /* as last ran on this pcpu */
+ lhz r0,KVM_LAST_VCPU(r7)
+ cmpw r6,r8 /* on the same cpu core as last time? */
+ bne 3f
+ cmpw r0,r5 /* same vcpu as this core last ran? */
+ beq 1f
+3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */
+ sth r5,KVM_LAST_VCPU(r7)
+ li r6,128
+ mtctr r6
+ li r7,0x800 /* IS field = 0b10 */
+ ptesync
+2: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 2b
+ ptesync
+1:
+
+ /* Save purr/spurr */
+ mfspr r5,SPRN_PURR
+ mfspr r6,SPRN_SPURR
+ std r5,HSTATE_PURR(r13)
+ std r6,HSTATE_SPURR(r13)
+ ld r7,VCPU_PURR(r4)
+ ld r8,VCPU_SPURR(r4)
+ mtspr SPRN_PURR,r7
+ mtspr SPRN_SPURR,r8
+ b 31f
+
+ /*
+ * PPC970 host -> guest partition switch code.
+ * We have to lock against concurrent tlbies,
+ * using native_tlbie_lock to lock against host tlbies
+ * and kvm->arch.tlbie_lock to lock against guest tlbies.
+ * We also have to invalidate the TLB since its
+ * entries aren't tagged with the LPID.
+ */
+30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+
+ /* first take native_tlbie_lock */
+ .section ".toc","aw"
+toc_tlbie_lock:
+ .tc native_tlbie_lock[TC],native_tlbie_lock
+ .previous
+ ld r3,toc_tlbie_lock@toc(2)
+ lwz r8,PACA_LOCK_TOKEN(r13)
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
+ li r0,0x18f
+ rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
+ or r0,r7,r0
+ ptesync
+ sync
+ mtspr SPRN_HID4,r0 /* switch to reserved LPID */
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop native_tlbie_lock */
+
+ /* invalidate the whole TLB */
+ li r0,256
+ mtctr r0
+ li r6,0
+25: tlbiel r6
+ addi r6,r6,0x1000
+ bdnz 25b
+ ptesync
+
+ /* Take the guest's tlbie_lock */
+ addi r3,r9,KVM_TLBIE_LOCK
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+ ld r6,KVM_SDR1(r9)
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+
+ /* Set up HID4 with the guest's LPID etc. */
+ sync
+ mtspr SPRN_HID4,r7
+ isync
+
+ /* drop the guest's tlbie_lock */
+ li r0,0
+ stw r0,0(r3)
+
+ /* Check if HDEC expires soon */
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,10
+ li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ mr r9,r4
+ blt hdec_soon
+
+ /* Enable HDEC interrupts */
+ mfspr r0,SPRN_HID0
+ li r3,1
+ rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+ sync
+ mtspr SPRN_HID0,r0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+
+ /* Load up guest SLB entries */
+31: lwz r5,VCPU_SLB_MAX(r4)
+ cmpwi r5,0
+ beq 9f
+ mtctr r5
+ addi r6,r4,VCPU_SLB
+1: ld r8,VCPU_SLB_E(r6)
+ ld r9,VCPU_SLB_V(r6)
+ slbmte r9,r8
+ addi r6,r6,VCPU_SLB_SIZE
+ bdnz 1b
+9:
+
+ /* Restore state of CTRL run bit; assume 1 on entry */
+ lwz r5,VCPU_CTRL(r4)
+ andi. r5,r5,1
+ bne 4f
+ mfspr r6,SPRN_CTRLF
+ clrrdi r6,r6,1
+ mtspr SPRN_CTRLT,r6
+4:
+ ld r6, VCPU_CTR(r4)
+ lwz r7, VCPU_XER(r4)
+
+ mtctr r6
+ mtxer r7
+
+ /* Move SRR0 and SRR1 into the respective regs */
+ ld r6, VCPU_SRR0(r4)
+ ld r7, VCPU_SRR1(r4)
+ mtspr SPRN_SRR0, r6
+ mtspr SPRN_SRR1, r7
+
+ ld r10, VCPU_PC(r4)
+
+ ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */
+ rldicl r11, r11, 63 - MSR_HV_LG, 1
+ rotldi r11, r11, 1 + MSR_HV_LG
+ ori r11, r11, MSR_ME
+
+fast_guest_return:
+ mtspr SPRN_HSRR0,r10
+ mtspr SPRN_HSRR1,r11
+
+ /* Activate guest mode, so faults get handled by KVM */
+ li r9, KVM_GUEST_MODE_GUEST
+ stb r9, HSTATE_IN_GUEST(r13)
+
+ /* Enter guest */
+
+ ld r5, VCPU_LR(r4)
+ lwz r6, VCPU_CR(r4)
+ mtlr r5
+ mtcr r6
+
+ ld r0, VCPU_GPR(r0)(r4)
+ ld r1, VCPU_GPR(r1)(r4)
+ ld r2, VCPU_GPR(r2)(r4)
+ ld r3, VCPU_GPR(r3)(r4)
+ ld r5, VCPU_GPR(r5)(r4)
+ ld r6, VCPU_GPR(r6)(r4)
+ ld r7, VCPU_GPR(r7)(r4)
+ ld r8, VCPU_GPR(r8)(r4)
+ ld r9, VCPU_GPR(r9)(r4)
+ ld r10, VCPU_GPR(r10)(r4)
+ ld r11, VCPU_GPR(r11)(r4)
+ ld r12, VCPU_GPR(r12)(r4)
+ ld r13, VCPU_GPR(r13)(r4)
+
+ ld r4, VCPU_GPR(r4)(r4)
+
+ hrfid
+ b .
+
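The rldicl/rotldi pair feeding fast_guest_return clears MSR_HV without
materializing a 64-bit mask in a register; the guest MSR it computes is simply
(sketch):

        guest_msr = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;  /* guest never sees HV=1 */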
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+/*
+ * We come here from the first-level interrupt handlers.
+ */
+ .globl kvmppc_interrupt
+kvmppc_interrupt:
+ /*
+ * Register contents:
+ * R12 = interrupt vector
+ * R13 = PACA
+ * guest CR, R12 saved in shadow VCPU SCRATCH1/0
+ * guest R13 saved in SPRN_SCRATCH0
+ */
+ /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
+ std r9, HSTATE_HOST_R2(r13)
+ ld r9, HSTATE_KVM_VCPU(r13)
+
+ /* Save registers */
+
+ std r0, VCPU_GPR(r0)(r9)
+ std r1, VCPU_GPR(r1)(r9)
+ std r2, VCPU_GPR(r2)(r9)
+ std r3, VCPU_GPR(r3)(r9)
+ std r4, VCPU_GPR(r4)(r9)
+ std r5, VCPU_GPR(r5)(r9)
+ std r6, VCPU_GPR(r6)(r9)
+ std r7, VCPU_GPR(r7)(r9)
+ std r8, VCPU_GPR(r8)(r9)
+ ld r0, HSTATE_HOST_R2(r13)
+ std r0, VCPU_GPR(r9)(r9)
+ std r10, VCPU_GPR(r10)(r9)
+ std r11, VCPU_GPR(r11)(r9)
+ ld r3, HSTATE_SCRATCH0(r13)
+ lwz r4, HSTATE_SCRATCH1(r13)
+ std r3, VCPU_GPR(r12)(r9)
+ stw r4, VCPU_CR(r9)
+
+ /* Restore R1/R2 so we can handle faults */
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATOC(r13)
+
+ mfspr r10, SPRN_SRR0
+ mfspr r11, SPRN_SRR1
+ std r10, VCPU_SRR0(r9)
+ std r11, VCPU_SRR1(r9)
+ andi. r0, r12, 2 /* need to read HSRR0/1? */
+ beq 1f
+ mfspr r10, SPRN_HSRR0
+ mfspr r11, SPRN_HSRR1
+ clrrdi r12, r12, 2
+1: std r10, VCPU_PC(r9)
+ std r11, VCPU_MSR(r9)
+
+ GET_SCRATCH0(r3)
+ mflr r4
+ std r3, VCPU_GPR(r13)(r9)
+ std r4, VCPU_LR(r9)
+
+ /* Unset guest mode */
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+
+ stw r12,VCPU_TRAP(r9)
+
+ /* See if this is a leftover HDEC interrupt */
+ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ bne 2f
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,0
+ bge ignore_hdec
+2:
+ /* See if this is something we can handle in real mode */
+ cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
+ beq hcall_try_real_mode
+hcall_real_cont:
+
+ /* Check for mediated interrupts (could be done earlier really ...) */
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
+ bne+ 1f
+ ld r5,VCPU_KVM(r9)
+ ld r5,KVM_LPCR(r5)
+ andi. r0,r11,MSR_EE
+ beq 1f
+ andi. r0,r5,LPCR_MER
+ bne bounce_ext_interrupt
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save DEC */
+ mfspr r5,SPRN_DEC
+ mftb r6
+ extsw r5,r5
+ add r5,r5,r6
+ std r5,VCPU_DEC_EXPIRES(r9)
+
+ /* Save HEIR (HV emulation assist reg) in last_inst
+  * if this is an HEI (HV emulation interrupt, e40) */
+ li r3,-1
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
+ bne 11f
+ mfspr r3,SPRN_HEIR
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+11: stw r3,VCPU_LAST_INST(r9)
+
+ /* Save more register state */
+ mfxer r5
+ mfdar r6
+ mfdsisr r7
+ mfctr r8
+
+ stw r5, VCPU_XER(r9)
+ std r6, VCPU_DAR(r9)
+ stw r7, VCPU_DSISR(r9)
+ std r8, VCPU_CTR(r9)
+ /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
+ beq 6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+7: std r6, VCPU_FAULT_DAR(r9)
+ stw r7, VCPU_FAULT_DSISR(r9)
+
+ /* Save guest CTRL register, set runlatch to 1 */
+ mfspr r6,SPRN_CTRLF
+ stw r6,VCPU_CTRL(r9)
+ andi. r0,r6,1
+ bne 4f
+ ori r6,r6,1
+ mtspr SPRN_CTRLT,r6
+4:
+ /* Read the guest SLB and save it away */
+ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
+ mtctr r0
+ li r6,0
+ addi r7,r9,VCPU_SLB
+ li r5,0
+1: slbmfee r8,r6
+ andis. r0,r8,SLB_ESID_V@h
+ beq 2f
+ add r8,r8,r6 /* put index in */
+ slbmfev r3,r6
+ std r8,VCPU_SLB_E(r7)
+ std r3,VCPU_SLB_V(r7)
+ addi r7,r7,VCPU_SLB_SIZE
+ addi r5,r5,1
+2: addi r6,r6,1
+ bdnz 1b
+ stw r5,VCPU_SLB_MAX(r9)
+
+ /*
+ * Save the guest PURR/SPURR
+ */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_PURR
+ mfspr r6,SPRN_SPURR
+ ld r7,VCPU_PURR(r9)
+ ld r8,VCPU_SPURR(r9)
+ std r5,VCPU_PURR(r9)
+ std r6,VCPU_SPURR(r9)
+ subf r5,r7,r5
+ subf r6,r8,r6
+
+ /*
+ * Restore host PURR/SPURR and add guest times
+ * so that the time in the guest gets accounted.
+ */
+ ld r3,HSTATE_PURR(r13)
+ ld r4,HSTATE_SPURR(r13)
+ add r3,r3,r5
+ add r4,r4,r6
+ mtspr SPRN_PURR,r3
+ mtspr SPRN_SPURR,r4
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
+
+ /* Clear out SLB */
+ li r5,0
+ slbmte r5,r5
+ slbia
+ ptesync
+
+hdec_soon:
+BEGIN_FTR_SECTION
+ b 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ /*
+ * POWER7 guest -> host partition switch code.
+ * We don't have to lock against tlbies but we do
+ * have to coordinate the hardware threads.
+ */
+ /* Increment the threads-exiting-guest count in the 0xff00
+  * bits of vcore->entry_exit_count */
+ lwsync
+ ld r5,HSTATE_KVM_VCORE(r13)
+ addi r6,r5,VCORE_ENTRY_EXIT
+41: lwarx r3,0,r6
+ addi r0,r3,0x100
+ stwcx. r0,0,r6
+ bne 41b
+
+ /*
+ * At this point we have an interrupt that we have to pass
+ * up to the kernel or qemu; we can't handle it in real mode.
+ * Thus we have to do a partition switch, so we have to
+ * collect the other threads, if we are the first thread
+ * to take an interrupt. To do this, we set the HDEC to 0,
+ * which causes an HDEC interrupt in all threads within 2ns
+ * because the HDEC register is shared between all 4 threads.
+ * However, we don't need to bother if this is an HDEC
+ * interrupt, since the other threads will already be on their
+ * way here in that case.
+ */
+ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ beq 40f
+ cmpwi r3,0x100 /* Are we the first here? */
+ bge 40f
+ cmpwi r3,1
+ ble 40f
+ li r0,0
+ mtspr SPRN_HDEC,r0
+40:
+
+ /* Secondary threads wait for primary to do partition switch */
+ ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ lwz r3,VCPU_PTID(r9)
+ cmpwi r3,0
+ beq 15f
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ b 16f
+
+ /* Primary thread waits for all the secondaries to exit guest */
+15: lwz r3,VCORE_ENTRY_EXIT(r5)
+ srwi r0,r3,8
+ clrldi r3,r3,56
+ cmpw r3,r0
+ bne 15b
+ isync
+
+ /* Primary thread switches back to host partition */
+ ld r6,KVM_HOST_SDR1(r4)
+ lwz r7,KVM_HOST_LPID(r4)
+ li r8,LPID_RSVD /* switch to reserved LPID */
+ mtspr SPRN_LPID,r8
+ ptesync
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_LPID,r7
+ isync
+ li r0,0
+ stb r0,VCORE_IN_GUEST(r5)
+ lis r8,0x7fff /* MAX_INT@h */
+ mtspr SPRN_HDEC,r8
+
+16: ld r8,KVM_HOST_LPCR(r4)
+ mtspr SPRN_LPCR,r8
+ isync
+ b 33f
+
+ /*
+ * PPC970 guest -> host partition switch code.
+ * We have to lock against concurrent tlbies, and
+ * we have to flush the whole TLB.
+ */
+32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
+
+ /* Take the guest's tlbie_lock */
+ lwz r8,PACA_LOCK_TOKEN(r13)
+ addi r3,r4,KVM_TLBIE_LOCK
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
+ li r0,0x18f
+ rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
+ or r0,r7,r0
+ ptesync
+ sync
+ mtspr SPRN_HID4,r0 /* switch to reserved LPID */
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop guest tlbie_lock */
+
+ /* invalidate the whole TLB */
+ li r0,256
+ mtctr r0
+ li r6,0
+25: tlbiel r6
+ addi r6,r6,0x1000
+ bdnz 25b
+ ptesync
+
+ /* take native_tlbie_lock */
+ ld r3,toc_tlbie_lock@toc(2)
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r6,KVM_HOST_SDR1(r4)
+ mtspr SPRN_SDR1,r6 /* switch to host page table */
+
+ /* Set up host HID4 value */
+ sync
+ mtspr SPRN_HID4,r7
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop native_tlbie_lock */
+
+ lis r8,0x7fff /* MAX_INT@h */
+ mtspr SPRN_HDEC,r8
+
+ /* Disable HDEC interrupts */
+ mfspr r0,SPRN_HID0
+ li r3,0
+ rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+ sync
+ mtspr SPRN_HID0,r0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+
+ /* load host SLB entries */
+33: ld r8,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r8)
+ ld r6,SLBSHADOW_SAVEAREA+8(r8)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r8,r8,16
+ .endr
+
+ /* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_AMR
+ mfspr r6,SPRN_UAMOR
+ std r5,VCPU_AMR(r9)
+ std r6,VCPU_UAMOR(r9)
+ li r6,0
+ mtspr SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
+ /* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+ std r8, VCPU_DSCR(r7)
+ mtspr SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save non-volatile GPRs */
+ std r14, VCPU_GPR(r14)(r9)
+ std r15, VCPU_GPR(r15)(r9)
+ std r16, VCPU_GPR(r16)(r9)
+ std r17, VCPU_GPR(r17)(r9)
+ std r18, VCPU_GPR(r18)(r9)
+ std r19, VCPU_GPR(r19)(r9)
+ std r20, VCPU_GPR(r20)(r9)
+ std r21, VCPU_GPR(r21)(r9)
+ std r22, VCPU_GPR(r22)(r9)
+ std r23, VCPU_GPR(r23)(r9)
+ std r24, VCPU_GPR(r24)(r9)
+ std r25, VCPU_GPR(r25)(r9)
+ std r26, VCPU_GPR(r26)(r9)
+ std r27, VCPU_GPR(r27)(r9)
+ std r28, VCPU_GPR(r28)(r9)
+ std r29, VCPU_GPR(r29)(r9)
+ std r30, VCPU_GPR(r30)(r9)
+ std r31, VCPU_GPR(r31)(r9)
+
+ /* Save SPRGs */
+ mfspr r3, SPRN_SPRG0
+ mfspr r4, SPRN_SPRG1
+ mfspr r5, SPRN_SPRG2
+ mfspr r6, SPRN_SPRG3
+ std r3, VCPU_SPRG0(r9)
+ std r4, VCPU_SPRG1(r9)
+ std r5, VCPU_SPRG2(r9)
+ std r6, VCPU_SPRG3(r9)
+
+ /* Increment yield count if they have a VPA */
+ ld r8, VCPU_VPA(r9) /* do they have a VPA? */
+ cmpdi r8, 0
+ beq 25f
+ lwz r3, LPPACA_YIELDCOUNT(r8)
+ addi r3, r3, 1
+ stw r3, LPPACA_YIELDCOUNT(r8)
+25:
+ /* Save PMU registers if requested */
+ /* r8 and cr0.eq are live here */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r4, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+ beq 21f /* if no VPA, save PMU stuff anyway */
+ lbz r7, LPPACA_PMCINUSE(r8)
+ cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
+ bne 21f
+ std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
+ b 22f
+21: mfspr r5, SPRN_MMCR1
+ mfspr r6, SPRN_MMCRA
+ std r4, VCPU_MMCR(r9)
+ std r5, VCPU_MMCR + 8(r9)
+ std r6, VCPU_MMCR + 16(r9)
+ mfspr r3, SPRN_PMC1
+ mfspr r4, SPRN_PMC2
+ mfspr r5, SPRN_PMC3
+ mfspr r6, SPRN_PMC4
+ mfspr r7, SPRN_PMC5
+ mfspr r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+ mfspr r10, SPRN_PMC7
+ mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ stw r3, VCPU_PMC(r9)
+ stw r4, VCPU_PMC + 4(r9)
+ stw r5, VCPU_PMC + 8(r9)
+ stw r6, VCPU_PMC + 12(r9)
+ stw r7, VCPU_PMC + 16(r9)
+ stw r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+ stw r10, VCPU_PMC + 24(r9)
+ stw r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+22:
+ /* save FP state */
+ mr r3, r9
+ bl .kvmppc_save_fp
+
+ /* Secondary threads go off to take a nap on POWER7 */
+BEGIN_FTR_SECTION
+ lwz r0,VCPU_PTID(r3)
+ cmpwi r0,0
+ bne secondary_nap
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /*
+ * Reload DEC. HDEC interrupts were disabled when
+ * we reloaded the host's LPCR value.
+ */
+ ld r3, HSTATE_DECEXP(r13)
+ mftb r4
+ subf r4, r4, r3
+ mtspr SPRN_DEC, r4
+
+ /* Reload the host's PMU registers */
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r4, LPPACA_PMCINUSE(r3)
+ cmpwi r4, 0
+ beq 23f /* skip if not */
+ lwz r3, HSTATE_PMC(r13)
+ lwz r4, HSTATE_PMC + 4(r13)
+ lwz r5, HSTATE_PMC + 8(r13)
+ lwz r6, HSTATE_PMC + 12(r13)
+ lwz r8, HSTATE_PMC + 16(r13)
+ lwz r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ lwz r10, HSTATE_PMC + 24(r13)
+ lwz r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r4
+ mtspr SPRN_PMC3, r5
+ mtspr SPRN_PMC4, r6
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, HSTATE_MMCR(r13)
+ ld r4, HSTATE_MMCR + 8(r13)
+ ld r5, HSTATE_MMCR + 16(r13)
+ mtspr SPRN_MMCR1, r4
+ mtspr SPRN_MMCRA, r5
+ mtspr SPRN_MMCR0, r3
+ isync
+23:
+ /*
+ * For external and machine check interrupts, we need
+ * to call the Linux handler to process the interrupt.
+ * We do that by jumping to the interrupt vector address
+ * which we have in r12. The [h]rfid at the end of the
+ * handler will return to the book3s_hv_interrupts.S code.
+ * For other interrupts we do the rfid to get back
+ * to the book3s_hv_interrupts.S code here.
+ */
+ ld r8, HSTATE_VMHANDLER(r13)
+ ld r7, HSTATE_HOST_MSR(r13)
+
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+ beq 11f
+ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+
+ /* RFI into the highmem handler, or branch to interrupt handler */
+12: mfmsr r6
+ mtctr r12
+ li r0, MSR_RI
+ andc r6, r6, r0
+ mtmsrd r6, 1 /* Clear RI in MSR */
+ mtsrr0 r8
+ mtsrr1 r7
+ beqctr
+ RFI
+
+11:
+BEGIN_FTR_SECTION
+ b 12b
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_HSRR0, r8
+ mtspr SPRN_HSRR1, r7
+ ba 0x500
+
+6: mfspr r6,SPRN_HDAR
+ mfspr r7,SPRN_HDSISR
+ b 7b
+
+/*
+ * Try to handle an hcall in real mode.
+ * Returns to the guest if we handle it, or continues on up to
+ * the kernel if we can't (i.e. if we don't have a handler for
+ * it, or if the handler returns H_TOO_HARD).
+ */
+ .globl hcall_try_real_mode
+hcall_try_real_mode:
+ ld r3,VCPU_GPR(r3)(r9)
+ andi. r0,r11,MSR_PR
+ bne hcall_real_cont
+ clrrdi r3,r3,2
+ cmpldi r3,hcall_real_table_end - hcall_real_table
+ bge hcall_real_cont
+ LOAD_REG_ADDR(r4, hcall_real_table)
+ lwzx r3,r3,r4
+ cmpwi r3,0
+ beq hcall_real_cont
+ add r3,r3,r4
+ mtctr r3
+ mr r3,r9 /* get vcpu pointer */
+ ld r4,VCPU_GPR(r4)(r9)
+ bctrl
+ cmpdi r3,H_TOO_HARD
+ beq hcall_real_fallback
+ ld r4,HSTATE_KVM_VCPU(r13)
+ std r3,VCPU_GPR(r3)(r4)
+ ld r10,VCPU_PC(r4)
+ ld r11,VCPU_MSR(r4)
+ b fast_guest_return
+
+ /* We've attempted a real mode hcall, but the handler has punted it back
+ * to userspace. We need to restore some clobbered volatiles
+ * before resuming the pass-it-to-qemu path */
+hcall_real_fallback:
+ li r12,BOOK3S_INTERRUPT_SYSCALL
+ ld r9, HSTATE_KVM_VCPU(r13)
+ ld r11, VCPU_MSR(r9)
+
+ b hcall_real_cont
+
+ .globl hcall_real_table
+hcall_real_table:
+ .long 0 /* 0 - unused */
+ .long .kvmppc_h_remove - hcall_real_table
+ .long .kvmppc_h_enter - hcall_real_table
+ .long .kvmppc_h_read - hcall_real_table
+ .long 0 /* 0x10 - H_CLEAR_MOD */
+ .long 0 /* 0x14 - H_CLEAR_REF */
+ .long .kvmppc_h_protect - hcall_real_table
+ .long 0 /* 0x1c - H_GET_TCE */
+ .long .kvmppc_h_put_tce - hcall_real_table
+ .long 0 /* 0x24 - H_SET_SPRG0 */
+ .long .kvmppc_h_set_dabr - hcall_real_table
+ .long 0 /* 0x2c */
+ .long 0 /* 0x30 */
+ .long 0 /* 0x34 */
+ .long 0 /* 0x38 */
+ .long 0 /* 0x3c */
+ .long 0 /* 0x40 */
+ .long 0 /* 0x44 */
+ .long 0 /* 0x48 */
+ .long 0 /* 0x4c */
+ .long 0 /* 0x50 */
+ .long 0 /* 0x54 */
+ .long 0 /* 0x58 */
+ .long 0 /* 0x5c */
+ .long 0 /* 0x60 */
+ .long 0 /* 0x64 */
+ .long 0 /* 0x68 */
+ .long 0 /* 0x6c */
+ .long 0 /* 0x70 */
+ .long 0 /* 0x74 */
+ .long 0 /* 0x78 */
+ .long 0 /* 0x7c */
+ .long 0 /* 0x80 */
+ .long 0 /* 0x84 */
+ .long 0 /* 0x88 */
+ .long 0 /* 0x8c */
+ .long 0 /* 0x90 */
+ .long 0 /* 0x94 */
+ .long 0 /* 0x98 */
+ .long 0 /* 0x9c */
+ .long 0 /* 0xa0 */
+ .long 0 /* 0xa4 */
+ .long 0 /* 0xa8 */
+ .long 0 /* 0xac */
+ .long 0 /* 0xb0 */
+ .long 0 /* 0xb4 */
+ .long 0 /* 0xb8 */
+ .long 0 /* 0xbc */
+ .long 0 /* 0xc0 */
+ .long 0 /* 0xc4 */
+ .long 0 /* 0xc8 */
+ .long 0 /* 0xcc */
+ .long 0 /* 0xd0 */
+ .long 0 /* 0xd4 */
+ .long 0 /* 0xd8 */
+ .long 0 /* 0xdc */
+ .long 0 /* 0xe0 */
+ .long 0 /* 0xe4 */
+ .long 0 /* 0xe8 */
+ .long 0 /* 0xec */
+ .long 0 /* 0xf0 */
+ .long 0 /* 0xf4 */
+ .long 0 /* 0xf8 */
+ .long 0 /* 0xfc */
+ .long 0 /* 0x100 */
+ .long 0 /* 0x104 */
+ .long 0 /* 0x108 */
+ .long 0 /* 0x10c */
+ .long 0 /* 0x110 */
+ .long 0 /* 0x114 */
+ .long 0 /* 0x118 */
+ .long 0 /* 0x11c */
+ .long 0 /* 0x120 */
+ .long .kvmppc_h_bulk_remove - hcall_real_table
+hcall_real_table_end:
+
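Since hcall numbers are multiples of 4 and the table entries are 4-byte
offsets, the clrrdi in hcall_try_real_mode makes the hcall number itself the
byte offset into hcall_real_table. A C sketch of the dispatch (names invented
for illustration):

        extern s32 hcall_real_table[];           /* the .long offsets above */
        unsigned long nr = hcall_nr & ~3ul;      /* clrrdi r3,r3,2 */
        long ret = H_TOO_HARD;

        if (nr < hcall_real_table_bytes && hcall_real_table[nr / 4]) {
                long (*fn)(struct kvm_vcpu *) = (void *)
                        ((char *)hcall_real_table + hcall_real_table[nr / 4]);
                ret = fn(vcpu);  /* H_TOO_HARD also falls back to the kernel */
        }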
+ignore_hdec:
+ mr r4,r9
+ b fast_guest_return
+
+bounce_ext_interrupt:
+ mr r4,r9
+ mtspr SPRN_SRR0,r10
+ mtspr SPRN_SRR1,r11
+ li r10,BOOK3S_INTERRUPT_EXTERNAL
+ LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
+ b fast_guest_return
+
+_GLOBAL(kvmppc_h_set_dabr)
+ std r4,VCPU_DABR(r3)
+ mtspr SPRN_DABR,r4
+ li r3,0
+ blr
+
+secondary_too_late:
+ ld r5,HSTATE_KVM_VCORE(r13)
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ ld r11,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r11)
+ ld r6,SLBSHADOW_SAVEAREA+8(r11)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r11,r11,16
+ .endr
+ b 50f
+
+secondary_nap:
+ /* Clear any pending IPI */
+50: ld r5, HSTATE_XICS_PHYS(r13)
+ li r0, 0xff
+ li r6, XICS_QIRR
+ stbcix r0, r5, r6
+
+ /* increment the nap count and then go to nap mode */
+ ld r4, HSTATE_KVM_VCORE(r13)
+ addi r4, r4, VCORE_NAP_COUNT
+ lwsync /* make previous updates visible */
+51: lwarx r3, 0, r4
+ addi r3, r3, 1
+ stwcx. r3, 0, r4
+ bne 51b
+ isync
+
+ mfspr r4, SPRN_LPCR
+ li r0, LPCR_PECE
+ andc r4, r4, r0
+ ori r4, r4, LPCR_PECE0 /* exit nap on interrupt */
+ mtspr SPRN_LPCR, r4
+ li r0, 0
+ std r0, HSTATE_SCRATCH0(r13)
+ ptesync
+ ld r0, HSTATE_SCRATCH0(r13)
+1: cmpd r0, r0
+ bne 1b
+ nap
+ b .
+
+/*
+ * Save away FP, VMX and VSX registers.
+ * r3 = vcpu pointer
+ */
+_GLOBAL(kvmppc_save_fp)
+ mfmsr r9
+ ori r8,r9,MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VEC@h
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ mtmsrd r8
+ isync
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r6,reg*16+VCPU_VSRS
+ stxvd2x reg,r6,r3
+ reg = reg + 1
+ .endr
+FTR_SECTION_ELSE
+#endif
+ reg = 0
+ .rept 32
+ stfd reg,reg*8+VCPU_FPRS(r3)
+ reg = reg + 1
+ .endr
+#ifdef CONFIG_VSX
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#endif
+ mffs fr0
+ stfd fr0,VCPU_FPSCR(r3)
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r6,reg*16+VCPU_VRS
+ stvx reg,r6,r3
+ reg = reg + 1
+ .endr
+ mfvscr vr0
+ li r6,VCPU_VSCR
+ stvx vr0,r6,r3
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ mfspr r6,SPRN_VRSAVE
+ stw r6,VCPU_VRSAVE(r3)
+ mtmsrd r9
+ isync
+ blr
+
+/*
+ * Load up FP, VMX and VSX registers
+ * r4 = vcpu pointer
+ */
+ .globl kvmppc_load_fp
+kvmppc_load_fp:
+ mfmsr r9
+ ori r8,r9,MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VEC@h
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ mtmsrd r8
+ isync
+ lfd fr0,VCPU_FPSCR(r4)
+ MTFSF_L(fr0)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r7,reg*16+VCPU_VSRS
+ lxvd2x reg,r7,r4
+ reg = reg + 1
+ .endr
+FTR_SECTION_ELSE
+#endif
+ reg = 0
+ .rept 32
+ lfd reg,reg*8+VCPU_FPRS(r4)
+ reg = reg + 1
+ .endr
+#ifdef CONFIG_VSX
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#endif
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ li r7,VCPU_VSCR
+ lvx vr0,r7,r4
+ mtvscr vr0
+ reg = 0
+ .rept 32
+ li r7,reg*16+VCPU_VRS
+ lvx reg,r7,r4
+ reg = reg + 1
+ .endr
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ lwz r7,VCPU_VRSAVE(r4)
+ mtspr SPRN_VRSAVE,r7
+ blr
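The .rept/.endr blocks in kvmppc_save_fp/kvmppc_load_fp are assemble-time
loops: `reg` is an assembler symbol, so each pass emits one instruction with
the register number and offset baked in. The FP leg, for example, unrolls to
the moral equivalent of (pseudo-C sketch; FPR() stands in for the architected
register file):

        for (reg = 0; reg < 32; reg++)
                vcpu->arch.fpr[reg] = FPR(reg);   /* 32 discrete stfd instructions */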
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 2f0bc928b08a..c54b0e30cf3f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -29,8 +29,7 @@
#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)
-#define GET_SHADOW_VCPU(reg) \
- addi reg, r13, PACA_KVM_SVCPU
+#define GET_SHADOW_VCPU_R13
#define DISABLE_INTERRUPTS \
mfmsr r0; \
@@ -43,8 +42,8 @@
#define ULONG_SIZE 4
#define FUNC(name) name
-#define GET_SHADOW_VCPU(reg) \
- lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+#define GET_SHADOW_VCPU_R13 \
+ lwz r13, (THREAD + THREAD_KVM_SVCPU)(r2)
#define DISABLE_INTERRUPTS \
mfmsr r0; \
@@ -85,7 +84,7 @@
* r3: kvm_run pointer
* r4: vcpu pointer
*/
-_GLOBAL(__kvmppc_vcpu_entry)
+_GLOBAL(__kvmppc_vcpu_run)
kvm_start_entry:
/* Write correct stack frame */
@@ -107,17 +106,11 @@ kvm_start_entry:
/* Load non-volatile guest state from the vcpu */
VCPU_LOAD_NVGPRS(r4)
- GET_SHADOW_VCPU(r5)
-
- /* Save R1/R2 in the PACA */
- PPC_STL r1, SVCPU_HOST_R1(r5)
- PPC_STL r2, SVCPU_HOST_R2(r5)
+kvm_start_lightweight:
- /* XXX swap in/out on load? */
+ GET_SHADOW_VCPU_R13
PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
- PPC_STL r3, SVCPU_VMHANDLER(r5)
-
-kvm_start_lightweight:
+ PPC_STL r3, HSTATE_VMHANDLER(r13)
PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 79751d8dd131..41cb0017e757 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -21,7 +21,6 @@
#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
-#include "trace.h"
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -29,6 +28,8 @@
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
+#include "trace.h"
+
#define PTE_SIZE 12
static struct kmem_cache *hpte_cache;
@@ -58,30 +59,31 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
u64 index;
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
trace_kvm_book3s_mmu_map(pte);
- spin_lock(&vcpu->arch.mmu_lock);
+ spin_lock(&vcpu3s->mmu_lock);
/* Add to ePTE list */
index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
- hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+ hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);
/* Add to ePTE_long list */
index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
hlist_add_head_rcu(&pte->list_pte_long,
- &vcpu->arch.hpte_hash_pte_long[index]);
+ &vcpu3s->hpte_hash_pte_long[index]);
/* Add to vPTE list */
index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
- hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+ hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);
/* Add to vPTE_long list */
index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
hlist_add_head_rcu(&pte->list_vpte_long,
- &vcpu->arch.hpte_hash_vpte_long[index]);
+ &vcpu3s->hpte_hash_vpte_long[index]);
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
}
static void free_pte_rcu(struct rcu_head *head)
@@ -92,16 +94,18 @@ static void free_pte_rcu(struct rcu_head *head)
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
trace_kvm_book3s_mmu_invalidate(pte);
/* Different for 32 and 64 bit */
kvmppc_mmu_invalidate_pte(vcpu, pte);
- spin_lock(&vcpu->arch.mmu_lock);
+ spin_lock(&vcpu3s->mmu_lock);
/* pte already invalidated in between? */
if (hlist_unhashed(&pte->list_pte)) {
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
return;
}
@@ -115,14 +119,15 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
else
kvm_release_pfn_clean(pte->pfn);
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
- vcpu->arch.hpte_cache_count--;
+ vcpu3s->hpte_cache_count--;
call_rcu(&pte->rcu_head, free_pte_rcu);
}
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
struct hlist_node *node;
int i;
@@ -130,7 +135,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
rcu_read_lock();
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
- struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+ struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
invalidate_pte(vcpu, pte);
@@ -141,12 +146,13 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
- list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+ list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
rcu_read_lock();
@@ -160,12 +166,13 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
- list = &vcpu->arch.hpte_hash_pte_long[
+ list = &vcpu3s->hpte_hash_pte_long[
kvmppc_mmu_hash_pte_long(guest_ea)];
rcu_read_lock();
@@ -203,12 +210,13 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xfffffffffULL;
- list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+ list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
rcu_read_lock();
@@ -223,12 +231,13 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xffffff000ULL;
- list = &vcpu->arch.hpte_hash_vpte_long[
+ list = &vcpu3s->hpte_hash_vpte_long[
kvmppc_mmu_hash_vpte_long(guest_vp)];
rcu_read_lock();
@@ -261,6 +270,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_node *node;
struct hpte_cache *pte;
int i;
@@ -270,7 +280,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
rcu_read_lock();
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
- struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+ struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
if ((pte->pte.raddr >= pa_start) &&
@@ -283,12 +293,13 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
- vcpu->arch.hpte_cache_count++;
+ vcpu3s->hpte_cache_count++;
- if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+ if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
kvmppc_mmu_pte_flush_all(vcpu);
return pte;
@@ -309,17 +320,19 @@ static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
/* init hpte lookup hashes */
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
- ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
- ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
- ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
- ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
-
- spin_lock_init(&vcpu->arch.mmu_lock);
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
+ ARRAY_SIZE(vcpu3s->hpte_hash_pte));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
+ ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
+ ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
+ ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
+
+ spin_lock_init(&vcpu3s->mmu_lock);
return 0;
}
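The mechanical change in this file is moving the HPTE hashes, the cache count
and mmu_lock from vcpu->arch into kvmppc_vcpu_book3s, since only the PR
(shadow-MMU) flavour needs them. If I read the PR layout right (an assumption;
the definition is outside this diff), to_book3s() is just:

        static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
        {
                /* assumes kvmppc_vcpu_book3s embeds its struct kvm_vcpu */
                return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
        }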
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
new file mode 100644
index 000000000000..0c0d3f274437
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ * Paul Mackerras <paulus@samba.org>
+ *
+ * Description:
+ * Functions relating to running KVM on Book 3S processors where
+ * we don't have access to hypervisor mode, and we run the guest
+ * in problem state (user mode).
+ *
+ * This file is derived from arch/powerpc/kvm/44x.c,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu_context.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+
+#include "trace.h"
+
+/* #define EXIT_DEBUG */
+/* #define DEBUG_EXT */
+
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr);
+
+/* Some compatibility defines */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MSR_USER32 MSR_USER
+#define MSR_USER64 MSR_USER
+#define HW_PAGE_SIZE PAGE_SIZE
+#endif
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
+ to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
+ to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
+
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
+}
+
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+ ulong smsr = vcpu->arch.shared->msr;
+
+ /* Guest MSR values */
+ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
+ /* Process MSR values */
+ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
+ /* External (FPU/Altivec/VSX) providers that the guest has reserved */
+ smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+ /* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+ smsr |= MSR_ISF | MSR_HV;
+#endif
+ vcpu->arch.shadow_msr = smsr;
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ ulong old_msr = vcpu->arch.shared->msr;
+
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
+#endif
+
+ msr &= to_book3s(vcpu)->msr_mask;
+ vcpu->arch.shared->msr = msr;
+ kvmppc_recalc_shadow_msr(vcpu);
+
+ if (msr & MSR_POW) {
+ if (!vcpu->arch.pending_exceptions) {
+ kvm_vcpu_block(vcpu);
+ vcpu->stat.halt_wakeup++;
+
+ /* Unset POW bit after we woke up */
+ msr &= ~MSR_POW;
+ vcpu->arch.shared->msr = msr;
+ }
+ }
+
+ if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
+ kvmppc_mmu_flush_segments(vcpu);
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+
+ /* Preload magic page segment when in kernel mode */
+ if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
+ struct kvm_vcpu_arch *a = &vcpu->arch;
+
+ if (msr & MSR_DR)
+ kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
+ else
+ kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
+ }
+ }
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.shared->msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+}
+
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+{
+ u32 host_pvr;
+
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
+ vcpu->arch.pvr = pvr;
+#ifdef CONFIG_PPC_BOOK3S_64
+ if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
+ kvmppc_mmu_book3s_64_init(vcpu);
+ to_book3s(vcpu)->hior = 0xfff00000;
+ to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
+ } else
+#endif
+ {
+ kvmppc_mmu_book3s_32_init(vcpu);
+ to_book3s(vcpu)->hior = 0;
+ to_book3s(vcpu)->msr_mask = 0xffffffffULL;
+ }
+
+ /* If we are at hypervisor level on a 970, we can tell the CPU to
+ * treat DCBZ as a 32-byte store */
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
+ !strcmp(cur_cpu_spec->platform, "ppc970"))
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+
+ /* Cell performs badly if MSR_FEx are set, so let's hope nobody
+ really needs them in a VM on Cell and force-disable them. */
+ if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
+ to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* 32-bit Book3S always has a 32-byte dcbz */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+#endif
+
+ /* On some CPUs we can execute paired single operations natively */
+ asm ( "mfpvr %0" : "=r"(host_pvr));
+ switch (host_pvr) {
+ case 0x00080200: /* lonestar 2.0 */
+ case 0x00088202: /* lonestar 2.2 */
+ case 0x70000100: /* gekko 1.0 */
+ case 0x00080100: /* gekko 2.0 */
+ case 0x00083203: /* gekko 2.3a */
+ case 0x00083213: /* gekko 2.3b */
+ case 0x00083204: /* gekko 2.4 */
+ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
+ case 0x00087200: /* broadway */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
+ /* Enable HID2.PSE - in case we need it later */
+ mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
+ }
+}
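+
+/*
+ * For example, the default 970FX PVR (0x3C0301) used by
+ * kvmppc_core_vcpu_create() below falls inside [0x330000, 0x70330000)
+ * and therefore selects the 64-bit MMU with HIOR 0xfff00000 and an
+ * all-ones MSR mask.
+ */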
+
+/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
+ * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
+ * emulate the 32-byte dcbz length.
+ *
+ * The Book3s_64 inventors also realized this case and implemented a special bit
+ * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
+ *
+ * My approach here is to patch the dcbz instruction on executing pages.
+ */
+static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+ struct page *hpage;
+ u64 hpage_offset;
+ u32 *page;
+ int i;
+
+ hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+ if (is_error_page(hpage)) {
+ kvm_release_page_clean(hpage);
+ return;
+ }
+
+ hpage_offset = pte->raddr & ~PAGE_MASK;
+ hpage_offset &= ~0xFFFULL;
+ hpage_offset /= 4;
+
+ get_page(hpage);
+ page = kmap_atomic(hpage, KM_USER0);
+
+ /* patch dcbz into a reserved instruction, so we trap */
+ for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
+ if ((page[i] & 0xff0007ff) == INS_DCBZ)
+ page[i] &= 0xfffffff7;
+
+ kunmap_atomic(page, KM_USER0);
+ put_page(hpage);
+}
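+
+/*
+ * Minimal sketch (not used by the code above) of the masking trick
+ * kvmppc_patch_dcbz() relies on: 0xff0007ff keeps the primary and
+ * extended opcode fields while ignoring the rA/rB operands, and
+ * clearing the 0x8 bit of the extended opcode turns dcbz into a
+ * reserved encoding that traps to the program check handler.
+ */
+static inline bool __maybe_unused kvmppc_dcbz_would_patch(u32 inst)
+{
+	/* match dcbz regardless of its register operands */
+	return (inst & 0xff0007ff) == INS_DCBZ;
+}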
+
+static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ ulong mp_pa = vcpu->arch.magic_page_pa;
+
+ if (unlikely(mp_pa) &&
+ unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+ return 1;
+ }
+
+ return kvm_is_visible_gfn(vcpu->kvm, gfn);
+}
+
+int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ ulong eaddr, int vec)
+{
+ bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+ int r = RESUME_GUEST;
+ int relocated;
+ int page_found = 0;
+ struct kvmppc_pte pte;
+ bool is_mmio = false;
+ bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+ bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+ u64 vsid;
+
+ relocated = data ? dr : ir;
+
+ /* Resolve real address if translation turned on */
+ if (relocated) {
+ page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+ } else {
+ pte.may_execute = true;
+ pte.may_read = true;
+ pte.may_write = true;
+ pte.raddr = eaddr & KVM_PAM;
+ pte.eaddr = eaddr;
+ pte.vpage = eaddr >> 12;
+ }
+
+ switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ case 0:
+ pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
+ break;
+ case MSR_DR:
+ case MSR_IR:
+ vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+ if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+ else
+ pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+ pte.vpage |= vsid;
+
+ if (vsid == -1)
+ page_found = -EINVAL;
+ break;
+ }
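+
+ /*
+ * The VSID_REAL* tags above give untranslated accesses their own
+ * shadow address spaces, so a real-mode access and a translated
+ * access to the same effective address never alias in the shadow
+ * MMU.
+ */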
+
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * If we do the dcbz hack, we have to NX on every execution,
+ * so we can patch the executing code. This renders our guest
+ * NX-less.
+ */
+ pte.may_execute = !data;
+ }
+
+ if (page_found == -ENOENT) {
+ /* Page not found in guest PTE entries */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ vcpu->arch.shared->msr |=
+ (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EPERM) {
+ /* Storage protection */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->arch.shared->dsisr =
+ to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
+ vcpu->arch.shared->msr |=
+ (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EINVAL) {
+ /* Page not found in guest SLB */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
+ } else if (!is_mmio &&
+ kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+ /* The guest's PTE is not mapped yet. Map on the host */
+ kvmppc_mmu_map_page(vcpu, &pte);
+ if (data)
+ vcpu->stat.sp_storage++;
+ else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
+ kvmppc_patch_dcbz(vcpu, &pte);
+ } else {
+ /* MMIO */
+ vcpu->stat.mmio_exits++;
+ vcpu->arch.paddr_accessed = pte.raddr;
+ r = kvmppc_emulate_mmio(run, vcpu);
+ if (r == RESUME_HOST_NV)
+ r = RESUME_HOST;
+ }
+
+ return r;
+}
+
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+ i *= 2;
+#endif
+ return i;
+}
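+
+/*
+ * With CONFIG_VSX the kernel stores each 128-bit VSR as two adjacent
+ * 64-bit doublewords, so FP register i lives at thread_fpr[2 * i] and
+ * its VSX half at thread_fpr[2 * i + 1]; get_fpr_index() hides that
+ * doubling from the save/restore loops below.
+ */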
+
+/* Give up external provider (FPU, Altivec, VSX) */
+void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ if (!(vcpu->arch.guest_owned_ext & msr))
+ return;
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+ switch (msr) {
+ case MSR_FP:
+ giveup_fpu(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+ vcpu->arch.fpscr = t->fpscr.val;
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ giveup_altivec(current);
+ memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+ vcpu->arch.vscr = t->vscr;
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ __giveup_vsx(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext &= ~msr;
+ current->thread.regs->msr &= ~msr;
+ kvmppc_recalc_shadow_msr(vcpu);
+}
+
+static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
+{
+ ulong srr0 = kvmppc_get_pc(vcpu);
+ u32 last_inst = kvmppc_get_last_inst(vcpu);
+ int ret;
+
+ ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+ if (ret == -ENOENT) {
+ ulong msr = vcpu->arch.shared->msr;
+
+ msr = kvmppc_set_field(msr, 33, 33, 1);
+ msr = kvmppc_set_field(msr, 34, 36, 0);
+ vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+ return EMULATE_AGAIN;
+ }
+
+ return EMULATE_DONE;
+}
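+
+/*
+ * kvmppc_set_field() is assumed to use IBM (MSB = bit 0) numbering on
+ * a 64-bit value, roughly:
+ *
+ *	mask = ((1ULL << (lsb - msb + 1)) - 1) << (63 - lsb);
+ *	val  = (val & ~mask) | (((u64)v << (63 - lsb)) & mask);
+ *
+ * so the calls above clear bits 34:36 and 42:47 and set bit 33
+ * (0x40000000), the "page not found" status the guest will see in
+ * SRR1 when the instruction storage interrupt is delivered.
+ */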
+
+static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
+{
+ /* Need to do paired single emulation? */
+ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+ return EMULATE_DONE;
+
+ /* Read out the instruction */
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
+ /* Need to emulate */
+ return EMULATE_FAIL;
+
+ return EMULATE_AGAIN;
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ /* When we have paired singles, we emulate in software */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
+ return RESUME_GUEST;
+
+ if (!(vcpu->arch.shared->msr & msr)) {
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ return RESUME_GUEST;
+ }
+
+ /* We already own the ext */
+ if (vcpu->arch.guest_owned_ext & msr) {
+ return RESUME_GUEST;
+ }
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+ current->thread.regs->msr |= msr;
+
+ switch (msr) {
+ case MSR_FP:
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+ t->fpscr.val = vcpu->arch.fpscr;
+ t->fpexc_mode = 0;
+ kvmppc_load_up_fpu();
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+ t->vscr = vcpu->arch.vscr;
+ t->vrsave = -1;
+ kvmppc_load_up_altivec();
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+ kvmppc_load_up_vsx();
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext |= msr;
+
+ kvmppc_recalc_shadow_msr(vcpu);
+
+ return RESUME_GUEST;
+}
+
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ int r = RESUME_HOST;
+
+ vcpu->stat.sum_exits++;
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ trace_kvm_book3s_exit(exit_nr, vcpu);
+ kvm_resched(vcpu);
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_INST_STORAGE:
+ vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We mark segments as unused when invalidating them, so
+ * treat the respective fault as a segment fault. */
+ if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+ == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
+ /* only care about PTEG not found errors, but leave NX alone */
+ if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+ r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+ vcpu->stat.sp_instruc++;
+ } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * XXX If we do the dcbz hack, we use the NX bit to flush and patch
+ * the page, so we can't use the NX bit inside the guest. Let's cross
+ * our fingers that no guest that needs the dcbz hack also uses NX.
+ */
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
+ r = RESUME_GUEST;
+ } else {
+ vcpu->arch.shared->msr |=
+ to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_DATA_STORAGE:
+ {
+ ulong dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We mark segments as unused when invalidating them, so
+ * treat the respective fault as a segment fault. */
+ if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, dar);
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
+ /* The only case we need to handle is missing shadow PTEs */
+ if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+ r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+ } else {
+ vcpu->arch.shared->dar = dar;
+ vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_DATA_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_DATA_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_INST_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_INST_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PROGRAM:
+ {
+ enum emulation_result er;
+ ulong flags;
+
+program_interrupt:
+ flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+
+ if (vcpu->arch.shared->msr & MSR_PR) {
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+#endif
+ if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
+ (INS_DCBZ & 0xfffffff7)) {
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ }
+ }
+
+ vcpu->stat.emulated_inst_exits++;
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_AGAIN:
+ r = RESUME_GUEST;
+ break;
+ case EMULATE_FAIL:
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+ __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST_NV;
+ break;
+ default:
+ BUG();
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_SYSCALL:
+ if (vcpu->arch.osi_enabled &&
+ (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
+ (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+ /* MOL hypercalls */
+ u64 *gprs = run->osi.gprs;
+ int i;
+
+ run->exit_reason = KVM_EXIT_OSI;
+ for (i = 0; i < 32; i++)
+ gprs[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu->arch.osi_needed = 1;
+ r = RESUME_HOST_NV;
+ } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
+ /* KVM PV hypercalls */
+ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+ r = RESUME_GUEST;
+ } else {
+ /* Guest syscalls */
+ vcpu->stat.syscall_exits++;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_FP_UNAVAIL:
+ case BOOK3S_INTERRUPT_ALTIVEC:
+ case BOOK3S_INTERRUPT_VSX:
+ {
+ int ext_msr = 0;
+
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
+ case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
+ case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
+ }
+
+ switch (kvmppc_check_ext(vcpu, exit_nr)) {
+ case EMULATE_DONE:
+ /* everything ok - let's enable the ext */
+ r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+ break;
+ case EMULATE_FAIL:
+ /* we need to emulate this instruction */
+ goto program_interrupt;
+ break;
+ default:
+ /* nothing to worry about - go again */
+ break;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_ALIGNMENT:
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
+ vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ }
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ case BOOK3S_INTERRUPT_TRACE:
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ break;
+ default:
+ /* Ugh - bork here! What did we get? */
+ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+ exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+ r = RESUME_HOST;
+ BUG();
+ break;
+ }
+
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(current)) {
+#ifdef EXIT_DEBUG
+ printk(KERN_EMERG "KVM: Going back to host\n");
+#endif
+ vcpu->stat.signal_exits++;
+ run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ } else {
+ /* In case an interrupt came in that was triggered
+ * from userspace (like DEC), we need to check what
+ * to inject now! */
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ }
+
+ trace_kvm_book3s_reenter(r, vcpu);
+
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ sregs->pvr = vcpu->arch.pvr;
+
+ sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
+ }
+ } else {
+ for (i = 0; i < 16; i++)
+ sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+
+ for (i = 0; i < 8; i++) {
+ sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
+ sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ vcpu3s->sdr1 = sregs->u.s.sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
+ sregs->u.s.ppc64.slb[i].slbe);
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
+ }
+ for (i = 0; i < 8; i++) {
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
+ (u32)sregs->u.s.ppc32.ibat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
+ (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
+ (u32)sregs->u.s.ppc32.dbat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
+ (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
+ }
+ }
+
+ /* Flush the MMU after messing with the segments */
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ return 0;
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s;
+ struct kvm_vcpu *vcpu;
+ int err = -ENOMEM;
+ unsigned long p;
+
+ vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
+ if (!vcpu_book3s)
+ goto out;
+
+ vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+ kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+ if (!vcpu_book3s->shadow_vcpu)
+ goto free_vcpu;
+
+ vcpu = &vcpu_book3s->vcpu;
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_shadow_vcpu;
+
+ p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+ if (!p)
+ goto uninit_vcpu;
+
+ /* the real shared page fills the last 4k of our page */
+ vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
+
+ vcpu->arch.host_retip = kvm_return_point;
+ vcpu->arch.host_msr = mfmsr();
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* default to book3s_64 (970fx) */
+ vcpu->arch.pvr = 0x3C0301;
+#else
+ /* default to book3s_32 (750) */
+ vcpu->arch.pvr = 0x84202;
+#endif
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ vcpu->arch.slb_nr = 64;
+
+ /* remember where some real-mode handlers are */
+ vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
+ vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
+ vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+#ifdef CONFIG_PPC_BOOK3S_64
+ vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
+#else
+ vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
+#endif
+
+ vcpu->arch.shadow_msr = MSR_USER64;
+
+ err = kvmppc_mmu_init(vcpu);
+ if (err < 0)
+ goto uninit_vcpu;
+
+ return vcpu;
+
+uninit_vcpu:
+ kvm_vcpu_uninit(vcpu);
+free_shadow_vcpu:
+ kfree(vcpu_book3s->shadow_vcpu);
+free_vcpu:
+ vfree(vcpu_book3s);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+
+ free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
+ kvm_vcpu_uninit(vcpu);
+ kfree(vcpu_book3s->shadow_vcpu);
+ vfree(vcpu_book3s);
+}
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ret;
+ double fpr[32][TS_FPRWIDTH];
+ unsigned int fpscr;
+ int fpexc_mode;
+#ifdef CONFIG_ALTIVEC
+ vector128 vr[32];
+ vector128 vscr;
+ unsigned long uninitialized_var(vrsave);
+ int used_vr;
+#endif
+#ifdef CONFIG_VSX
+ int used_vsr;
+#endif
+ ulong ext_msr;
+
+ /* No need to go into the guest when all we'd do is come right back out */
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+ /* Save FPU state on the stack */
+ if (current->thread.regs->msr & MSR_FP)
+ giveup_fpu(current);
+ memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ fpscr = current->thread.fpscr.val;
+ fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Save Altivec state on the stack */
+ used_vr = current->thread.used_vr;
+ if (used_vr) {
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+ vscr = current->thread.vscr;
+ vrsave = current->thread.vrsave;
+ }
+#endif
+
+#ifdef CONFIG_VSX
+ /* Save VSX state on the stack */
+ used_vsr = current->thread.used_vsr;
+ if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+ __giveup_vsx(current);
+#endif
+
+ /* Remember the MSR with disabled extensions */
+ ext_msr = current->thread.regs->msr;
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.shared->msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
+ kvm_guest_enter();
+
+ ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+ kvm_guest_exit();
+
+ local_irq_disable();
+
+ current->thread.regs->msr = ext_msr;
+
+ /* Make sure we save the guest FPU/Altivec/VSX state */
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+ /* Restore FPU state from the stack */
+ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+ current->thread.fpscr.val = fpscr;
+ current->thread.fpexc_mode = fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Restore Altivec state from the stack */
+ if (used_vr && current->thread.used_vr) {
+ memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+ current->thread.vscr = vscr;
+ current->thread.vrsave = vrsave;
+ }
+ current->thread.used_vr = used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+ current->thread.used_vsr = used_vsr;
+#endif
+
+ return ret;
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ return 0;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
+static int kvmppc_book3s_init(void)
+{
+ int r;
+
+ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+ THIS_MODULE);
+
+ if (r)
+ return r;
+
+ r = kvmppc_mmu_hpte_sysinit();
+
+ return r;
+}
+
+static void kvmppc_book3s_exit(void)
+{
+ kvmppc_mmu_hpte_sysexit();
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 1a1b34487e71..c1f877c4a884 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,41 +36,44 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
-#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name) GLUE(.,name)
+kvmppc_skip_interrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_SRR0
+ addi r13, r13, 4
+ mtspr SPRN_SRR0, r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+kvmppc_skip_Hinterrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_HSRR0
+ addi r13, r13, 4
+ mtspr SPRN_HSRR0, r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
#elif defined(CONFIG_PPC_BOOK3S_32)
-#define LOAD_SHADOW_VCPU(reg) \
- mfspr reg, SPRN_SPRG_THREAD; \
- lwz reg, THREAD_KVM_SVCPU(reg); \
- /* PPC32 can have a NULL pointer - let's check for that */ \
- mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
- mfcr r12; \
- cmpwi reg, 0; \
- bne 1f; \
- mfspr reg, SPRN_SPRG_SCRATCH0; \
- mtcr r12; \
- mfspr r12, SPRN_SPRG_SCRATCH1; \
- b kvmppc_resume_\intno; \
-1:; \
- mtcr r12; \
- mfspr r12, SPRN_SPRG_SCRATCH1; \
- tophys(reg, reg)
-
-#define SHADOW_VCPU_OFF 0
#define MSR_NOIRQ MSR_KERNEL
#define FUNC(name) name
-#endif
-
.macro INTERRUPT_TRAMPOLINE intno
.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:
- SET_SCRATCH0(r13) /* Save r13 */
+ mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
/*
* First thing to do is to find out if we're coming
@@ -78,19 +81,28 @@ kvmppc_trampoline_\intno:
*
* To distinguish, we check a magic byte in the PACA/current
*/
- LOAD_SHADOW_VCPU(r13)
- PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ mfspr r13, SPRN_SPRG_THREAD
+ lwz r13, THREAD_KVM_SVCPU(r13)
+ /* PPC32 can have a NULL pointer - let's check for that */
+ mtspr SPRN_SPRG_SCRATCH1, r12 /* Save r12 */
mfcr r12
- stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
- lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ cmpwi r13, 0
+ bne 1f
+2: mtcr r12
+ mfspr r12, SPRN_SPRG_SCRATCH1
+ mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
+ b kvmppc_resume_\intno /* Get back original handler */
+
+1: tophys(r13, r13)
+ stw r12, HSTATE_SCRATCH1(r13)
+ mfspr r12, SPRN_SPRG_SCRATCH1
+ stw r12, HSTATE_SCRATCH0(r13)
+ lbz r12, HSTATE_IN_GUEST(r13)
cmpwi r12, KVM_GUEST_MODE_NONE
bne ..kvmppc_handler_hasmagic_\intno
/* No KVM guest? Then jump back to the Linux handler! */
- lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
- mtcr r12
- PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- GET_SCRATCH0(r13) /* r13 = original r13 */
- b kvmppc_resume_\intno /* Get back original handler */
+ lwz r12, HSTATE_SCRATCH1(r13)
+ b 2b
/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:
@@ -112,9 +124,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV
-#endif
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
@@ -124,14 +133,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
-/* Those are only available on 64 bit machines */
-
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
-#endif
-
/*
* Bring us back to the faulting code, but skip the
* faulting instruction.
@@ -143,8 +144,8 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
*
* R12 = free
* R13 = Shadow VCPU (PACA)
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
* SPRG_SCRATCH0 = guest R13
*
*/
@@ -156,13 +157,14 @@ kvmppc_handler_skip_ins:
mtsrr0 r12
/* Clean up all state */
- lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ lwz r12, HSTATE_SCRATCH1(r13)
mtcr r12
- PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ PPC_LL r12, HSTATE_SCRATCH0(r13)
GET_SCRATCH0(r13)
/* And get back into the code */
RFI
+#endif
/*
* This trampoline brings us back to a real mode handler
@@ -251,12 +253,4 @@ define_load_up(altivec)
define_load_up(vsx)
#endif
-.global kvmppc_trampoline_lowmem
-kvmppc_trampoline_lowmem:
- PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
-
-.global kvmppc_trampoline_enter
-kvmppc_trampoline_enter:
- PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
-
#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 451264274b8c..aed32e517212 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -22,7 +22,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define GET_SHADOW_VCPU(reg) \
- addi reg, r13, PACA_KVM_SVCPU
+ mr reg, r13
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -71,6 +71,10 @@ kvmppc_handler_trampoline_enter:
/* r3 = shadow vcpu */
GET_SHADOW_VCPU(r3)
+ /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
+ PPC_STL r1, HSTATE_HOST_R1(r3)
+ PPC_STL r2, HSTATE_HOST_R2(r3)
+
/* Move SRR0 and SRR1 into the respective regs */
PPC_LL r9, SVCPU_PC(r3)
mtsrr0 r9
@@ -78,36 +82,36 @@ kvmppc_handler_trampoline_enter:
/* Activate guest mode, so faults get handled by KVM */
li r11, KVM_GUEST_MODE_GUEST
- stb r11, SVCPU_IN_GUEST(r3)
+ stb r11, HSTATE_IN_GUEST(r3)
/* Switch to guest segment. This is subarch specific. */
LOAD_GUEST_SEGMENTS
/* Enter guest */
- PPC_LL r4, (SVCPU_CTR)(r3)
- PPC_LL r5, (SVCPU_LR)(r3)
- lwz r6, (SVCPU_CR)(r3)
- lwz r7, (SVCPU_XER)(r3)
+ PPC_LL r4, SVCPU_CTR(r3)
+ PPC_LL r5, SVCPU_LR(r3)
+ lwz r6, SVCPU_CR(r3)
+ lwz r7, SVCPU_XER(r3)
mtctr r4
mtlr r5
mtcr r6
mtxer r7
- PPC_LL r0, (SVCPU_R0)(r3)
- PPC_LL r1, (SVCPU_R1)(r3)
- PPC_LL r2, (SVCPU_R2)(r3)
- PPC_LL r4, (SVCPU_R4)(r3)
- PPC_LL r5, (SVCPU_R5)(r3)
- PPC_LL r6, (SVCPU_R6)(r3)
- PPC_LL r7, (SVCPU_R7)(r3)
- PPC_LL r8, (SVCPU_R8)(r3)
- PPC_LL r9, (SVCPU_R9)(r3)
- PPC_LL r10, (SVCPU_R10)(r3)
- PPC_LL r11, (SVCPU_R11)(r3)
- PPC_LL r12, (SVCPU_R12)(r3)
- PPC_LL r13, (SVCPU_R13)(r3)
+ PPC_LL r0, SVCPU_R0(r3)
+ PPC_LL r1, SVCPU_R1(r3)
+ PPC_LL r2, SVCPU_R2(r3)
+ PPC_LL r4, SVCPU_R4(r3)
+ PPC_LL r5, SVCPU_R5(r3)
+ PPC_LL r6, SVCPU_R6(r3)
+ PPC_LL r7, SVCPU_R7(r3)
+ PPC_LL r8, SVCPU_R8(r3)
+ PPC_LL r9, SVCPU_R9(r3)
+ PPC_LL r10, SVCPU_R10(r3)
+ PPC_LL r11, SVCPU_R11(r3)
+ PPC_LL r12, SVCPU_R12(r3)
+ PPC_LL r13, SVCPU_R13(r3)
PPC_LL r3, (SVCPU_R3)(r3)
@@ -125,56 +129,63 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
+.global kvmppc_interrupt
+kvmppc_interrupt:
+
/* Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
* R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
*
*/
/* Save registers */
- PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
- PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
- PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
- PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
- PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
- PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
- PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
- PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
- PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
- PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
+ PPC_STL r0, SVCPU_R0(r13)
+ PPC_STL r1, SVCPU_R1(r13)
+ PPC_STL r2, SVCPU_R2(r13)
+ PPC_STL r3, SVCPU_R3(r13)
+ PPC_STL r4, SVCPU_R4(r13)
+ PPC_STL r5, SVCPU_R5(r13)
+ PPC_STL r6, SVCPU_R6(r13)
+ PPC_STL r7, SVCPU_R7(r13)
+ PPC_STL r8, SVCPU_R8(r13)
+ PPC_STL r9, SVCPU_R9(r13)
+ PPC_STL r10, SVCPU_R10(r13)
+ PPC_STL r11, SVCPU_R11(r13)
/* Restore R1/R2 so we can handle faults */
- PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
- PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
+ PPC_LL r1, HSTATE_HOST_R1(r13)
+ PPC_LL r2, HSTATE_HOST_R2(r13)
/* Save guest PC and MSR */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
andi. r0,r12,0x2
beq 1f
mfspr r3,SPRN_HSRR0
mfspr r4,SPRN_HSRR1
andi. r12,r12,0x3ffd
b 2f
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
1: mfsrr0 r3
mfsrr1 r4
2:
- PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
- PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
+ PPC_STL r3, SVCPU_PC(r13)
+ PPC_STL r4, SVCPU_SHADOW_SRR1(r13)
/* Get scratch'ed off registers */
GET_SCRATCH0(r9)
- PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ PPC_LL r8, HSTATE_SCRATCH0(r13)
+ lwz r7, HSTATE_SCRATCH1(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
- stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
+ PPC_STL r9, SVCPU_R13(r13)
+ PPC_STL r8, SVCPU_R12(r13)
+ stw r7, SVCPU_CR(r13)
/* Save more register state */
@@ -184,11 +195,11 @@ kvmppc_handler_trampoline_exit:
mfctr r8
mflr r9
- stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
- PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
- stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
+ stw r5, SVCPU_XER(r13)
+ PPC_STL r6, SVCPU_FAULT_DAR(r13)
+ stw r7, SVCPU_FAULT_DSISR(r13)
+ PPC_STL r8, SVCPU_CTR(r13)
+ PPC_STL r9, SVCPU_LR(r13)
/*
* In order for us to easily get the last instruction,
@@ -218,7 +229,7 @@ ld_last_inst:
/* Set guest mode to 'jump over instruction' so if lwz faults
* we'll just continue at the next IP. */
li r9, KVM_GUEST_MODE_SKIP
- stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ stb r9, HSTATE_IN_GUEST(r13)
/* 1) enable paging for data */
mfmsr r9
@@ -232,13 +243,13 @@ ld_last_inst:
sync
#endif
- stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
+ stw r0, SVCPU_LAST_INST(r13)
no_ld_last_inst:
/* Unset guest mode */
li r9, KVM_GUEST_MODE_NONE
- stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ stb r9, HSTATE_IN_GUEST(r13)
/* Switch back to host MMU */
LOAD_HOST_SEGMENTS
@@ -248,7 +259,7 @@ no_ld_last_inst:
* R1 = host R1
* R2 = host R2
* R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
* SVCPU.* = guest *
*
*/
@@ -258,7 +269,7 @@ no_ld_last_inst:
ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
mtsrr1 r7
/* Load highmem handler address */
- PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
+ PPC_LL r8, HSTATE_VMHANDLER(r13)
mtsrr0 r8
RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8462b3a1c1c7..ee45fa01220e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,60 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
}
}
+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ enable_kernel_spe();
+ kvmppc_save_guest_spe(vcpu);
+ vcpu->arch.shadow_msr &= ~MSR_SPE;
+ preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ enable_kernel_spe();
+ kvmppc_load_guest_spe(vcpu);
+ vcpu->arch.shadow_msr |= MSR_SPE;
+ preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.shared->msr & MSR_SPE) {
+ if (!(vcpu->arch.shadow_msr & MSR_SPE))
+ kvmppc_vcpu_enable_spe(vcpu);
+ } else if (vcpu->arch.shadow_msr & MSR_SPE) {
+ kvmppc_vcpu_disable_spe(vcpu);
+ }
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/*
+ * Helper function for "full" MSR writes. No need to call this if only
+ * EE/CE/ME/DE/RI are changing.
+ */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+ u32 old_msr = vcpu->arch.shared->msr;
+
+ vcpu->arch.shared->msr = new_msr;
+
+ kvmppc_mmu_msr_notify(vcpu, old_msr);
+
+ if (vcpu->arch.shared->msr & MSR_WE) {
+ kvm_vcpu_block(vcpu);
+ kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+ }
+
+ kvmppc_vcpu_sync_spe(vcpu);
+}
+
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
unsigned int priority)
{
@@ -257,6 +312,19 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
vcpu->arch.shared->int_pending = 0;
}
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ local_irq_disable();
+ kvm_guest_enter();
+ ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ kvm_guest_exit();
+ local_irq_enable();
+
+ return ret;
+}
+
/**
* kvmppc_handle_exit
*
@@ -344,10 +412,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
- case BOOKE_INTERRUPT_SPE_UNAVAIL:
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+ case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+ if (vcpu->arch.shared->msr & MSR_SPE)
+ kvmppc_vcpu_enable_spe(vcpu);
+ else
+ kvmppc_booke_queue_irqprio(vcpu,
+ BOOKE_IRQPRIO_SPE_UNAVAIL);
r = RESUME_GUEST;
break;
+ }
case BOOKE_INTERRUPT_SPE_FP_DATA:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +432,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
+#else
+ case BOOKE_INTERRUPT_SPE_UNAVAIL:
+ /*
+ * Guest wants SPE, but host kernel doesn't support it. Send
+ * an "unimplemented operation" program check to the guest.
+ */
+ kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+ r = RESUME_GUEST;
+ break;
+
+ /*
+ * These really should never happen without CONFIG_SPE,
+ * as we should never enable the real MSR[SPE] in the guest.
+ */
+ case BOOKE_INTERRUPT_SPE_FP_DATA:
+ case BOOKE_INTERRUPT_SPE_FP_ROUND:
+ printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+ __func__, exit_nr, vcpu->arch.pc);
+ run->hw.hardware_exit_reason = exit_nr;
+ r = RESUME_HOST;
+ break;
+#endif
case BOOKE_INTERRUPT_DATA_STORAGE:
kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
@@ -392,6 +488,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_t gpaddr;
gfn_t gfn;
+#ifdef CONFIG_KVM_E500
+ if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
+ kvmppc_map_magic(vcpu);
+ kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+ r = RESUME_GUEST;
+
+ break;
+ }
+#endif
+
/* Check the guest TLB. */
gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
if (gtlb_index < 0) {
@@ -514,6 +621,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.pc = 0;
vcpu->arch.shared->msr = 0;
+ vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu->arch.shadow_pid = 1;
@@ -770,6 +878,26 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
return -ENOTSUPP;
}
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ return 0;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
int __init kvmppc_booke_init(void)
{
unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb7030358..8e1fe33d64e5 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,24 +52,19 @@
extern unsigned long kvmppc_booke_handlers;
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
- if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
- kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
- vcpu->arch.shared->msr = new_msr;
-
- if (vcpu->arch.shared->msr & MSR_WE) {
- kvm_vcpu_block(vcpu);
- kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
- };
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+/* low-level asm code to transfer guest state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function, manages flags, host state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index b58ccae95904..42f2fb1f66e9 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
@@ -24,8 +25,6 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
-#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
-
#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
/* The host stack layout: */
@@ -192,6 +191,12 @@ _GLOBAL(kvmppc_resume_host)
lwz r3, VCPU_HOST_PID(r4)
mtspr SPRN_PID, r3
+#ifdef CONFIG_FSL_BOOKE
+ /* we cheat and know that Linux doesn't use PID1, which is always 0 */
+ lis r3, 0
+ mtspr SPRN_PID1, r3
+#endif
+
/* Restore host IVPR before re-enabling interrupts. We cheat and know
* that Linux IVPR is always 0xc0000000. */
lis r3, 0xc000
@@ -241,6 +246,14 @@ _GLOBAL(kvmppc_resume_host)
heavyweight_exit:
/* Not returning to guest. */
+#ifdef CONFIG_SPE
+ /* save guest SPEFSCR and load host SPEFSCR */
+ mfspr r9, SPRN_SPEFSCR
+ stw r9, VCPU_SPEFSCR(r4)
+ lwz r9, VCPU_HOST_SPEFSCR(r4)
+ mtspr SPRN_SPEFSCR, r9
+#endif
+
/* We already saved guest volatile register state; now save the
* non-volatiles. */
stw r15, VCPU_GPR(r15)(r4)
@@ -342,6 +355,14 @@ _GLOBAL(__kvmppc_vcpu_run)
lwz r30, VCPU_GPR(r30)(r4)
lwz r31, VCPU_GPR(r31)(r4)
+#ifdef CONFIG_SPE
+ /* save host SPEFSCR and load guest SPEFSCR */
+ mfspr r3, SPRN_SPEFSCR
+ stw r3, VCPU_HOST_SPEFSCR(r4)
+ lwz r3, VCPU_SPEFSCR(r4)
+ mtspr SPRN_SPEFSCR, r3
+#endif
+
lightweight_exit:
stw r2, HOST_R2(r1)
@@ -350,6 +371,11 @@ lightweight_exit:
lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3
+#ifdef CONFIG_FSL_BOOKE
+ lwz r3, VCPU_SHADOW_PID1(r4)
+ mtspr SPRN_PID1, r3
+#endif
+
#ifdef CONFIG_44x
iccci 0, 0 /* XXX hack */
#endif
@@ -405,20 +431,17 @@ lightweight_exit:
/* Finish loading guest volatiles and jump to guest. */
lwz r3, VCPU_CTR(r4)
+ lwz r5, VCPU_CR(r4)
+ lwz r6, VCPU_PC(r4)
+ lwz r7, VCPU_SHADOW_MSR(r4)
mtctr r3
- lwz r3, VCPU_CR(r4)
- mtcr r3
+ mtcr r5
+ mtsrr0 r6
+ mtsrr1 r7
lwz r5, VCPU_GPR(r5)(r4)
lwz r6, VCPU_GPR(r6)(r4)
lwz r7, VCPU_GPR(r7)(r4)
lwz r8, VCPU_GPR(r8)(r4)
- lwz r3, VCPU_PC(r4)
- mtsrr0 r3
- lwz r3, VCPU_SHARED(r4)
- lwz r3, (VCPU_SHARED_MSR + 4)(r3)
- oris r3, r3, KVMPPC_MSR_MASK@h
- ori r3, r3, KVMPPC_MSR_MASK@l
- mtsrr1 r3
/* Clear any debug events which occurred since we disabled MSR[DE].
* XXX This gives us a 3-instruction window in which a breakpoint
@@ -430,3 +453,24 @@ lightweight_exit:
lwz r3, VCPU_GPR(r3)(r4)
lwz r4, VCPU_GPR(r4)(r4)
rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+ cmpi 0,r3,0
+ beqlr-
+ SAVE_32EVRS(0, r4, r3, VCPU_EVR)
+ evxor evr6, evr6, evr6
+ evmwumiaa evr6, evr6, evr6
+ li r4,VCPU_ACC
+ evstddx evr6, r4, r3 /* save acc */
+ blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+ cmpi 0,r3,0
+ beqlr-
+ li r4,VCPU_ACC
+ evlddx evr6,r4,r3
+ evmra evr6,evr6 /* load acc */
+ REST_32EVRS(0, r4, r3, VCPU_EVR)
+ blr
+#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 318dbc61ba44..797a7447c268 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
@@ -41,6 +41,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_e500_tlb_put(vcpu);
+
+#ifdef CONFIG_SPE
+ if (vcpu->arch.shadow_msr & MSR_SPE)
+ kvmppc_vcpu_disable_spe(vcpu);
+#endif
}
int kvmppc_core_check_processor_compat(void)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 69cd665a0caf..d48ae396f41e 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -81,8 +81,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
kvmppc_set_pid(vcpu, spr_val);
break;
case SPRN_PID1:
+ if (spr_val != 0)
+ return EMULATE_FAIL;
vcpu_e500->pid[1] = spr_val; break;
case SPRN_PID2:
+ if (spr_val != 0)
+ return EMULATE_FAIL;
vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
vcpu_e500->mas0 = spr_val; break;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index b18fe353397d..13c432ea2fa8 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -28,8 +28,196 @@
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
+struct id {
+ unsigned long val;
+ struct id **pentry;
+};
+
+#define NUM_TIDS 256
+
+/*
+ * This table provides mappings from:
+ * (guestAS, guestTID, guestPR) --> shadow ID on the physical cpu
+ * guestAS [0..1]
+ * guestTID [0..255]
+ * guestPR [0..1]
+ * ID [1..255]
+ * Each vcpu keeps one vcpu_id_table.
+ */
+struct vcpu_id_table {
+ struct id id[2][NUM_TIDS][2];
+};
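+
+/*
+ * For example, the shadow ID for a guest running in AS1 with TID 5 in
+ * user mode (PR=1) lives at idt->id[1][5][1]; local_sid_lookup()
+ * below decides whether that entry is still valid on the current
+ * host core.
+ */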
+
+/*
+ * This table provides the reverse mapping of vcpu_id_table:
+ * ID --> address of the vcpu_id_table item.
+ * Each physical core has one pcpu_id_table.
+ */
+struct pcpu_id_table {
+ struct id *entry[NUM_TIDS];
+};
+
+static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
+
+/* This variable keeps the last used shadow ID on the local core.
+ * The valid range of shadow IDs is [1..255]. */
+static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
+
static unsigned int tlb1_entry_num;
+/*
+ * Allocate a free shadow ID and set up a valid sid mapping in the given entry.
+ * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_setup_one(struct id *entry)
+{
+ unsigned long sid;
+ int ret = -1;
+
+ sid = ++(__get_cpu_var(pcpu_last_used_sid));
+ if (sid < NUM_TIDS) {
+ __get_cpu_var(pcpu_sids).entry[sid] = entry;
+ entry->val = sid;
+ entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+ ret = sid;
+ }
+
+ /*
+ * If sid == NUM_TIDS, we've run out of sids. We return -1, and
+ * the caller will invalidate everything and start over.
+ *
+ * sid > NUM_TIDS indicates a race, which we disable preemption to
+ * avoid.
+ */
+ WARN_ON(sid > NUM_TIDS);
+
+ return ret;
+}
+
+/*
+ * Check whether the given entry contains a valid shadow ID mapping.
+ * A mapping is considered valid only if
+ * both the vcpu and the pcpu know it.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_lookup(struct id *entry)
+{
+ if (entry && entry->val != 0 &&
+ __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+ entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+ return entry->val;
+ return -1;
+}
+
+/* Invalidate all id mappings on local core */
+static inline void local_sid_destroy_all(void)
+{
+ preempt_disable();
+ __get_cpu_var(pcpu_last_used_sid) = 0;
+ memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+ preempt_enable();
+}
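+
+/*
+ * When local_sid_setup_one() runs out of IDs, kvmppc_e500_get_sid()
+ * below flushes the whole host TLB (_tlbil_all) and calls this to
+ * restart shadow ID allocation from scratch on this core.
+ */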
+
+static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
+ return vcpu_e500->idt;
+}
+
+static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kfree(vcpu_e500->idt);
+}
+
+/* Invalidate all mappings on vcpu */
+static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/* Invalidate one ID mapping on vcpu */
+static inline void kvmppc_e500_id_table_reset_one(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int as, int pid, int pr)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+
+ BUG_ON(as >= 2);
+ BUG_ON(pid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ idt->id[as][pid][pr].val = 0;
+ idt->id[as][pid][pr].pentry = NULL;
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/*
+ * Map a guest (vcpu, AS, ID, PR) tuple to a physical core shadow ID.
+ * This function first looks up whether a valid mapping exists
+ * and, if not, creates a new one.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+ unsigned int as, unsigned int gid,
+ unsigned int pr, int avoid_recursion)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ int sid;
+
+ BUG_ON(as >= 2);
+ BUG_ON(gid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ sid = local_sid_lookup(&idt->id[as][gid][pr]);
+
+ while (sid <= 0) {
+ /* No mapping yet */
+ sid = local_sid_setup_one(&idt->id[as][gid][pr]);
+ if (sid <= 0) {
+ _tlbil_all();
+ local_sid_destroy_all();
+ }
+
+ /* Update shadow pid when mappings are changed */
+ if (!avoid_recursion)
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
+
+ return sid;
+}
+
+/* Map guest PIDs to shadow PIDs.
+ * We use PID to shadow the guest's current non-zero PID,
+ * and PID1 to shadow guest PID 0,
+ * so that guest tlbes with TID=0 can be accessed at any time. */
+void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ preempt_disable();
+ vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu),
+ get_cur_pid(&vcpu_e500->vcpu),
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu), 0,
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ preempt_enable();
+}
+
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -41,25 +229,14 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
printk("Guest TLB%d:\n", tlbsel);
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
- tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
+ for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
+ tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
if (tlbe->mas1 & MAS1_VALID)
printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
tlbsel, i, tlbe->mas1, tlbe->mas2,
tlbe->mas3, tlbe->mas7);
}
}
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++) {
- printk("Shadow TLB%d:\n", tlbsel);
- for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
- tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
- if (tlbe->mas1 & MAS1_VALID)
- printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
- tlbsel, i, tlbe->mas1, tlbe->mas2,
- tlbe->mas3, tlbe->mas7);
- }
- }
}
static inline unsigned int tlb0_get_next_victim(
@@ -67,16 +244,17 @@ static inline unsigned int tlb0_get_next_victim(
{
unsigned int victim;
- victim = vcpu_e500->guest_tlb_nv[0]++;
- if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
- vcpu_e500->guest_tlb_nv[0] = 0;
+ victim = vcpu_e500->gtlb_nv[0]++;
+ if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
+ vcpu_e500->gtlb_nv[0] = 0;
return victim;
}
static inline unsigned int tlb1_max_shadow_size(void)
{
- return tlb1_entry_num - tlbcam_index;
+ /* reserve one entry for magic page */
+ return tlb1_entry_num - tlbcam_index - 1;
}
static inline int tlbe_is_writable(struct tlbe *tlbe)
@@ -112,72 +290,149 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
/*
* writing shadow tlb entry to host TLB
*/
-static inline void __write_host_tlbe(struct tlbe *stlbe)
+static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS0, mas0);
mtspr(SPRN_MAS1, stlbe->mas1);
mtspr(SPRN_MAS2, stlbe->mas2);
mtspr(SPRN_MAS3, stlbe->mas3);
mtspr(SPRN_MAS7, stlbe->mas7);
- __asm__ __volatile__ ("tlbwe\n" : : );
+ asm volatile("isync; tlbwe" : : : "memory");
+ local_irq_restore(flags);
}
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+ int tlbsel, int esel, struct tlbe *stlbe)
{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
-
- local_irq_disable();
if (tlbsel == 0) {
- __write_host_tlbe(stlbe);
+ __write_host_tlbe(stlbe,
+ MAS0_TLBSEL(0) |
+ MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
} else {
- unsigned register mas0;
-
- mas0 = mfspr(SPRN_MAS0);
-
- mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
- __write_host_tlbe(stlbe);
-
- mtspr(SPRN_MAS0, mas0);
+ __write_host_tlbe(stlbe,
+ MAS0_TLBSEL(1) |
+ MAS0_ESEL(to_htlb1_esel(esel)));
}
- local_irq_enable();
+ trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
+ stlbe->mas3, stlbe->mas7);
+}
+
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct tlbe magic;
+ ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+ unsigned int stid;
+ pfn_t pfn;
+
+ pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+ get_page(pfn_to_page(pfn));
+
+ preempt_disable();
+ stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
+
+ magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
+ MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+ magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+ magic.mas3 = (pfn << PAGE_SHIFT) |
+ MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+ magic.mas7 = pfn >> (32 - PAGE_SHIFT);
+
+ __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+ preempt_enable();
}
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int i;
- unsigned register mas0;
-
- /* Load all valid TLB1 entries to reduce guest tlb miss fault */
- local_irq_disable();
- mas0 = mfspr(SPRN_MAS0);
- for (i = 0; i < tlb1_max_shadow_size(); i++) {
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
-
- if (get_tlb_v(stlbe)) {
- mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
- | MAS0_ESEL(to_htlb1_esel(i)));
- __write_host_tlbe(stlbe);
- }
- }
- mtspr(SPRN_MAS0, mas0);
- local_irq_enable();
+
+ /* Shadow PID may be expired on local core */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
- _tlbil_all();
+}
+
+static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int esel)
+{
+ struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ unsigned int pr, tid, ts, pid;
+ u32 val, eaddr;
+ unsigned long flags;
+
+ ts = get_tlb_ts(gtlbe);
+ tid = get_tlb_tid(gtlbe);
+
+ preempt_disable();
+
+ /* One guest ID may be mapped to two shadow IDs */
+ for (pr = 0; pr < 2; pr++) {
+ /*
+ * The shadow PID can have a valid mapping on at most one
+ * host CPU. In the common case, it will be valid on this
+ * CPU, in which case (for TLB0) we do a local invalidation
+ * of the specific address.
+ *
+ * If the shadow PID is not valid on the current host CPU, or
+ * if we're invalidating a TLB1 entry, we invalidate the
+ * entire shadow PID.
+ */
+ if (tlbsel == 1 ||
+ (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
+ kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
+ continue;
+ }
+
+ /*
+ * The guest is invalidating a TLB0 entry which is in a PID
+ * that has a valid shadow mapping on this host CPU. We
+ * search host TLB0 to invalidate its shadow TLB entry,
+ * similar to __tlbil_va except that we need to look in AS1.
+ */
+ val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
+ eaddr = get_tlb_eaddr(gtlbe);
+
+ local_irq_save(flags);
+
+ mtspr(SPRN_MAS6, val);
+ asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
+ val = mfspr(SPRN_MAS1);
+ if (val & MAS1_VALID) {
+ mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ asm volatile("tlbwe");
+ }
+
+ local_irq_restore(flags);
+ }
+
+ preempt_enable();
}
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
+ int size = vcpu_e500->gtlb_size[tlbsel];
+ int set_base;
int i;
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
- struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
+ if (tlbsel == 0) {
+ int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
+ set_base = (eaddr >> PAGE_SHIFT) & mask;
+ set_base *= KVM_E500_TLB0_WAY_NUM;
+ size = KVM_E500_TLB0_WAY_NUM;
+ } else {
+ set_base = 0;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
unsigned int tid;
if (eaddr < get_tlb_eaddr(tlbe))
@@ -196,66 +451,32 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
if (get_tlb_ts(tlbe) != as && as != -1)
continue;
- return i;
+ return set_base + i;
}
return -1;
}
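
The lookup above is now set-associative for TLB0: the low EPN bits pick a set and only that set's ways are scanned, instead of the old linear search. A minimal standalone C sketch of the index computation, with TLB0_SIZE and TLB0_WAYS as illustrative stand-ins for KVM_E500_TLB0_SIZE and KVM_E500_TLB0_WAY_NUM:

#include <stdio.h>

#define TLB0_SIZE  512	/* total guest TLB0 entries (illustrative) */
#define TLB0_WAYS  2	/* ways per set (illustrative) */
#define PAGE_SHIFT 12

/* First entry of the set a guest effective address falls into. */
static int tlb0_set_base(unsigned long eaddr)
{
	int mask = TLB0_SIZE / TLB0_WAYS - 1;		/* number of sets - 1 */
	int set  = (eaddr >> PAGE_SHIFT) & mask;	/* set index from EPN bits */

	return set * TLB0_WAYS;
}

int main(void)
{
	/* Same low EPN bits => same set; only TLB0_WAYS entries are scanned. */
	printf("%d %d\n", tlb0_set_base(0x10003000UL), tlb0_set_base(0x20003000UL));
	return 0;
}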
-static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
-{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
- struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];
-
- if (page) {
- vcpu_e500->shadow_pages[tlbsel][esel] = NULL;
-
- if (get_tlb_v(stlbe)) {
- if (tlbe_is_writable(stlbe))
- kvm_release_page_dirty(page);
- else
- kvm_release_page_clean(page);
- }
- }
-}
-
-static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
+ struct tlbe *gtlbe,
+ pfn_t pfn)
{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
+ priv->pfn = pfn;
+ priv->flags = E500_TLB_VALID;
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
- stlbe->mas1 = 0;
- trace_kvm_stlb_inval(index_of(tlbsel, esel));
+ if (tlbe_is_writable(gtlbe))
+ priv->flags |= E500_TLB_DIRTY;
}
-static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- gva_t eaddr, gva_t eend, u32 tid)
+static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
{
- unsigned int pid = tid & 0xff;
- unsigned int i;
-
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
- unsigned int tid;
-
- if (!get_tlb_v(stlbe))
- continue;
-
- if (eend < get_tlb_eaddr(stlbe))
- continue;
+ if (priv->flags & E500_TLB_VALID) {
+ if (priv->flags & E500_TLB_DIRTY)
+ kvm_release_pfn_dirty(priv->pfn);
+ else
+ kvm_release_pfn_clean(priv->pfn);
- if (eaddr > get_tlb_end(stlbe))
- continue;
-
- tid = get_tlb_tid(stlbe);
- if (tid && (tid != pid))
- continue;
-
- kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
- write_host_tlbe(vcpu_e500, 1, i);
+ priv->flags = 0;
}
}
@@ -273,7 +494,7 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
| MAS1_TID(vcpu_e500->pid[pidsel])
| MAS1_TSIZE(tsized);
@@ -286,56 +507,154 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
vcpu_e500->mas7 = 0;
}
-static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
+static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct tlbe *gtlbe, int tsize,
+ struct tlbe_priv *priv,
+ u64 gvaddr, struct tlbe *stlbe)
{
- struct page *new_page;
- struct tlbe *stlbe;
- hpa_t hpaddr;
-
- stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
-
- /* Get reference to new page. */
- new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
- if (is_error_page(new_page)) {
- printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
- (long)gfn);
- kvm_release_page_clean(new_page);
- return;
- }
- hpaddr = page_to_phys(new_page);
-
- /* Drop reference to old page. */
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
+ pfn_t pfn = priv->pfn;
+ unsigned int stid;
- vcpu_e500->shadow_pages[tlbsel][esel] = new_page;
+ stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
+ get_tlb_tid(gtlbe),
+ get_cur_pr(&vcpu_e500->vcpu), 0);
- /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
- | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
+ /* Force TS=1 IPROT=0 for all guest mappings. */
+ stlbe->mas1 = MAS1_TSIZE(tsize)
+ | MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN)
| e500_shadow_mas2_attrib(gtlbe->mas2,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas3 = (hpaddr & MAS3_RPN)
+ stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
| e500_shadow_mas3_attrib(gtlbe->mas3,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
+ stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
+}
- trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
- stlbe->mas3, stlbe->mas7);
+
+static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
+ struct tlbe *stlbe)
+{
+ struct kvm_memory_slot *slot;
+ unsigned long pfn, hva;
+ int pfnmap = 0;
+ int tsize = BOOK3E_PAGESZ_4K;
+ struct tlbe_priv *priv;
+
+ /*
+ * Translate guest physical to true physical, acquiring
+ * a page reference if it is normal, non-reserved memory.
+ *
+ * gfn_to_memslot() must succeed because otherwise we wouldn't
+ * have gotten this far. Eventually we should just pass the slot
+ * pointer through from the first lookup.
+ */
+ slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
+ hva = gfn_to_hva_memslot(slot, gfn);
+
+ if (tlbsel == 1) {
+ struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->mm, hva);
+ if (vma && hva >= vma->vm_start &&
+ (vma->vm_flags & VM_PFNMAP)) {
+ /*
+ * This VMA is a physically contiguous region (e.g.
+ * /dev/mem) that bypasses normal Linux page
+ * management. Find the overlap between the
+ * vma and the memslot.
+ */
+
+ unsigned long start, end;
+ unsigned long slot_start, slot_end;
+
+ pfnmap = 1;
+
+ start = vma->vm_pgoff;
+ end = start +
+ ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+
+ pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
+
+ slot_start = pfn - (gfn - slot->base_gfn);
+ slot_end = slot_start + slot->npages;
+
+ if (start < slot_start)
+ start = slot_start;
+ if (end > slot_end)
+ end = slot_end;
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+
+ /*
+ * Now find the largest tsize (up to what the guest
+ * requested) that will cover gfn, stay within the
+ * range, and for which gfn and pfn are mutually
+ * aligned.
+ */
+
+ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
+ unsigned long gfn_start, gfn_end, tsize_pages;
+ tsize_pages = 1 << (tsize - 2);
+
+ gfn_start = gfn & ~(tsize_pages - 1);
+ gfn_end = gfn_start + tsize_pages;
+
+ if (gfn_start + pfn - gfn < start)
+ continue;
+ if (gfn_end + pfn - gfn > end)
+ continue;
+ if ((gfn & (tsize_pages - 1)) !=
+ (pfn & (tsize_pages - 1)))
+ continue;
+
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn &= ~(tsize_pages - 1);
+ break;
+ }
+ }
+
+ up_read(&current->mm->mmap_sem);
+ }
+
+ if (likely(!pfnmap)) {
+ pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
+ if (is_error_pfn(pfn)) {
+ printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
+ (long)gfn);
+ kvm_release_pfn_clean(pfn);
+ return;
+ }
+ }
+
+ /* Drop old priv and setup new one. */
+ priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
+ kvmppc_e500_priv_release(priv);
+ kvmppc_e500_priv_setup(priv, gtlbe, pfn);
+
+ kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
}
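
For VM_PFNMAP regions, the loop above walks candidate page sizes downward until one fits. A standalone sketch of the same search; the TSIZE encoding (page size 4^(tsize/2) KB, so tsize_pages 4K pages per block) follows the patch, while the ranges in main() are made-up illustrative inputs:

#include <stdio.h>

#define BOOK3E_PAGESZ_4K 2

/*
 * Largest even TSIZE whose naturally-aligned block around gfn stays
 * inside [start, end) of the host pfn range and keeps gfn and pfn
 * mutually aligned; mirrors the loop in kvmppc_e500_shadow_map().
 */
static int pick_tsize(unsigned long gfn, unsigned long pfn,
		      unsigned long start, unsigned long end, int tsize)
{
	for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
		unsigned long tsize_pages = 1UL << (tsize - 2);
		unsigned long gfn_start = gfn & ~(tsize_pages - 1);
		unsigned long gfn_end = gfn_start + tsize_pages;

		if (gfn_start + pfn - gfn < start)
			continue;	/* block starts before the pfn range */
		if (gfn_end + pfn - gfn > end)
			continue;	/* block ends past the pfn range */
		if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1)))
			continue;	/* gfn and pfn not mutually aligned */
		break;
	}
	return tsize;
}

int main(void)
{
	/* A 64-page (256K) block fits and is aligned, so tsize stays 8. */
	printf("tsize=%d\n", pick_tsize(0x110, 0x210, 0x200, 0x240, 8));
	return 0;
}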
/* XXX only map the one-to-one case, for now use TLB0 */
-static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int esel, struct tlbe *stlbe)
{
struct tlbe *gtlbe;
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[0][esel];
kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
- gtlbe, tlbsel, esel);
+ gtlbe, 0, esel, stlbe);
return esel;
}
@@ -344,53 +663,37 @@ static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
* the shadow TLB. */
/* XXX for both one-to-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
+ u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
unsigned int victim;
- victim = vcpu_e500->guest_tlb_nv[1]++;
+ victim = vcpu_e500->gtlb_nv[1]++;
- if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
- vcpu_e500->guest_tlb_nv[1] = 0;
+ if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
+ vcpu_e500->gtlb_nv[1] = 0;
- kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);
+ kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);
return victim;
}
-/* Invalidate all guest kernel mappings when enter usermode,
- * so that when they fault back in they will get the
- * proper permission bits. */
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
- if (usermode) {
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int i;
-
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < tlb1_max_shadow_size(); i++)
- kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- _tlbil_all();
- }
+	/* Recalculate the shadow PID since the MSR changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
-static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static inline int kvmppc_e500_gtlbe_invalidate(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int esel)
{
- struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
if (unlikely(get_tlb_iprot(gtlbe)))
return -1;
- if (tlbsel == 1) {
- kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
- get_tlb_end(gtlbe),
- get_tlb_tid(gtlbe));
- } else {
- kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
- }
-
gtlbe->mas1 = 0;
return 0;
@@ -401,13 +704,14 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
int esel;
if (value & MMUCSR0_TLB0FI)
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
if (value & MMUCSR0_TLB1FI)
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
- _tlbil_all();
+ /* Invalidate all vcpu id mappings */
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -428,7 +732,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
if (ia) {
/* invalidate all entries */
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
} else {
ea &= 0xfffff000;
@@ -438,7 +742,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
}
- _tlbil_all();
+ /* Invalidate all vcpu id mappings */
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -452,9 +757,9 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
tlbsel = get_tlb_tlbsel(vcpu_e500);
esel = get_tlb_esel(vcpu_e500, tlbsel);
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
vcpu_e500->mas0 &= ~MAS0_NV(~0);
- vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = gtlbe->mas1;
vcpu_e500->mas2 = gtlbe->mas2;
vcpu_e500->mas3 = gtlbe->mas3;
@@ -477,14 +782,14 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
if (esel >= 0) {
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
break;
}
}
if (gtlbe) {
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = gtlbe->mas1;
vcpu_e500->mas2 = gtlbe->mas2;
vcpu_e500->mas3 = gtlbe->mas3;
@@ -497,7 +802,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
@@ -514,23 +819,16 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- u64 eaddr;
- u64 raddr;
- u32 tid;
struct tlbe *gtlbe;
- int tlbsel, esel, stlbsel, sesel;
+ int tlbsel, esel;
tlbsel = get_tlb_tlbsel(vcpu_e500);
esel = get_tlb_esel(vcpu_e500, tlbsel);
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
- if (get_tlb_v(gtlbe) && tlbsel == 1) {
- eaddr = get_tlb_eaddr(gtlbe);
- tid = get_tlb_tid(gtlbe);
- kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
- get_tlb_end(gtlbe), tid);
- }
+ if (get_tlb_v(gtlbe))
+ kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
gtlbe->mas1 = vcpu_e500->mas1;
gtlbe->mas2 = vcpu_e500->mas2;
@@ -542,6 +840,12 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
+ struct tlbe stlbe;
+ int stlbsel, sesel;
+ u64 eaddr;
+ u64 raddr;
+
+ preempt_disable();
switch (tlbsel) {
case 0:
/* TLB0 */
@@ -549,7 +853,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
stlbsel = 0;
- sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);
+ sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
break;
@@ -564,13 +868,14 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
* are mapped on the fly. */
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
- raddr >> PAGE_SHIFT, gtlbe);
+ raddr >> PAGE_SHIFT, gtlbe, &stlbe);
break;
default:
BUG();
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel);
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+ preempt_enable();
}
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -610,7 +915,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
struct tlbe *gtlbe =
- &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
+ &vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
u64 pgmask = get_tlb_bytes(gtlbe) - 1;
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
@@ -618,38 +923,37 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int tlbsel, i;
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++)
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);
-
- /* discard all guest mapping */
- _tlbil_all();
}
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
unsigned int index)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct tlbe_priv *priv;
+ struct tlbe *gtlbe, stlbe;
int tlbsel = tlbsel_of(index);
int esel = esel_of(index);
int stlbsel, sesel;
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+
+ preempt_disable();
switch (tlbsel) {
case 0:
stlbsel = 0;
sesel = esel;
+ priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];
+
+ kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
+ priv, eaddr, &stlbe);
break;
case 1: {
gfn_t gfn = gpaddr >> PAGE_SHIFT;
- struct tlbe *gtlbe
- = &vcpu_e500->guest_tlb[tlbsel][esel];
stlbsel = 1;
- sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
+ sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
+ gtlbe, &stlbe);
break;
}
@@ -657,7 +961,9 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
BUG();
break;
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel);
+
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+ preempt_enable();
}
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
@@ -679,8 +985,10 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
- vcpu->arch.pid = pid;
+ if (vcpu->arch.pid != pid) {
+ vcpu_e500->pid[0] = vcpu->arch.pid = pid;
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
}
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -688,14 +996,14 @@ void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
struct tlbe *tlbe;
/* Insert large initial mapping for guest. */
- tlbe = &vcpu_e500->guest_tlb[1][0];
+ tlbe = &vcpu_e500->gtlb_arch[1][0];
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
tlbe->mas2 = 0;
tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
tlbe->mas7 = 0;
/* 4K map for serial output. Used by kernel wrapper. */
- tlbe = &vcpu_e500->guest_tlb[1][1];
+ tlbe = &vcpu_e500->gtlb_arch[1][1];
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
@@ -706,68 +1014,64 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;
- vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
- vcpu_e500->guest_tlb[0] =
+ vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
+ vcpu_e500->gtlb_arch[0] =
kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->guest_tlb[0] == NULL)
+ if (vcpu_e500->gtlb_arch[0] == NULL)
goto err_out;
- vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
- vcpu_e500->shadow_tlb[0] =
- kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->shadow_tlb[0] == NULL)
- goto err_out_guest0;
-
- vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
- vcpu_e500->guest_tlb[1] =
+ vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
+ vcpu_e500->gtlb_arch[1] =
kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
- if (vcpu_e500->guest_tlb[1] == NULL)
- goto err_out_shadow0;
+ if (vcpu_e500->gtlb_arch[1] == NULL)
+ goto err_out_guest0;
- vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
- vcpu_e500->shadow_tlb[1] =
- kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
- if (vcpu_e500->shadow_tlb[1] == NULL)
+ vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
+ kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
+ if (vcpu_e500->gtlb_priv[0] == NULL)
goto err_out_guest1;
+ vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
+ kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
- vcpu_e500->shadow_pages[0] = (struct page **)
- kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->shadow_pages[0] == NULL)
- goto err_out_shadow1;
+ if (vcpu_e500->gtlb_priv[1] == NULL)
+ goto err_out_priv0;
- vcpu_e500->shadow_pages[1] = (struct page **)
- kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
- if (vcpu_e500->shadow_pages[1] == NULL)
- goto err_out_page0;
+ if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+ goto err_out_priv1;
/* Init TLB configuration register */
vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
- vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
+ vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
- vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];
+ vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
return 0;
-err_out_page0:
- kfree(vcpu_e500->shadow_pages[0]);
-err_out_shadow1:
- kfree(vcpu_e500->shadow_tlb[1]);
+err_out_priv1:
+ kfree(vcpu_e500->gtlb_priv[1]);
+err_out_priv0:
+ kfree(vcpu_e500->gtlb_priv[0]);
err_out_guest1:
- kfree(vcpu_e500->guest_tlb[1]);
-err_out_shadow0:
- kfree(vcpu_e500->shadow_tlb[0]);
+ kfree(vcpu_e500->gtlb_arch[1]);
err_out_guest0:
- kfree(vcpu_e500->guest_tlb[0]);
+ kfree(vcpu_e500->gtlb_arch[0]);
err_out:
return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- kfree(vcpu_e500->shadow_pages[1]);
- kfree(vcpu_e500->shadow_pages[0]);
- kfree(vcpu_e500->shadow_tlb[1]);
- kfree(vcpu_e500->guest_tlb[1]);
- kfree(vcpu_e500->shadow_tlb[0]);
- kfree(vcpu_e500->guest_tlb[0]);
+ int stlbsel, i;
+
+ /* release all privs */
+ for (stlbsel = 0; stlbsel < 2; stlbsel++)
+ for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
+ struct tlbe_priv *priv =
+ &vcpu_e500->gtlb_priv[stlbsel][i];
+ kvmppc_e500_priv_release(priv);
+ }
+
+ kvmppc_e500_id_table_free(vcpu_e500);
+ kfree(vcpu_e500->gtlb_arch[1]);
+ kfree(vcpu_e500->gtlb_arch[0]);
}
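
The reworked kvmppc_e500_tlb_init() above keeps the usual kernel goto-unwind shape: each successful allocation gains a matching error label that frees everything acquired so far, in reverse order. A minimal standalone sketch of the pattern (demo_init and its fields are hypothetical):

#include <stdlib.h>

struct demo { void *a, *b, *c; };

static int demo_init(struct demo *d)
{
	d->a = calloc(1, 64);
	if (!d->a)
		goto err_out;
	d->b = calloc(1, 64);
	if (!d->b)
		goto err_a;
	d->c = calloc(1, 64);
	if (!d->c)
		goto err_b;
	return 0;

err_b:			/* labels unwind in reverse allocation order */
	free(d->b);
err_a:
	free(d->a);
err_out:
	return -1;
}

int main(void)
{
	struct demo d;
	return demo_init(&d);
}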
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 458946b4775d..59b88e99a235 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
*
@@ -55,6 +55,7 @@ extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
+extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
/* TLB helper functions */
static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
@@ -110,6 +111,16 @@ static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
return vcpu->arch.pid & 0xff;
}
+static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
+}
+
+static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & MSR_PR);
+}
+
static inline unsigned int get_cur_spid(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 616dd516ca1f..a107c9be0fb1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
+#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"
@@ -38,8 +39,12 @@
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
+#ifndef CONFIG_KVM_BOOK3S_64_HV
return !(v->arch.shared->msr & MSR_WE) ||
!!(v->arch.pending_exceptions);
+#else
+ return !(v->arch.ceded) || !!(v->arch.pending_exceptions);
+#endif
}
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -73,7 +78,8 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
}
case HC_VENDOR_KVM | KVM_HC_FEATURES:
r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+ /* XXX Missing magic page on 44x */
r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
@@ -147,7 +153,7 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm)
{
- return 0;
+ return kvmppc_core_init_vm(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -163,6 +169,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm->vcpus[i] = NULL;
atomic_set(&kvm->online_vcpus, 0);
+
+ kvmppc_core_destroy_vm(kvm);
+
mutex_unlock(&kvm->lock);
}
@@ -180,10 +189,13 @@ int kvm_dev_ioctl_check_extension(long ext)
#else
case KVM_CAP_PPC_SEGSTATE:
#endif
- case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_UNSET_IRQ:
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
+ r = 1;
+ break;
+#ifndef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
r = 1;
@@ -191,6 +203,21 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CAP_SPAPR_TCE:
+ r = 1;
+ break;
+ case KVM_CAP_PPC_SMT:
+ r = threads_per_core;
+ break;
+ case KVM_CAP_PPC_RMA:
+ r = 1;
+ /* PPC970 requires an RMA */
+ if (cpu_has_feature(CPU_FTR_ARCH_201))
+ r = 2;
+ break;
+#endif
default:
r = 0;
break;
@@ -211,7 +238,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- return 0;
+ return kvmppc_core_prepare_memory_region(kvm, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -219,7 +246,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- return;
+ kvmppc_core_commit_memory_region(kvm, mem);
}
@@ -287,6 +314,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
+ vcpu->arch.dec_expires = ~(u64)0;
#ifdef CONFIG_KVM_EXIT_TIMING
mutex_init(&vcpu->arch.exit_timing_lock);
@@ -313,6 +341,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
kvmppc_core_vcpu_load(vcpu, cpu);
+ vcpu->cpu = smp_processor_id();
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -321,6 +350,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
#ifdef CONFIG_BOOKE
vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
+ vcpu->cpu = -1;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -492,15 +522,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
for (i = 0; i < 32; i++)
kvmppc_set_gpr(vcpu, i, gprs[i]);
vcpu->arch.osi_needed = 0;
+ } else if (vcpu->arch.hcall_needed) {
+ int i;
+
+ kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
+ for (i = 0; i < 9; ++i)
+ kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
+ vcpu->arch.hcall_needed = 0;
}
kvmppc_core_deliver_interrupts(vcpu);
- local_irq_disable();
- kvm_guest_enter();
- r = __kvmppc_vcpu_run(run, vcpu);
- kvm_guest_exit();
- local_irq_enable();
+ r = kvmppc_vcpu_run(run, vcpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -518,6 +551,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
vcpu->stat.halt_wakeup++;
+ } else if (vcpu->cpu != -1) {
+ smp_send_reschedule(vcpu->cpu);
}
return 0;
@@ -633,6 +668,29 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CREATE_SPAPR_TCE: {
+ struct kvm_create_spapr_tce create_tce;
+ struct kvm *kvm = filp->private_data;
+
+ r = -EFAULT;
+ if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
+ goto out;
+ r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
+ goto out;
+ }
+
+ case KVM_ALLOCATE_RMA: {
+ struct kvm *kvm = filp->private_data;
+ struct kvm_allocate_rma rma;
+
+ r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+ if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+ r = -EFAULT;
+ break;
+ }
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
default:
r = -ENOTTY;
}
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 319177df9587..07b6110a4bb7 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -56,15 +56,6 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
u64 old;
- do_div(duration, tb_ticks_per_usec);
- if (unlikely(duration > 0xFFFFFFFF)) {
- printk(KERN_ERR"%s - duration too big -> overflow"
- " duration %lld type %d exit #%d\n",
- __func__, duration, type,
- vcpu->arch.timing_count_type[type]);
- return;
- }
-
mutex_lock(&vcpu->arch.exit_timing_lock);
vcpu->arch.timing_count_type[type]++;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 3aca1b042b8c..b135d3d397db 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -103,7 +103,7 @@ TRACE_EVENT(kvm_gtlb_write,
* Book3S trace points *
*************************************************************************/
-#ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_KVM_BOOK3S_PR
TRACE_EVENT(kvm_book3s_exit,
TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
@@ -252,7 +252,7 @@ TRACE_EVENT(kvm_book3s_mmu_flush,
),
TP_fast_assign(
- __entry->count = vcpu->arch.hpte_cache_count;
+ __entry->count = to_book3s(vcpu)->hpte_cache_count;
__entry->p1 = p1;
__entry->p2 = p2;
__entry->type = type;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ad35f66c69e8..5efe8c96d37f 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -174,7 +174,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV);
}
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
@@ -320,7 +320,7 @@ good_area:
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -331,7 +331,7 @@ good_area:
#endif
} else {
current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index dfd764896db0..90039bc64119 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3
-static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ -51,7 +51,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
va &= ~0xffful;
va |= ssize << 8;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
- : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
@@ -61,7 +61,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
va |= ssize << 8;
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
- : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
}
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
new file mode 100644
index 000000000000..266b3950c3ac
--- /dev/null
+++ b/arch/powerpc/net/Makefile
@@ -0,0 +1,4 @@
+#
+# Arch-specific network modules
+#
+obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
new file mode 100644
index 000000000000..af1ab5e9a691
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit.h
@@ -0,0 +1,227 @@
+/* bpf_jit.h: BPF JIT compiler for PPC64
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#define BPF_PPC_STACK_LOCALS 32
+#define BPF_PPC_STACK_BASIC (48+64)
+#define BPF_PPC_STACK_SAVE (18*8)
+#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+ BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (48+64)
+
+/*
+ * Generated code register usage:
+ *
+ * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
+ *
+ * skb r3 (Entry parameter)
+ * A register r4
+ * X register r5
+ * addr param r6
+ * r7-r10 scratch
+ * skb->data r14
+ * skb headlen r15 (skb->len - skb->data_len)
+ * m[0] r16
+ * m[...] ...
+ * m[15] r31
+ */
+#define r_skb 3
+#define r_ret 3
+#define r_A 4
+#define r_X 5
+#define r_addr 6
+#define r_scratch1 7
+#define r_D 14
+#define r_HL 15
+#define r_M 16
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Assembly helpers from arch/powerpc/net/bpf_jit.S:
+ */
+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+
+#define FUNCTION_DESCR_SIZE 24
+
+/*
+ * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
+ * (e.g. LD, ADDI). If the bottom 16 bits are negative (bit 15 set), add one
+ * to the top half to cancel the sign extension (i.e. 0xffff + 1 = 0x(1)0000).
+ */
+#define IMM_H(i) ((uintptr_t)(i)>>16)
+#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
+ (((uintptr_t)(i) & 0x8000) >> 15))
+#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
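
A quick standalone check of the IMM_HA() compensation: addi/ld sign-extend their 16-bit immediate, so a low half with bit 15 set contributes -0x10000, which the +1 in the high half cancels. The macros are copied from above; the test value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define IMM_HA(i)	(((uintptr_t)(i)>>16) + (((uintptr_t)(i) & 0x8000) >> 15))
#define IMM_L(i)	((uintptr_t)(i) & 0xffff)

int main(void)
{
	uint32_t i = 0x12348000;	/* low half has bit 15 set */
	int32_t lo = (int16_t)IMM_L(i);	/* sign-extends to -0x8000 */
	uint32_t r = ((uint32_t)IMM_HA(i) << 16) + (uint32_t)lo;

	/* addis-with-HA plus sign-extending addi reconstructs i exactly. */
	printf("0x%08x == 0x%08x\n", r, i);
	return 0;
}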
+
+#define PLANT_INSTR(d, idx, instr) \
+ do { if (d) { (d)[idx] = instr; } idx++; } while (0)
+#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
+
+#define PPC_NOP() EMIT(PPC_INST_NOP)
+#define PPC_BLR() EMIT(PPC_INST_BLR)
+#define PPC_BLRL() EMIT(PPC_INST_BLRL)
+#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | __PPC_RT(r))
+#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | __PPC_RT(d) | \
+ __PPC_RA(a) | IMM_L(i))
+#define PPC_MR(d, a) PPC_OR(d, a, a)
+#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
+#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
+ __PPC_RS(d) | __PPC_RA(a) | IMM_L(i))
+#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
+#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | __PPC_RS(r) | \
+ __PPC_RA(base) | ((i) & 0xfffc))
+
+#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | __PPC_RT(r) | \
+ __PPC_RA(base) | IMM_L(i))
+#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | __PPC_RT(r) | \
+ __PPC_RA(base) | IMM_L(i))
+#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | __PPC_RT(r) | \
+ __PPC_RA(base) | IMM_L(i))
+/* Convenience helpers for the above with 'far' offsets: */
+#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LD(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LWZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LHZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i))
+#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b))
+
+#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | __PPC_RT(d) | \
+ __PPC_RB(a) | __PPC_RA(b))
+#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | __PPC_RT(d) | \
+ __PPC_RA(a) | IMM_L(i))
+#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(b))
+#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | __PPC_RA(d) | \
+ __PPC_RS(a) | IMM_L(i))
+#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(b))
+#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(b))
+#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | __PPC_RA(d) | \
+ __PPC_RS(a) | IMM_L(i))
+#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | __PPC_RA(d) | \
+ __PPC_RS(a) | IMM_L(i))
+#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(s))
+#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(s))
+/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
+#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(0) | __PPC_ME(31-(i)))
+/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
+#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_SH(32-(i)) | \
+ __PPC_MB(i) | __PPC_ME(31))
+/* sldi = rldicr Rx, Ry, n, 63-n */
+#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
+#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a))
+
+/* Long jump; (unconditional 'branch') */
+#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
+ (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+/* "cond" here covers BO:BI fields. */
+#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
+ (((cond) & 0x3ff) << 16) | \
+ (((dest) - (ctx->idx * 4)) & \
+ 0xfffc))
+#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \
+ if ((u32)(uintptr_t)(i) >= 32768) { \
+ PPC_ADDIS(d, d, IMM_HA(i)); \
+ } } while(0)
+#define PPC_LI64(d, i) do { \
+ if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \
+ PPC_LI32(d, i); \
+ else { \
+ PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
+ if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
+ PPC_ORI(d, d, \
+ ((uintptr_t)(i) >> 32) & 0xffff); \
+ PPC_SLDI(d, d, 32); \
+ if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
+ PPC_ORIS(d, d, \
+ ((uintptr_t)(i) >> 16) & 0xffff); \
+ if ((uintptr_t)(i) & 0x000000000000ffffULL) \
+ PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
+ } } while (0);
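
As a standalone model of the full PPC_LI64() sequence (ignoring the zero-chunk short-cuts the macro takes): the 64-bit immediate is rebuilt from four 16-bit chunks, and the sign extension lis performs is harmless because sldi discards the upper 32 bits again. The test constant is arbitrary:

#include <stdint.h>
#include <stdio.h>

static uint64_t li64_model(uint64_t i)
{
	uint64_t d;

	d  = (uint64_t)((i >> 48) & 0xffff) << 16;	/* lis  d, i[63:48]    */
	d |= (i >> 32) & 0xffff;			/* ori  d, d, i[47:32] */
	d <<= 32;					/* sldi d, d, 32       */
	d |= ((i >> 16) & 0xffff) << 16;		/* oris d, d, i[31:16] */
	d |= i & 0xffff;				/* ori  d, d, i[15:0]  */
	return d;
}

int main(void)
{
	uint64_t i = 0xdeadbeefcafef00dULL;

	printf("%d\n", li64_model(i) == i);	/* prints 1 */
	return 0;
}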
+
+static inline bool is_nearbranch(int offset)
+{
+ return (offset < 32768) && (offset >= -32768);
+}
+
+/*
+ * The fly in the ointment is that code size can change from pass to pass;
+ * we avoid that by padding the short branch case with a NOP. If code size
+ * varied with branch reach, code would move from one pass to the next and
+ * we would need a few passes to converge on a stable state.
+ */
+#define PPC_BCC(cond, dest) do { \
+ if (is_nearbranch((dest) - (ctx->idx * 4))) { \
+ PPC_BCC_SHORT(cond, dest); \
+ PPC_NOP(); \
+ } else { \
+ /* Flip the 'T or F' bit to invert comparison */ \
+ PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \
+ PPC_JMP(dest); \
+ } } while(0)
+
+/* To create a branch condition, select a bit of cr0... */
+#define CR0_LT 0
+#define CR0_GT 1
+#define CR0_EQ 2
+/* ...and modify BO[3] */
+#define COND_CMP_TRUE 0x100
+#define COND_CMP_FALSE 0x000
+/* Together, they make all required comparisons: */
+#define COND_GT (CR0_GT | COND_CMP_TRUE)
+#define COND_GE (CR0_LT | COND_CMP_FALSE)
+#define COND_EQ (CR0_EQ | COND_CMP_TRUE)
+#define COND_NE (CR0_EQ | COND_CMP_FALSE)
+#define COND_LT (CR0_LT | COND_CMP_TRUE)
+
+#define SEEN_DATAREF 0x10000 /* might call external helpers */
+#define SEEN_XREG 0x20000 /* X reg is used */
+#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
+ * storage */
+#define SEEN_MEM_MSK 0x0ffff
+
+struct codegen_context {
+ unsigned int seen;
+ unsigned int idx;
+ int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
+};
+
+#endif
+
+#endif
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
new file mode 100644
index 000000000000..ff4506e85cce
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -0,0 +1,138 @@
+/* bpf_jit.S: Packet/header access helper functions
+ * for PPC64 BPF compiler.
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <asm/ppc_asm.h>
+#include "bpf_jit.h"
+
+/*
+ * All of these routines are called directly from generated code,
+ * whose register usage is:
+ *
+ * r3 skb
+ * r4,r5 A,X
+ * r6 *** address parameter to helper ***
+ * r7-r10 scratch
+ * r14 skb->data
+ * r15 skb headlen
+ * r16-31 M[]
+ */
+
+/*
+ * To consider: These helpers are so small it could be better to just
+ * generate them inline. Inline code can do the simple headlen check
+ * then branch directly to slow_path_XXX if required. (In fact, could
+ * load a spare GPR with the address of slow_path_generic and pass size
+ * as an argument, making the call site a mtlr, li and bllr.)
+ *
+ * Technically, the "is addr < 0" check is unnecessary and slows down
+ * the ABS path, as it is statically checked at generation time.
+ */
+ .globl sk_load_word
+sk_load_word:
+ cmpdi r_addr, 0
+ blt bpf_error
+ /* Are we accessing past headlen? */
+ subi r_scratch1, r_HL, 4
+ cmpd r_scratch1, r_addr
+ blt bpf_slow_path_word
+ /* Nope, just hitting the header. cr0 here is eq or gt! */
+ lwzx r_A, r_D, r_addr
+ /* When big endian we don't need to byteswap. */
+ blr /* Return success, cr0 != LT */
+
+ .globl sk_load_half
+sk_load_half:
+ cmpdi r_addr, 0
+ blt bpf_error
+ subi r_scratch1, r_HL, 2
+ cmpd r_scratch1, r_addr
+ blt bpf_slow_path_half
+ lhzx r_A, r_D, r_addr
+ blr
+
+ .globl sk_load_byte
+sk_load_byte:
+ cmpdi r_addr, 0
+ blt bpf_error
+ cmpd r_HL, r_addr
+ ble bpf_slow_path_byte
+ lbzx r_A, r_D, r_addr
+ blr
+
+/*
+ * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
+ * r_addr is the offset value, already known positive
+ */
+ .globl sk_load_byte_msh
+sk_load_byte_msh:
+ cmpd r_HL, r_addr
+ ble bpf_slow_path_byte_msh
+ lbzx r_X, r_D, r_addr
+ rlwinm r_X, r_X, 2, 32-4-2, 31-2
+ blr
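
The rlwinm in sk_load_byte_msh computes 4*(byte & 0xf) in a single instruction: rotate left by 2, then keep only bits 26..29. Equivalent C as a quick standalone check (the IPv4 version/IHL byte is the classic input for this BPF opcode):

#include <stdint.h>
#include <stdio.h>

/* rlwinm rX,rX,2,26,29: rotate left 2, keep bits 26..29 = (x & 0xf) << 2 */
static uint32_t msh(uint32_t byte)
{
	return (byte & 0xf) << 2;
}

int main(void)
{
	/* IPv4 version/IHL byte 0x45 -> header length 20 bytes. */
	printf("%u\n", msh(0x45));
	return 0;
}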
+
+bpf_error:
+ /* Entered with cr0 = lt */
+ li r3, 0
+ /* Generated code will 'blt epilogue', returning 0. */
+ blr
+
+/* Call out to skb_copy_bits:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define bpf_slow_path_common(SIZE) \
+ mflr r0; \
+ std r0, 16(r1); \
+ /* R3 goes in parameter space of caller's frame */ \
+ std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
+ std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \
+ stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
+ /* R3 = r_skb, as passed */ \
+ mr r4, r_addr; \
+ li r6, SIZE; \
+ bl skb_copy_bits; \
+ /* R3 = 0 on success */ \
+ addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
+ ld r0, 16(r1); \
+ ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
+ ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ mtlr r0; \
+ cmpdi r3, 0; \
+ blt bpf_error; /* cr0 = LT */ \
+ ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ /* Great success! */
+
+bpf_slow_path_word:
+ bpf_slow_path_common(4)
+ /* Data value is on stack, and cr0 != LT */
+ lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ blr
+
+bpf_slow_path_half:
+ bpf_slow_path_common(2)
+ lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ blr
+
+bpf_slow_path_byte:
+ bpf_slow_path_common(1)
+ lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ blr
+
+bpf_slow_path_byte_msh:
+ bpf_slow_path_common(1)
+ lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ rlwinm r_X, r_X, 2, 32-4-2, 31-2
+ blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..73619d3aeb6c
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -0,0 +1,694 @@
+/* bpf_jit_comp.c: BPF JIT compiler for PPC64
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/moduleloader.h>
+#include <asm/cacheflush.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+#ifndef __BIG_ENDIAN
+/* There are endianness assumptions herein. */
+#error "Little-endian PPC not supported in BPF compiler"
+#endif
+
+int bpf_jit_enable __read_mostly;
+
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ smp_wmb();
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
+ struct codegen_context *ctx)
+{
+ int i;
+ const struct sock_filter *filter = fp->insns;
+
+ if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
+ /* Make stackframe */
+ if (ctx->seen & SEEN_DATAREF) {
+ /* If we call any helpers (for loads), save LR */
+ EMIT(PPC_INST_MFLR | __PPC_RT(0));
+ PPC_STD(0, 1, 16);
+
+ /* Back up non-volatile regs. */
+ PPC_STD(r_D, 1, -(8*(32-r_D)));
+ PPC_STD(r_HL, 1, -(8*(32-r_HL)));
+ }
+ if (ctx->seen & SEEN_MEM) {
+ /*
+ * Conditionally save regs r15-r31 as some will be used
+ * for M[] data.
+ */
+ for (i = r_M; i < (r_M+16); i++) {
+ if (ctx->seen & (1 << (i-r_M)))
+ PPC_STD(i, 1, -(8*(32-i)));
+ }
+ }
+ EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) |
+ (-BPF_PPC_STACKFRAME & 0xfffc));
+ }
+
+ if (ctx->seen & SEEN_DATAREF) {
+ /*
+ * If this filter needs to access skb data,
+ * prepare r_D and r_HL:
+ * r_HL = skb->len - skb->data_len
+ * r_D = skb->data
+ */
+ PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+ data_len));
+ PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
+ PPC_SUB(r_HL, r_HL, r_scratch1);
+ PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
+ }
+
+ if (ctx->seen & SEEN_XREG) {
+ /*
+ * TODO: Could also detect whether first instr. sets X and
+ * avoid this (as below, with A).
+ */
+ PPC_LI(r_X, 0);
+ }
+
+ switch (filter[0].code) {
+ case BPF_S_RET_K:
+ case BPF_S_LD_W_LEN:
+ case BPF_S_ANC_PROTOCOL:
+ case BPF_S_ANC_IFINDEX:
+ case BPF_S_ANC_MARK:
+ case BPF_S_ANC_RXHASH:
+ case BPF_S_ANC_CPU:
+ case BPF_S_ANC_QUEUE:
+ case BPF_S_LD_W_ABS:
+ case BPF_S_LD_H_ABS:
+ case BPF_S_LD_B_ABS:
+ /* first instruction sets A register (or is RET 'constant') */
+ break;
+ default:
+		/* Make sure we don't leak kernel information to userspace. */
+ PPC_LI(r_A, 0);
+ }
+}
+
+static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+
+ if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
+ PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
+ if (ctx->seen & SEEN_DATAREF) {
+ PPC_LD(0, 1, 16);
+ PPC_MTLR(0);
+ PPC_LD(r_D, 1, -(8*(32-r_D)));
+ PPC_LD(r_HL, 1, -(8*(32-r_HL)));
+ }
+ if (ctx->seen & SEEN_MEM) {
+ /* Restore any saved non-vol registers */
+ for (i = r_M; i < (r_M+16); i++) {
+ if (ctx->seen & (1 << (i-r_M)))
+ PPC_LD(i, 1, -(8*(32-i)));
+ }
+ }
+ }
+ /* The RETs have left a return value in R3. */
+
+ PPC_BLR();
+}
+
+/* Assemble the body code between the prologue & epilogue. */
+static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+ struct codegen_context *ctx,
+ unsigned int *addrs)
+{
+ const struct sock_filter *filter = fp->insns;
+ int flen = fp->len;
+ u8 *func;
+ unsigned int true_cond;
+ int i;
+
+ /* Start of epilogue code */
+ unsigned int exit_addr = addrs[flen];
+
+ for (i = 0; i < flen; i++) {
+ unsigned int K = filter[i].k;
+
+ /*
+ * addrs[] maps a BPF bytecode address into a real offset from
+ * the start of the body code.
+ */
+ addrs[i] = ctx->idx * 4;
+
+ switch (filter[i].code) {
+ /*** ALU ops ***/
+ case BPF_S_ALU_ADD_X: /* A += X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_ADD(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_ADD_K: /* A += K; */
+ if (!K)
+ break;
+ PPC_ADDI(r_A, r_A, IMM_L(K));
+ if (K >= 32768)
+ PPC_ADDIS(r_A, r_A, IMM_HA(K));
+ break;
+ case BPF_S_ALU_SUB_X: /* A -= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_SUB(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_SUB_K: /* A -= K */
+ if (!K)
+ break;
+ PPC_ADDI(r_A, r_A, IMM_L(-K));
+ if (K >= 32768)
+ PPC_ADDIS(r_A, r_A, IMM_HA(-K));
+ break;
+ case BPF_S_ALU_MUL_X: /* A *= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_MUL(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_MUL_K: /* A *= K */
+ if (K < 32768)
+ PPC_MULI(r_A, r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_MUL(r_A, r_A, r_scratch1);
+ }
+ break;
+ case BPF_S_ALU_DIV_X: /* A /= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_CMPWI(r_X, 0);
+ if (ctx->pc_ret0 != -1) {
+ PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
+ } else {
+ /*
+ * Exit, returning 0; first pass hits here
+ * (longer worst-case code size).
+ */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+ PPC_LI(r_ret, 0);
+ PPC_JMP(exit_addr);
+ }
+ PPC_DIVWU(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+ PPC_LI32(r_scratch1, K);
+ /* Top 32 bits of 64bit result -> A */
+ PPC_MULHWU(r_A, r_A, r_scratch1);
+ break;
+ case BPF_S_ALU_AND_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_AND(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_AND_K:
+ if (!IMM_H(K))
+ PPC_ANDI(r_A, r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_AND(r_A, r_A, r_scratch1);
+ }
+ break;
+ case BPF_S_ALU_OR_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_OR(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_OR_K:
+ if (IMM_L(K))
+ PPC_ORI(r_A, r_A, IMM_L(K));
+ if (K >= 65536)
+ PPC_ORIS(r_A, r_A, IMM_H(K));
+ break;
+ case BPF_S_ALU_LSH_X: /* A <<= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_SLW(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_LSH_K:
+ if (K == 0)
+ break;
+ else
+ PPC_SLWI(r_A, r_A, K);
+ break;
+ case BPF_S_ALU_RSH_X: /* A >>= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_SRW(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_RSH_K: /* A >>= K; */
+ if (K == 0)
+ break;
+ else
+ PPC_SRWI(r_A, r_A, K);
+ break;
+ case BPF_S_ALU_NEG:
+ PPC_NEG(r_A, r_A);
+ break;
+ case BPF_S_RET_K:
+ PPC_LI32(r_ret, K);
+ if (!K) {
+ if (ctx->pc_ret0 == -1)
+ ctx->pc_ret0 = i;
+ }
+ /*
+ * If this isn't the very last instruction, branch to
+ * the epilogue if we've stuff to clean up. Otherwise,
+ * if there's nothing to tidy, just return. If we /are/
+ * the last instruction, we're about to fall through to
+ * the epilogue to return.
+ */
+ if (i != flen - 1) {
+ /*
+ * Note: 'seen' is properly valid only on pass
+ * #2. Both parts of this conditional are the
+ * same instruction size though, meaning the
+ * first pass will still correctly determine the
+ * code size/addresses.
+ */
+ if (ctx->seen)
+ PPC_JMP(exit_addr);
+ else
+ PPC_BLR();
+ }
+ break;
+ case BPF_S_RET_A:
+ PPC_MR(r_ret, r_A);
+ if (i != flen - 1) {
+ if (ctx->seen)
+ PPC_JMP(exit_addr);
+ else
+ PPC_BLR();
+ }
+ break;
+ case BPF_S_MISC_TAX: /* X = A */
+ PPC_MR(r_X, r_A);
+ break;
+ case BPF_S_MISC_TXA: /* A = X */
+ ctx->seen |= SEEN_XREG;
+ PPC_MR(r_A, r_X);
+ break;
+
+ /*** Constant loads/M[] access ***/
+ case BPF_S_LD_IMM: /* A = K */
+ PPC_LI32(r_A, K);
+ break;
+ case BPF_S_LDX_IMM: /* X = K */
+ PPC_LI32(r_X, K);
+ break;
+ case BPF_S_LD_MEM: /* A = mem[K] */
+ PPC_MR(r_A, r_M + (K & 0xf));
+ ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_LDX_MEM: /* X = mem[K] */
+ PPC_MR(r_X, r_M + (K & 0xf));
+ ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_ST: /* mem[K] = A */
+ PPC_MR(r_M + (K & 0xf), r_A);
+ ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_STX: /* mem[K] = X */
+ PPC_MR(r_M + (K & 0xf), r_X);
+ ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_LD_W_LEN: /* A = skb->len; */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+ PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
+ break;
+ case BPF_S_LDX_W_LEN: /* X = skb->len; */
+ PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
+ break;
+
+ /*** Ancillary info loads ***/
+
+ /* None of the BPF_S_ANC* codes appear to be passed by
+ * sk_chk_filter(). The interpreter and the x86 BPF
+		 * compiler implement them, so we do too -- they may be
+		 * planted in the future.
+ */
+ case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ protocol) != 2);
+ PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ protocol));
+ /* ntohs is a NOP with BE loads. */
+ break;
+ case BPF_S_ANC_IFINDEX:
+ PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+ dev));
+ PPC_CMPDI(r_scratch1, 0);
+ if (ctx->pc_ret0 != -1) {
+ PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
+ } else {
+ /* Exit, returning 0; first pass hits here. */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+ PPC_LI(r_ret, 0);
+ PPC_JMP(exit_addr);
+ }
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ ifindex) != 4);
+ PPC_LWZ_OFFS(r_A, r_scratch1,
+ offsetof(struct net_device, ifindex));
+ break;
+ case BPF_S_ANC_MARK:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+ PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ mark));
+ break;
+ case BPF_S_ANC_RXHASH:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+ PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ rxhash));
+ break;
+ case BPF_S_ANC_QUEUE:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ queue_mapping) != 2);
+ PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ queue_mapping));
+ break;
+ case BPF_S_ANC_CPU:
+#ifdef CONFIG_SMP
+ /*
+ * PACA ptr is r13:
+ * raw_smp_processor_id() = local_paca->paca_index
+ */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
+ paca_index) != 2);
+ PPC_LHZ_OFFS(r_A, 13,
+ offsetof(struct paca_struct, paca_index));
+#else
+ PPC_LI(r_A, 0);
+#endif
+ break;
+
+ /*** Absolute loads from packet header/data ***/
+ case BPF_S_LD_W_ABS:
+ func = sk_load_word;
+ goto common_load;
+ case BPF_S_LD_H_ABS:
+ func = sk_load_half;
+ goto common_load;
+ case BPF_S_LD_B_ABS:
+ func = sk_load_byte;
+ common_load:
+ /*
+ * Load from [K]. Reference with the (negative)
+ * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
+ */
+ ctx->seen |= SEEN_DATAREF;
+ if ((int)K < 0)
+ return -ENOTSUPP;
+ PPC_LI64(r_scratch1, func);
+ PPC_MTLR(r_scratch1);
+ PPC_LI32(r_addr, K);
+ PPC_BLRL();
+ /*
+ * Helper returns 'lt' condition on error, and an
+ * appropriate return value in r3
+ */
+ PPC_BCC(COND_LT, exit_addr);
+ break;
+
+ /*** Indirect loads from packet header/data ***/
+ case BPF_S_LD_W_IND:
+ func = sk_load_word;
+ goto common_load_ind;
+ case BPF_S_LD_H_IND:
+ func = sk_load_half;
+ goto common_load_ind;
+ case BPF_S_LD_B_IND:
+ func = sk_load_byte;
+ common_load_ind:
+ /*
+ * Load from [X + K]. Negative offsets are tested for
+ * in the helper functions, and result in a 'ret 0'.
+ */
+ ctx->seen |= SEEN_DATAREF | SEEN_XREG;
+ PPC_LI64(r_scratch1, func);
+ PPC_MTLR(r_scratch1);
+ PPC_ADDI(r_addr, r_X, IMM_L(K));
+ if (K >= 32768)
+ PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
+ PPC_BLRL();
+ /* If error, cr0.LT set */
+ PPC_BCC(COND_LT, exit_addr);
+ break;
+
+ case BPF_S_LDX_B_MSH:
+ /*
+			 * The x86 version drops the packet (RET 0) when K<0,
+			 * whereas the interpreter does allow K<0
+			 * (__load_pointer, special ancillary data).
+			 * common_load returns ENOTSUPP if K<0, so we fall
+			 * back to the interpreter and the filter still works.
+ */
+ func = sk_load_byte_msh;
+ goto common_load;
+ break;
+
+ /*** Jump and branches ***/
+ case BPF_S_JMP_JA:
+ if (K != 0)
+ PPC_JMP(addrs[i + 1 + K]);
+ break;
+
+ case BPF_S_JMP_JGT_K:
+ case BPF_S_JMP_JGT_X:
+ true_cond = COND_GT;
+ goto cond_branch;
+ case BPF_S_JMP_JGE_K:
+ case BPF_S_JMP_JGE_X:
+ true_cond = COND_GE;
+ goto cond_branch;
+ case BPF_S_JMP_JEQ_K:
+ case BPF_S_JMP_JEQ_X:
+ true_cond = COND_EQ;
+ goto cond_branch;
+ case BPF_S_JMP_JSET_K:
+ case BPF_S_JMP_JSET_X:
+ true_cond = COND_NE;
+ /* Fall through */
+ cond_branch:
+ /* same targets, can avoid doing the test :) */
+ if (filter[i].jt == filter[i].jf) {
+ if (filter[i].jt > 0)
+ PPC_JMP(addrs[i + 1 + filter[i].jt]);
+ break;
+ }
+
+ switch (filter[i].code) {
+ case BPF_S_JMP_JGT_X:
+ case BPF_S_JMP_JGE_X:
+ case BPF_S_JMP_JEQ_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_CMPLW(r_A, r_X);
+ break;
+ case BPF_S_JMP_JSET_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_AND_DOT(r_scratch1, r_A, r_X);
+ break;
+ case BPF_S_JMP_JEQ_K:
+ case BPF_S_JMP_JGT_K:
+ case BPF_S_JMP_JGE_K:
+ if (K < 32768)
+ PPC_CMPLWI(r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_CMPLW(r_A, r_scratch1);
+ }
+ break;
+ case BPF_S_JMP_JSET_K:
+ if (K < 32768)
+ /* PPC_ANDI is /only/ dot-form */
+ PPC_ANDI(r_scratch1, r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_AND_DOT(r_scratch1, r_A,
+ r_scratch1);
+ }
+ break;
+ }
+ /* Sometimes branches are constructed "backward", with
+ * the false path being the branch and true path being
+ * a fallthrough to the next instruction.
+ */
+ if (filter[i].jt == 0)
+ /* Swap the sense of the branch */
+ PPC_BCC(true_cond ^ COND_CMP_TRUE,
+ addrs[i + 1 + filter[i].jf]);
+ else {
+ PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
+ if (filter[i].jf != 0)
+ PPC_JMP(addrs[i + 1 + filter[i].jf]);
+ }
+ break;
+ default:
+ /* The filter contains something cruel & unusual.
+ * We don't handle it, but also there shouldn't be
+ * anything missing from our list.
+ */
+ if (printk_ratelimit())
+ pr_err("BPF filter opcode %04x (@%d) unsupported\n",
+ filter[i].code, i);
+ return -ENOTSUPP;
+ }
+
+ }
+ /* Set end-of-body-code address for exit. */
+ addrs[i] = ctx->idx * 4;
+
+ return 0;
+}
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+ unsigned int proglen;
+ unsigned int alloclen;
+ u32 *image = NULL;
+ u32 *code_base;
+ unsigned int *addrs;
+ struct codegen_context cgctx;
+ int pass;
+ int flen = fp->len;
+
+ if (!bpf_jit_enable)
+ return;
+
+ addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
+ if (addrs == NULL)
+ return;
+
+ /*
+ * There are multiple assembly passes as the generated code will change
+ * size as it settles down, figuring out the max branch offsets/exit
+ * paths required.
+ *
+ * The range of standard conditional branches is +/- 32Kbytes. Since
+ * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
+ * finish with 8 bytes/instruction. Not feasible, so long jumps are
+ * used, distinct from short branches.
+ *
+ * Current:
+ *
+ * For now, both branch types assemble to 2 words (short branches padded
+ * with a NOP); this is less efficient, but assembly will always complete
+ * after exactly 3 passes:
+ *
+ * First pass: No code buffer; Program is "faux-generated" -- no code
+ * emitted but maximum size of output determined (and addrs[] filled
+ * in). Also, we note whether we use M[], whether we use skb data, etc.
+ * All generation choices assumed to be 'worst-case', e.g. branches all
+ * far (2 instructions), return path code reduction not available, etc.
+ *
+ * Second pass: Code buffer allocated with size determined previously.
+ * Prologue generated to support features we have seen used. Exit paths
+ * determined and addrs[] is filled in again, as code may be slightly
+ * smaller as a result.
+ *
+ * Third pass: Code generated 'for real', and branch destinations
+ * determined from now-accurate addrs[] map.
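+ *
+ * (Hypothetical trace for a small filter: pass 0 sizes the worst
+ * case at, say, 120 words; pass 1 re-emits into the real buffer
+ * and the body shrinks to 112 words; pass 2 re-emits against the
+ * now-settled addrs[] so every branch target is exact.)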
+ *
+ * Ideal:
+ *
+ * If we optimise this, near branches will be shorter. On the
+ * first assembly pass, we should err on the side of caution and
+ * generate the biggest code. On subsequent passes, branches will be
+ * generated short or long and code size will reduce. With smaller
+ * code, more branches may fall into the short category, and code will
+ * reduce more.
+ *
+ * Finally, if we see one pass generate code the same size as the
+ * previous pass, we have converged and should now generate code for
+ * real. Allocating at the end will also save the memory that would
+ * otherwise be wasted by the (small) current code shrinkage.
+ * Preferably, we should do a small number of passes (e.g. 5) and if we
+ * haven't converged by then, get impatient and force code to generate
+ * as-is, even if the odd branch would be left long. The chances of a
+ * long jump are tiny with all but the most enormous of BPF filter
+ * inputs, so we should usually converge on the third pass.
+ */
+
+ cgctx.idx = 0;
+ cgctx.seen = 0;
+ cgctx.pc_ret0 = -1;
+ /* Scouting faux-generate pass 0 */
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
+ /* We hit something illegal or unsupported. */
+ goto out;
+
+ /*
+ * Pretend to build prologue, given the features we've seen. This will
+ * update cgctx.idx as it pretends to output instructions, then we can
+ * calculate total size from idx.
+ */
+ bpf_jit_build_prologue(fp, 0, &cgctx);
+ bpf_jit_build_epilogue(0, &cgctx);
+
+ proglen = cgctx.idx * 4;
+ alloclen = proglen + FUNCTION_DESCR_SIZE;
+ image = module_alloc(max_t(unsigned int, alloclen,
+ sizeof(struct work_struct)));
+ if (!image)
+ goto out;
+
+ code_base = image + (FUNCTION_DESCR_SIZE/4);
+
+ /* Code generation passes 1-2 */
+ for (pass = 1; pass < 3; pass++) {
+ /* Now build the prologue, body code & epilogue for real. */
+ cgctx.idx = 0;
+ bpf_jit_build_prologue(fp, code_base, &cgctx);
+ bpf_jit_build_body(fp, code_base, &cgctx, addrs);
+ bpf_jit_build_epilogue(code_base, &cgctx);
+
+ if (bpf_jit_enable > 1)
+ pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
+ proglen - (cgctx.idx * 4), cgctx.seen);
+ }
+
+ if (bpf_jit_enable > 1)
+ pr_info("flen=%d proglen=%u pass=%d image=%p\n",
+ flen, proglen, pass, image);
+
+ if (image) {
+ if (bpf_jit_enable > 1)
+ print_hex_dump(KERN_ERR, "JIT code: ",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, code_base,
+ proglen, false);
+
+ bpf_flush_icache(code_base, code_base + (proglen/4));
+ /* Function descriptor nastiness: Address + TOC */
+ ((u64 *)image)[0] = (u64)code_base;
+ ((u64 *)image)[1] = local_paca->kernel_toc;
+ fp->bpf_func = (void *)image;
+ }
+out:
+ kfree(addrs);
+ return;
+}
+
+static void jit_free_defer(struct work_struct *arg)
+{
+ module_free(NULL, arg);
+}
+
+/* Run from softirq context; we must use a work_struct to call
+ * module_free() from process context.
+ */
+void bpf_jit_free(struct sk_filter *fp)
+{
+ if (fp->bpf_func != sk_run_filter) {
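+ /*
+ * The image was allocated at least sizeof(struct work_struct)
+ * bytes (see module_alloc() in bpf_jit_compile()), so its start
+ * can safely be reused as the deferred-free work item.
+ */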
+ struct work_struct *work = (struct work_struct *)fp->bpf_func;
+
+ INIT_WORK(work, jit_free_defer);
+ schedule_work(work);
+ }
+}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 47ea1be1481b..90f4496017e4 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -55,14 +55,6 @@ config PPC_MPC5200_BUGFIX
It is safe to say 'Y' here
-config PPC_MPC5200_GPIO
- bool "MPC5200 GPIO support"
- depends on PPC_MPC52xx
- select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
- help
- Enable gpiolib support for mpc5200 based boards
-
config PPC_MPC5200_LPBFIFO
tristate "MPC5200 LocalPlus bus FIFO driver"
depends on PPC_MPC52xx
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index 2bc8cd0c5cfc..4e62486791e9 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -14,5 +14,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
endif
-obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
deleted file mode 100644
index 1757d1db4b51..000000000000
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * MPC52xx gpio driver
- *
- * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/of.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/of_gpio.h>
-#include <linux/io.h>
-#include <linux/of_platform.h>
-
-#include <asm/gpio.h>
-#include <asm/mpc52xx.h>
-#include <sysdev/fsl_soc.h>
-
-static DEFINE_SPINLOCK(gpio_lock);
-
-struct mpc52xx_gpiochip {
- struct of_mm_gpio_chip mmchip;
- unsigned int shadow_dvo;
- unsigned int shadow_gpioe;
- unsigned int shadow_ddr;
-};
-
-/*
- * GPIO LIB API implementation for wakeup GPIOs.
- *
- * There's a maximum of 8 wakeup GPIOs. Which of these are available
- * for use depends on your board setup.
- *
- * 0 -> GPIO_WKUP_7
- * 1 -> GPIO_WKUP_6
- * 2 -> PSC6_1
- * 3 -> PSC6_0
- * 4 -> ETH_17
- * 5 -> PSC3_9
- * 6 -> PSC2_4
- * 7 -> PSC1_4
- *
- */
-static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
- unsigned int ret;
-
- ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
-
- pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret);
-
- return ret;
-}
-
-static inline void
-__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
-
- if (val)
- chip->shadow_dvo |= 1 << (7 - gpio);
- else
- chip->shadow_dvo &= ~(1 << (7 - gpio));
-
- out_8(&regs->wkup_dvo, chip->shadow_dvo);
-}
-
-static void
-mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- __mpc52xx_wkup_gpio_set(gc, gpio, val);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-}
-
-static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- /* set the direction */
- chip->shadow_ddr &= ~(1 << (7 - gpio));
- out_8(&regs->wkup_ddr, chip->shadow_ddr);
-
- /* and enable the pin */
- chip->shadow_gpioe |= 1 << (7 - gpio);
- out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return 0;
-}
-
-static int
-mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- __mpc52xx_wkup_gpio_set(gc, gpio, val);
-
- /* Then set direction */
- chip->shadow_ddr |= 1 << (7 - gpio);
- out_8(&regs->wkup_ddr, chip->shadow_ddr);
-
- /* Finally enable the pin */
- chip->shadow_gpioe |= 1 << (7 - gpio);
- out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-
- return 0;
-}
-
-static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip;
- struct mpc52xx_gpio_wkup __iomem *regs;
- struct gpio_chip *gc;
- int ret;
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- gc = &chip->mmchip.gc;
-
- gc->ngpio = 8;
- gc->direction_input = mpc52xx_wkup_gpio_dir_in;
- gc->direction_output = mpc52xx_wkup_gpio_dir_out;
- gc->get = mpc52xx_wkup_gpio_get;
- gc->set = mpc52xx_wkup_gpio_set;
-
- ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
- if (ret)
- return ret;
-
- regs = chip->mmchip.regs;
- chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
- chip->shadow_ddr = in_8(&regs->wkup_ddr);
- chip->shadow_dvo = in_8(&regs->wkup_dvo);
-
- return 0;
-}
-
-static int mpc52xx_gpiochip_remove(struct platform_device *ofdev)
-{
- return -EBUSY;
-}
-
-static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
- {
- .compatible = "fsl,mpc5200-gpio-wkup",
- },
- {}
-};
-
-static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
- .driver = {
- .name = "gpio_wkup",
- .owner = THIS_MODULE,
- .of_match_table = mpc52xx_wkup_gpiochip_match,
- },
- .probe = mpc52xx_wkup_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
-};
-
-/*
- * GPIO LIB API implementation for simple GPIOs
- *
- * There's a maximum of 32 simple GPIOs. Which of these are available
- * for use depends on your board setup.
- * The numbering reflects the bit numbering in the port registers:
- *
- * 0..1 > reserved
- * 2..3 > IRDA
- * 4..7 > ETHR
- * 8..11 > reserved
- * 12..15 > USB
- * 16..17 > reserved
- * 18..23 > PSC3
- * 24..27 > PSC2
- * 28..31 > PSC1
- */
-static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
- unsigned int ret;
-
- ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
-
- return ret;
-}
-
-static inline void
-__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
-
- if (val)
- chip->shadow_dvo |= 1 << (31 - gpio);
- else
- chip->shadow_dvo &= ~(1 << (31 - gpio));
- out_be32(&regs->simple_dvo, chip->shadow_dvo);
-}
-
-static void
-mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- __mpc52xx_simple_gpio_set(gc, gpio, val);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-}
-
-static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- /* set the direction */
- chip->shadow_ddr &= ~(1 << (31 - gpio));
- out_be32(&regs->simple_ddr, chip->shadow_ddr);
-
- /* and enable the pin */
- chip->shadow_gpioe |= 1 << (31 - gpio);
- out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return 0;
-}
-
-static int
-mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- /* First set initial value */
- __mpc52xx_simple_gpio_set(gc, gpio, val);
-
- /* Then set direction */
- chip->shadow_ddr |= 1 << (31 - gpio);
- out_be32(&regs->simple_ddr, chip->shadow_ddr);
-
- /* Finally enable the pin */
- chip->shadow_gpioe |= 1 << (31 - gpio);
- out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-
- return 0;
-}
-
-static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip;
- struct gpio_chip *gc;
- struct mpc52xx_gpio __iomem *regs;
- int ret;
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- gc = &chip->mmchip.gc;
-
- gc->ngpio = 32;
- gc->direction_input = mpc52xx_simple_gpio_dir_in;
- gc->direction_output = mpc52xx_simple_gpio_dir_out;
- gc->get = mpc52xx_simple_gpio_get;
- gc->set = mpc52xx_simple_gpio_set;
-
- ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
- if (ret)
- return ret;
-
- regs = chip->mmchip.regs;
- chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
- chip->shadow_ddr = in_be32(&regs->simple_ddr);
- chip->shadow_dvo = in_be32(&regs->simple_dvo);
-
- return 0;
-}
-
-static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
- {
- .compatible = "fsl,mpc5200-gpio",
- },
- {}
-};
-
-static struct platform_driver mpc52xx_simple_gpiochip_driver = {
- .driver = {
- .name = "gpio",
- .owner = THIS_MODULE,
- .of_match_table = mpc52xx_simple_gpiochip_match,
- },
- .probe = mpc52xx_simple_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
-};
-
-static int __init mpc52xx_gpio_init(void)
-{
- if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver))
- printk(KERN_ERR "Unable to register wakeup GPIO driver\n");
-
- if (platform_driver_register(&mpc52xx_simple_gpiochip_driver))
- printk(KERN_ERR "Unable to register simple GPIO driver\n");
-
- return 0;
-}
-
-
-/* Make sure we get initialised before anyone else tries to use us */
-subsys_initcall(mpc52xx_gpio_init);
-
-/* No exit call at the moment as we cannot unregister of gpio chips */
-
-MODULE_DESCRIPTION("Freescale MPC52xx gpio driver");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
-MODULE_LICENSE("GPL v2");
-
diff --git a/arch/powerpc/platforms/amigaone/Kconfig b/arch/powerpc/platforms/amigaone/Kconfig
index 022476717718..128de25cc284 100644
--- a/arch/powerpc/platforms/amigaone/Kconfig
+++ b/arch/powerpc/platforms/amigaone/Kconfig
@@ -8,7 +8,7 @@ config AMIGAONE
select NOT_COHERENT_CACHE
select CHECK_CACHE_COHERENCY
select DEFAULT_UIMAGE
- select PCSPKR_PLATFORM
+ select HAVE_PCSPKR_PLATFORM
help
Select AmigaOne for the following machines:
- AmigaOne SE/Teron CX (G3 only)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 3c7c3f82d842..fb59c46e9e9e 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1850,9 +1850,16 @@ out:
return ret;
}
-static int spufs_mfc_fsync(struct file *file, int datasync)
+static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- return spufs_mfc_flush(file, NULL);
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
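+ /*
+ * ->fsync() now takes a range and is called without i_mutex, so
+ * write back [start, end] first and take i_mutex ourselves around
+ * the MFC flush.
+ */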
+ if (!err) {
+ mutex_lock(&inode->i_mutex);
+ err = spufs_mfc_flush(file, NULL);
+ mutex_unlock(&inode->i_mutex);
+ }
+ return err;
}
static int spufs_mfc_fasync(int fd, struct file *file, int on)
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 856e9c398068..e481f6b9a789 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -611,15 +611,14 @@ out:
static struct file_system_type spufs_type;
-long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
- struct file *filp)
+long spufs_create(struct path *path, struct dentry *dentry,
+ unsigned int flags, mode_t mode, struct file *filp)
{
- struct dentry *dentry;
int ret;
ret = -EINVAL;
/* check if we are on spufs */
- if (nd->path.dentry->d_sb->s_type != &spufs_type)
+ if (path->dentry->d_sb->s_type != &spufs_type)
goto out;
/* don't accept undefined flags */
@@ -627,33 +626,27 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
goto out;
/* only threads can be underneath a gang */
- if (nd->path.dentry != nd->path.dentry->d_sb->s_root) {
+ if (path->dentry != path->dentry->d_sb->s_root) {
if ((flags & SPU_CREATE_GANG) ||
- !SPUFS_I(nd->path.dentry->d_inode)->i_gang)
+ !SPUFS_I(path->dentry->d_inode)->i_gang)
goto out;
}
- dentry = lookup_create(nd, 1);
- ret = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out_dir;
-
mode &= ~current_umask();
if (flags & SPU_CREATE_GANG)
- ret = spufs_create_gang(nd->path.dentry->d_inode,
- dentry, nd->path.mnt, mode);
+ ret = spufs_create_gang(path->dentry->d_inode,
+ dentry, path->mnt, mode);
else
- ret = spufs_create_context(nd->path.dentry->d_inode,
- dentry, nd->path.mnt, flags, mode,
+ ret = spufs_create_context(path->dentry->d_inode,
+ dentry, path->mnt, flags, mode,
filp);
if (ret >= 0)
- fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
+ fsnotify_mkdir(path->dentry->d_inode, dentry);
return ret;
-out_dir:
- mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
+ mutex_unlock(&path->dentry->d_inode->i_mutex);
return ret;
}
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index c448bac65518..099245f230b2 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -248,7 +248,7 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[];
/* system call implementation */
extern struct spufs_calls spufs_calls;
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
-long spufs_create(struct nameidata *nd, unsigned int flags,
+long spufs_create(struct path *path, struct dentry *dentry, unsigned int flags,
mode_t mode, struct file *filp);
/* ELF coredump callbacks for writing SPU ELF notes */
extern int spufs_coredump_extra_notes_size(void);
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index a3d2ce54ea2e..609e016e92d0 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -62,21 +62,17 @@ out:
static long do_spu_create(const char __user *pathname, unsigned int flags,
mode_t mode, struct file *neighbor)
{
- char *tmp;
+ struct path path;
+ struct dentry *dentry;
int ret;
- tmp = getname(pathname);
- ret = PTR_ERR(tmp);
- if (!IS_ERR(tmp)) {
- struct nameidata nd;
-
- ret = kern_path_parent(tmp, &nd);
- if (!ret) {
- nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
- ret = spufs_create(&nd, flags, mode, neighbor);
- path_put(&nd.path);
- }
- putname(tmp);
+ dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
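+ /*
+ * On success user_path_create() returns with the parent
+ * directory's i_mutex held; it is dropped below once
+ * spufs_create() has run.
+ */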
+ ret = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+ ret = spufs_create(&path, dentry, flags, mode, neighbor);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+ dput(dentry);
+ path_put(&path);
}
return ret;
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index bc0b0efdc5fe..d3cdab582c5d 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -1,6 +1,7 @@
config PPC_CHRP
bool "Common Hardware Reference Platform (CHRP) based machines"
depends on 6xx
+ select HAVE_PCSPKR_PLATFORM
select MPIC
select PPC_I8259
select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index 29c02f36b32f..f519ee17ff7d 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -167,7 +167,7 @@ BEGIN_FTR_SECTION
std r12,PACA_EXGEN+EX_R13(r13)
EXCEPTION_PROLOG_ISERIES_1
FTR_SECTION_ELSE
- EXCEPTION_PROLOG_1(PACA_EXGEN)
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
EXCEPTION_PROLOG_ISERIES_1
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
b data_access_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
index bae3fba5ad8e..50271b550a99 100644
--- a/arch/powerpc/platforms/iseries/exception.h
+++ b/arch/powerpc/platforms/iseries/exception.h
@@ -39,7 +39,7 @@
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(area); \
+ EXCEPTION_PROLOG_1(area, NOTEST, 0); \
EXCEPTION_PROLOG_ISERIES_1; \
b label##_common
@@ -48,7 +48,7 @@ label##_iSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(PACA_EXGEN); \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
lbz r10,PACASOFTIRQEN(r13); \
cmpwi 0,r10,0; \
beq- label##_iSeries_masked; \
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f33e08d573ce..abe8d7e2ebeb 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
+#include <linux/of_pci.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -235,7 +236,7 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
- np = pci_busdev_to_OF_node(bus, devfn);
+ np = of_pci_find_child_device(bus->dev.of_node, devfn);
if (np == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
index bf8330ef2e76..f0536c7cda9f 100644
--- a/arch/powerpc/platforms/prep/Kconfig
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -1,6 +1,7 @@
config PPC_PREP
bool "PowerPC Reference Platform (PReP) based machines"
depends on 6xx && BROKEN
+ select HAVE_PCSPKR_PLATFORM
select MPIC
select PPC_I8259
select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 71af4c5d6c05..05cf4769b88c 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -1,6 +1,7 @@
config PPC_PSERIES
depends on PPC64 && PPC_BOOK3S
bool "IBM pSeries & new (POWER5-based) iSeries"
+ select HAVE_PCSPKR_PLATFORM
select MPIC
select PCI_MSI
select PPC_XICS
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 1f15ad436140..ba382b59b926 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
+#include <linux/module.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -24,6 +25,7 @@
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
+#include <asm/kvm_ppc.h>
struct icp_ipl {
union {
@@ -139,6 +141,12 @@ static void icp_native_cause_ipi(int cpu, unsigned long data)
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
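+/*
+ * Wake a CPU by queueing an IPI at it (raise its qirr priority).
+ * Exported; the new kvm_ppc.h include above suggests KVM as the
+ * intended user.
+ */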
+void xics_wake_cpu(int cpu)
+{
+ icp_native_set_qirr(cpu, IPI_PRIORITY);
+}
+EXPORT_SYMBOL_GPL(xics_wake_cpu);
+
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
@@ -185,6 +193,7 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
}
icp_native_regs[cpu] = ioremap(addr, size);
+ kvmppc_set_xics_phys(cpu, addr);
if (!icp_native_regs[cpu]) {
pr_warning("icp_native: Failed ioremap for CPU %d, "
"interrupt server #0x%x, addr %#lx\n",
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
index 2a5523a32bcc..e8c9e18b8039 100644
--- a/arch/s390/boot/compressed/head31.S
+++ b/arch/s390/boot/compressed/head31.S
@@ -7,14 +7,14 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include "sizes.h"
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
basr %r13,0 # get base
.LPG1:
# setup stack
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
index 2982cb140550..f86a4eef28a9 100644
--- a/arch/s390/boot/compressed/head64.S
+++ b/arch/s390/boot/compressed/head64.S
@@ -7,14 +7,14 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include "sizes.h"
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
basr %r13,0 # get base
.LPG1:
# setup stack
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 5ed8d64fc2ed..0317a3547cb9 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -1,15 +1,12 @@
/*
* Cryptographic API.
*
- * s390 implementation of the SHA256 Secure Hash Algorithm.
+ * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithms.
*
* s390 Version:
- * Copyright IBM Corp. 2005,2007
+ * Copyright IBM Corp. 2005,2011
* Author(s): Jan Glauber (jang@de.ibm.com)
*
- * Derived from "crypto/sha256_generic.c"
- * and "arch/s390/crypto/sha1_s390.c"
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -65,7 +62,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
return 0;
}
-static struct shash_alg alg = {
+static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = s390_sha_update,
@@ -84,22 +81,69 @@ static struct shash_alg alg = {
}
};
-static int sha256_s390_init(void)
+static int sha224_init(struct shash_desc *desc)
{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+ sctx->func = KIMD_SHA_256;
+
+ return 0;
+}
+
+static struct shash_alg sha224_alg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_init,
+ .update = s390_sha_update,
+ .final = s390_sha_final,
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name= "sha224-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha256_s390_init(void)
+{
+ int ret;
+
if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
return -EOPNOTSUPP;
-
- return crypto_register_shash(&alg);
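+ /* Register both algorithms or neither: unwind sha256 if sha224 fails. */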
+ ret = crypto_register_shash(&sha256_alg);
+ if (ret < 0)
+ goto out;
+ ret = crypto_register_shash(&sha224_alg);
+ if (ret < 0)
+ crypto_unregister_shash(&sha256_alg);
+out:
+ return ret;
}
static void __exit sha256_s390_fini(void)
{
- crypto_unregister_shash(&alg);
+ crypto_unregister_shash(&sha224_alg);
+ crypto_unregister_shash(&sha256_alg);
}
module_init(sha256_s390_init);
module_exit(sha256_s390_fini);
MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha224");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
+MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 865d6d891ace..38fdf451febb 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -29,42 +29,42 @@
})
/* set system mask. */
-static inline void __arch_local_irq_ssm(unsigned long flags)
+static inline notrace void __arch_local_irq_ssm(unsigned long flags)
{
asm volatile("ssm %0" : : "Q" (flags) : "memory");
}
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
return __arch_local_irq_stosm(0x00);
}
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
{
return __arch_local_irq_stnsm(0xfc);
}
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
{
__arch_local_irq_stosm(0x03);
}
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
{
__arch_local_irq_ssm(flags);
}
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (3UL << (BITS_PER_LONG - 8)));
}
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index cef7dbf69dfc..00ff00dfb24c 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -93,9 +93,7 @@ struct kvm_s390_sie_block {
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
- __u8 reserved70[16]; /* 0x0070 */
- __u64 gmsor; /* 0x0080 */
- __u64 gmslm; /* 0x0088 */
+ __u8 reserved70[32]; /* 0x0070 */
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
@@ -138,6 +136,7 @@ struct kvm_vcpu_stat {
u32 instruction_chsc;
u32 instruction_stsi;
u32 instruction_stfl;
+ u32 instruction_tprot;
u32 instruction_sigp_sense;
u32 instruction_sigp_emergency;
u32 instruction_sigp_stop;
@@ -175,6 +174,10 @@ struct kvm_s390_prefix_info {
__u32 address;
};
+struct kvm_s390_emerg_info {
+ __u16 code;
+};
+
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@@ -182,6 +185,7 @@ struct kvm_s390_interrupt_info {
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
struct kvm_s390_prefix_info prefix;
};
};
@@ -226,6 +230,7 @@ struct kvm_vcpu_arch {
struct cpuid cpu_id;
u64 stidp_data;
};
+ struct gmap *gmap;
};
struct kvm_vm_stat {
@@ -236,6 +241,7 @@ struct kvm_arch{
struct sca_block *sca;
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
+ struct gmap *gmap;
};
extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index 291c2d01c44f..fc8a8284778e 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -1,6 +1,9 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
-/* Nothing to see here... */
+#include <linux/stringify.h>
+
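+/*
+ * Fill alignment padding with 0x07 bytes; a 0x0707 halfword decodes
+ * as "bcr 0,%r7", which is a no-op.
+ */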
+#define __ALIGN .align 4, 0x07
+#define __ALIGN_STR __stringify(__ALIGN)
#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 228cf0b295db..f26280d9e88d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -268,7 +268,7 @@ struct _lowcore {
__u64 vdso_per_cpu_data; /* 0x0358 */
__u64 machine_flags; /* 0x0360 */
__u64 ftrace_func; /* 0x0368 */
- __u64 sie_hook; /* 0x0370 */
+ __u64 gmap; /* 0x0370 */
__u64 cmf_hpp; /* 0x0378 */
/* Interrupt response block. */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 82d0847896a0..4506791adcd5 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
unsigned int flush_mm;
spinlock_t list_lock;
struct list_head pgtable_list;
+ struct list_head gmap_list;
unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
@@ -17,6 +18,7 @@ typedef struct {
#define INIT_MM_CONTEXT(name) \
.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
- .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
+ .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+ .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
#endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c2..8eef9b5b3cf4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -20,7 +20,7 @@
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
-unsigned long *page_table_alloc(struct mm_struct *);
+unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
void page_table_free(struct mm_struct *, unsigned long *);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
void page_table_free_rcu(struct mmu_gather *, unsigned long *);
@@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
+ INIT_LIST_HEAD(&mm->context.gmap_list);
return (pgd_t *) crst_table_alloc(mm);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
@@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm,
/*
* page table entry allocation/free routines.
*/
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 801fbe1d837d..519eb5f187ef 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -654,6 +654,48 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
#endif
}
+/**
+ * struct gmap - guest address space
+ * @mm: pointer to the parent mm_struct
+ * @table: pointer to the page directory
+ * @crst_list: list of all crst tables used in the guest address space
+ */
+struct gmap {
+ struct list_head list;
+ struct mm_struct *mm;
+ unsigned long *table;
+ struct list_head crst_list;
+};
+
+/**
+ * struct gmap_rmap - reverse mapping for segment table entries
+ * @next: pointer to the next gmap_rmap structure in the list
+ * @entry: pointer to a segment table entry
+ */
+struct gmap_rmap {
+ struct list_head list;
+ unsigned long *entry;
+};
+
+/**
+ * struct gmap_pgtable - gmap information attached to a page table
+ * @vmaddr: address of the 1MB segment in the process virtual memory
+ * @mapper: list of segment table entries mapping a page table
+ */
+struct gmap_pgtable {
+ unsigned long vmaddr;
+ struct list_head mapper;
+};
+
+struct gmap *gmap_alloc(struct mm_struct *mm);
+void gmap_free(struct gmap *gmap);
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+ unsigned long to, unsigned long length);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long gmap_fault(unsigned long address, struct gmap *);
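+
+/*
+ * Minimal (hypothetical) usage sketch: back guest real memory
+ * 0..16MB with host memory at 'from', enable the gmap around guest
+ * execution, then tear it down:
+ *
+ *	gmap = gmap_alloc(current->mm);
+ *	gmap_map_segment(gmap, from, 0, 0x1000000);
+ *	gmap_enable(gmap);
+ *	... run guest ...
+ *	gmap_disable(gmap);
+ *	gmap_free(gmap);
+ */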
+
/*
* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 1300c3025334..55dfcc8bdc0d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,6 +80,7 @@ struct thread_struct {
mm_segment_t mm_segment;
unsigned long prot_addr; /* address of protection-excep. */
unsigned int trap_no;
+ unsigned long gmap_addr; /* address of last gmap fault. */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
/* pfault_wait is used to block the process on a pfault event */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ad1382f7932e..1a5dbb6f1495 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -94,6 +94,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
+#define TIF_SIE 12 /* guest execution active */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_31BIT 17 /* 32bit process */
@@ -113,6 +114,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SIE (1<<TIF_SIE)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_FREEZE)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index b7a4f2eb0057..304445382382 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,7 +80,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* on all cpus instead of doing a local flush if the mm
* only ran on the local cpu.
*/
- if (MACHINE_HAS_IDTE)
+ if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
__tlb_flush_idte((unsigned long) mm->pgd |
mm->context.asce_bits);
else
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index edfbd17d7082..05d8f38734ec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -151,7 +151,7 @@ int main(void)
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
- DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+ DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
#endif /* CONFIG_32BIT */
return 0;
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 15e46ca94335..209938c1dfc8 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -6,13 +6,13 @@
* Michael Holzheu <holzheu@de.ibm.com>
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#ifdef CONFIG_64BIT
- .globl s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
aghi %r15,-STACK_FRAME_OVERHEAD
@@ -26,13 +26,13 @@ s390_base_mcck_handler:
lpswe __LC_MCK_OLD_PSW
.section .bss
+ .align 8
.globl s390_base_mcck_handler_fn
s390_base_mcck_handler_fn:
.quad 0
.previous
- .globl s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
stmg %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
@@ -46,13 +46,13 @@ s390_base_ext_handler:
lpswe __LC_EXT_OLD_PSW
.section .bss
+ .align 8
.globl s390_base_ext_handler_fn
s390_base_ext_handler_fn:
.quad 0
.previous
- .globl s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
stmg %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
@@ -70,6 +70,7 @@ disabled_wait_psw:
.quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
.section .bss
+ .align 8
.globl s390_base_pgm_handler_fn
s390_base_pgm_handler_fn:
.quad 0
@@ -77,8 +78,7 @@ s390_base_pgm_handler_fn:
#else /* CONFIG_64BIT */
- .globl s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
basr %r13,0
0: l %r15,__LC_PANIC_STACK # load panic stack
ahi %r15,-STACK_FRAME_OVERHEAD
@@ -93,13 +93,13 @@ s390_base_mcck_handler:
2: .long s390_base_mcck_handler_fn
.section .bss
+ .align 4
.globl s390_base_mcck_handler_fn
s390_base_mcck_handler_fn:
.long 0
.previous
- .globl s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
stm %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD
@@ -115,13 +115,13 @@ s390_base_ext_handler:
2: .long s390_base_ext_handler_fn
.section .bss
+ .align 4
.globl s390_base_ext_handler_fn
s390_base_ext_handler_fn:
.long 0
.previous
- .globl s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
stm %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD
@@ -142,6 +142,7 @@ disabled_wait_psw:
.long 0x000a0000,0x00000000 + s390_base_pgm_handler
.section .bss
+ .align 4
.globl s390_base_pgm_handler_fn
s390_base_pgm_handler_fn:
.long 0
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 1f5eb789c3a7..08ab9aa6a0d5 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -7,86 +7,74 @@
* Thomas Spatzier (tspat@de.ibm.com)
*/
- .globl sys32_exit_wrapper
-sys32_exit_wrapper:
+#include <linux/linkage.h>
+
+ENTRY(sys32_exit_wrapper)
lgfr %r2,%r2 # int
jg sys_exit # branch to sys_exit
- .globl sys32_read_wrapper
-sys32_read_wrapper:
+ENTRY(sys32_read_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys32_read # branch to sys_read
- .globl sys32_write_wrapper
-sys32_write_wrapper:
+ENTRY(sys32_write_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # size_t
jg sys32_write # branch to system call
- .globl sys32_open_wrapper
-sys32_open_wrapper:
+ENTRY(sys32_open_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_open # branch to system call
- .globl sys32_close_wrapper
-sys32_close_wrapper:
+ENTRY(sys32_close_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_close # branch to system call
- .globl sys32_creat_wrapper
-sys32_creat_wrapper:
+ENTRY(sys32_creat_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_creat # branch to system call
- .globl sys32_link_wrapper
-sys32_link_wrapper:
+ENTRY(sys32_link_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_link # branch to system call
- .globl sys32_unlink_wrapper
-sys32_unlink_wrapper:
+ENTRY(sys32_unlink_wrapper)
llgtr %r2,%r2 # const char *
jg sys_unlink # branch to system call
- .globl sys32_chdir_wrapper
-sys32_chdir_wrapper:
+ENTRY(sys32_chdir_wrapper)
llgtr %r2,%r2 # const char *
jg sys_chdir # branch to system call
- .globl sys32_time_wrapper
-sys32_time_wrapper:
+ENTRY(sys32_time_wrapper)
llgtr %r2,%r2 # int *
jg compat_sys_time # branch to system call
- .globl sys32_mknod_wrapper
-sys32_mknod_wrapper:
+ENTRY(sys32_mknod_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
llgfr %r4,%r4 # dev
jg sys_mknod # branch to system call
- .globl sys32_chmod_wrapper
-sys32_chmod_wrapper:
+ENTRY(sys32_chmod_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # mode_t
jg sys_chmod # branch to system call
- .globl sys32_lchown16_wrapper
-sys32_lchown16_wrapper:
+ENTRY(sys32_lchown16_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_lchown16 # branch to system call
- .globl sys32_lseek_wrapper
-sys32_lseek_wrapper:
+ENTRY(sys32_lseek_wrapper)
llgfr %r2,%r2 # unsigned int
lgfr %r3,%r3 # off_t
llgfr %r4,%r4 # unsigned int
@@ -94,8 +82,7 @@ sys32_lseek_wrapper:
#sys32_getpid_wrapper # void
- .globl sys32_mount_wrapper
-sys32_mount_wrapper:
+ENTRY(sys32_mount_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # char *
@@ -103,102 +90,85 @@ sys32_mount_wrapper:
llgtr %r6,%r6 # void *
jg compat_sys_mount # branch to system call
- .globl sys32_oldumount_wrapper
-sys32_oldumount_wrapper:
+ENTRY(sys32_oldumount_wrapper)
llgtr %r2,%r2 # char *
jg sys_oldumount # branch to system call
- .globl sys32_setuid16_wrapper
-sys32_setuid16_wrapper:
+ENTRY(sys32_setuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
jg sys32_setuid16 # branch to system call
#sys32_getuid16_wrapper # void
- .globl sys32_ptrace_wrapper
-sys32_ptrace_wrapper:
+ENTRY(sys32_ptrace_wrapper)
lgfr %r2,%r2 # long
lgfr %r3,%r3 # long
llgtr %r4,%r4 # long
llgfr %r5,%r5 # long
jg compat_sys_ptrace # branch to system call
- .globl sys32_alarm_wrapper
-sys32_alarm_wrapper:
+ENTRY(sys32_alarm_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_alarm # branch to system call
- .globl compat_sys_utime_wrapper
-compat_sys_utime_wrapper:
+ENTRY(compat_sys_utime_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_utimbuf *
jg compat_sys_utime # branch to system call
- .globl sys32_access_wrapper
-sys32_access_wrapper:
+ENTRY(sys32_access_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_access # branch to system call
- .globl sys32_nice_wrapper
-sys32_nice_wrapper:
+ENTRY(sys32_nice_wrapper)
lgfr %r2,%r2 # int
jg sys_nice # branch to system call
#sys32_sync_wrapper # void
- .globl sys32_kill_wrapper
-sys32_kill_wrapper:
+ENTRY(sys32_kill_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_kill # branch to system call
- .globl sys32_rename_wrapper
-sys32_rename_wrapper:
+ENTRY(sys32_rename_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_rename # branch to system call
- .globl sys32_mkdir_wrapper
-sys32_mkdir_wrapper:
+ENTRY(sys32_mkdir_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_mkdir # branch to system call
- .globl sys32_rmdir_wrapper
-sys32_rmdir_wrapper:
+ENTRY(sys32_rmdir_wrapper)
llgtr %r2,%r2 # const char *
jg sys_rmdir # branch to system call
- .globl sys32_dup_wrapper
-sys32_dup_wrapper:
+ENTRY(sys32_dup_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_dup # branch to system call
- .globl sys32_pipe_wrapper
-sys32_pipe_wrapper:
+ENTRY(sys32_pipe_wrapper)
llgtr %r2,%r2 # u32 *
jg sys_pipe # branch to system call
- .globl compat_sys_times_wrapper
-compat_sys_times_wrapper:
+ENTRY(compat_sys_times_wrapper)
llgtr %r2,%r2 # struct compat_tms *
jg compat_sys_times # branch to system call
- .globl sys32_brk_wrapper
-sys32_brk_wrapper:
+ENTRY(sys32_brk_wrapper)
llgtr %r2,%r2 # unsigned long
jg sys_brk # branch to system call
- .globl sys32_setgid16_wrapper
-sys32_setgid16_wrapper:
+ENTRY(sys32_setgid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
jg sys32_setgid16 # branch to system call
#sys32_getgid16_wrapper # void
- .globl sys32_signal_wrapper
-sys32_signal_wrapper:
+ENTRY(sys32_signal_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __sighandler_t
jg sys_signal
@@ -207,55 +177,46 @@ sys32_signal_wrapper:
#sys32_getegid16_wrapper # void
- .globl sys32_acct_wrapper
-sys32_acct_wrapper:
+ENTRY(sys32_acct_wrapper)
llgtr %r2,%r2 # char *
jg sys_acct # branch to system call
- .globl sys32_umount_wrapper
-sys32_umount_wrapper:
+ENTRY(sys32_umount_wrapper)
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_umount # branch to system call
- .globl compat_sys_ioctl_wrapper
-compat_sys_ioctl_wrapper:
+ENTRY(compat_sys_ioctl_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned int
jg compat_sys_ioctl # branch to system call
- .globl compat_sys_fcntl_wrapper
-compat_sys_fcntl_wrapper:
+ENTRY(compat_sys_fcntl_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned long
jg compat_sys_fcntl # branch to system call
- .globl sys32_setpgid_wrapper
-sys32_setpgid_wrapper:
+ENTRY(sys32_setpgid_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # pid_t
jg sys_setpgid # branch to system call
- .globl sys32_umask_wrapper
-sys32_umask_wrapper:
+ENTRY(sys32_umask_wrapper)
lgfr %r2,%r2 # int
jg sys_umask # branch to system call
- .globl sys32_chroot_wrapper
-sys32_chroot_wrapper:
+ENTRY(sys32_chroot_wrapper)
llgtr %r2,%r2 # char *
jg sys_chroot # branch to system call
- .globl sys32_ustat_wrapper
-sys32_ustat_wrapper:
+ENTRY(sys32_ustat_wrapper)
llgfr %r2,%r2 # dev_t
llgtr %r3,%r3 # struct ustat *
jg compat_sys_ustat
- .globl sys32_dup2_wrapper
-sys32_dup2_wrapper:
+ENTRY(sys32_dup2_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_dup2 # branch to system call
@@ -266,262 +227,220 @@ sys32_dup2_wrapper:
#sys32_setsid_wrapper # void
- .globl sys32_sigaction_wrapper
-sys32_sigaction_wrapper:
+ENTRY(sys32_sigaction_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct old_sigaction *
llgtr %r4,%r4 # struct old_sigaction32 *
jg sys32_sigaction # branch to system call
- .globl sys32_setreuid16_wrapper
-sys32_setreuid16_wrapper:
+ENTRY(sys32_setreuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
jg sys32_setreuid16 # branch to system call
- .globl sys32_setregid16_wrapper
-sys32_setregid16_wrapper:
+ENTRY(sys32_setregid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
llgfr %r3,%r3 # __kernel_old_gid_emu31_t
jg sys32_setregid16 # branch to system call
- .globl sys_sigsuspend_wrapper
-sys_sigsuspend_wrapper:
+ENTRY(sys_sigsuspend_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgfr %r4,%r4 # old_sigset_t
jg sys_sigsuspend
- .globl compat_sys_sigpending_wrapper
-compat_sys_sigpending_wrapper:
+ENTRY(compat_sys_sigpending_wrapper)
llgtr %r2,%r2 # compat_old_sigset_t *
jg compat_sys_sigpending # branch to system call
- .globl sys32_sethostname_wrapper
-sys32_sethostname_wrapper:
+ENTRY(sys32_sethostname_wrapper)
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_sethostname # branch to system call
- .globl compat_sys_setrlimit_wrapper
-compat_sys_setrlimit_wrapper:
+ENTRY(compat_sys_setrlimit_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_setrlimit # branch to system call
- .globl compat_sys_old_getrlimit_wrapper
-compat_sys_old_getrlimit_wrapper:
+ENTRY(compat_sys_old_getrlimit_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_old_getrlimit # branch to system call
- .globl compat_sys_getrlimit_wrapper
-compat_sys_getrlimit_wrapper:
+ENTRY(compat_sys_getrlimit_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_getrlimit # branch to system call
- .globl sys32_mmap2_wrapper
-sys32_mmap2_wrapper:
+ENTRY(sys32_mmap2_wrapper)
llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
jg sys32_mmap2 # branch to system call
- .globl compat_sys_getrusage_wrapper
-compat_sys_getrusage_wrapper:
+ENTRY(compat_sys_getrusage_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct rusage_emu31 *
jg compat_sys_getrusage # branch to system call
- .globl compat_sys_gettimeofday_wrapper
-compat_sys_gettimeofday_wrapper:
+ENTRY(compat_sys_gettimeofday_wrapper)
llgtr %r2,%r2 # struct timeval_emu31 *
llgtr %r3,%r3 # struct timezone *
jg compat_sys_gettimeofday # branch to system call
- .globl compat_sys_settimeofday_wrapper
-compat_sys_settimeofday_wrapper:
+ENTRY(compat_sys_settimeofday_wrapper)
llgtr %r2,%r2 # struct timeval_emu31 *
llgtr %r3,%r3 # struct timezone *
jg compat_sys_settimeofday # branch to system call
- .globl sys32_getgroups16_wrapper
-sys32_getgroups16_wrapper:
+ENTRY(sys32_getgroups16_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
jg sys32_getgroups16 # branch to system call
- .globl sys32_setgroups16_wrapper
-sys32_setgroups16_wrapper:
+ENTRY(sys32_setgroups16_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
jg sys32_setgroups16 # branch to system call
- .globl sys32_symlink_wrapper
-sys32_symlink_wrapper:
+ENTRY(sys32_symlink_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_symlink # branch to system call
- .globl sys32_readlink_wrapper
-sys32_readlink_wrapper:
+ENTRY(sys32_readlink_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # char *
lgfr %r4,%r4 # int
jg sys_readlink # branch to system call
- .globl sys32_uselib_wrapper
-sys32_uselib_wrapper:
+ENTRY(sys32_uselib_wrapper)
llgtr %r2,%r2 # const char *
jg sys_uselib # branch to system call
- .globl sys32_swapon_wrapper
-sys32_swapon_wrapper:
+ENTRY(sys32_swapon_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_swapon # branch to system call
- .globl sys32_reboot_wrapper
-sys32_reboot_wrapper:
+ENTRY(sys32_reboot_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgfr %r4,%r4 # unsigned int
llgtr %r5,%r5 # void *
jg sys_reboot # branch to system call
- .globl old32_readdir_wrapper
-old32_readdir_wrapper:
+ENTRY(old32_readdir_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg compat_sys_old_readdir # branch to system call
- .globl old32_mmap_wrapper
-old32_mmap_wrapper:
+ENTRY(old32_mmap_wrapper)
llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
jg old32_mmap # branch to system call
- .globl sys32_munmap_wrapper
-sys32_munmap_wrapper:
+ENTRY(sys32_munmap_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_munmap # branch to system call
- .globl sys32_truncate_wrapper
-sys32_truncate_wrapper:
+ENTRY(sys32_truncate_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # long
jg sys_truncate # branch to system call
- .globl sys32_ftruncate_wrapper
-sys32_ftruncate_wrapper:
+ENTRY(sys32_ftruncate_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
jg sys_ftruncate # branch to system call
- .globl sys32_fchmod_wrapper
-sys32_fchmod_wrapper:
+ENTRY(sys32_fchmod_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # mode_t
jg sys_fchmod # branch to system call
- .globl sys32_fchown16_wrapper
-sys32_fchown16_wrapper:
+ENTRY(sys32_fchown16_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # compat_uid_t
llgfr %r4,%r4 # compat_uid_t
jg sys32_fchown16 # branch to system call
- .globl sys32_getpriority_wrapper
-sys32_getpriority_wrapper:
+ENTRY(sys32_getpriority_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_getpriority # branch to system call
- .globl sys32_setpriority_wrapper
-sys32_setpriority_wrapper:
+ENTRY(sys32_setpriority_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_setpriority # branch to system call
- .globl compat_sys_statfs_wrapper
-compat_sys_statfs_wrapper:
+ENTRY(compat_sys_statfs_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_statfs *
jg compat_sys_statfs # branch to system call
- .globl compat_sys_fstatfs_wrapper
-compat_sys_fstatfs_wrapper:
+ENTRY(compat_sys_fstatfs_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct compat_statfs *
jg compat_sys_fstatfs # branch to system call
- .globl compat_sys_socketcall_wrapper
-compat_sys_socketcall_wrapper:
+ENTRY(compat_sys_socketcall_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # u32 *
jg compat_sys_socketcall # branch to system call
- .globl sys32_syslog_wrapper
-sys32_syslog_wrapper:
+ENTRY(sys32_syslog_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
lgfr %r4,%r4 # int
jg sys_syslog # branch to system call
- .globl compat_sys_setitimer_wrapper
-compat_sys_setitimer_wrapper:
+ENTRY(compat_sys_setitimer_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct itimerval_emu31 *
llgtr %r4,%r4 # struct itimerval_emu31 *
jg compat_sys_setitimer # branch to system call
- .globl compat_sys_getitimer_wrapper
-compat_sys_getitimer_wrapper:
+ENTRY(compat_sys_getitimer_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct itimerval_emu31 *
jg compat_sys_getitimer # branch to system call
- .globl compat_sys_newstat_wrapper
-compat_sys_newstat_wrapper:
+ENTRY(compat_sys_newstat_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newstat # branch to system call
- .globl compat_sys_newlstat_wrapper
-compat_sys_newlstat_wrapper:
+ENTRY(compat_sys_newlstat_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newlstat # branch to system call
- .globl compat_sys_newfstat_wrapper
-compat_sys_newfstat_wrapper:
+ENTRY(compat_sys_newfstat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newfstat # branch to system call
#sys32_vhangup_wrapper # void
- .globl compat_sys_wait4_wrapper
-compat_sys_wait4_wrapper:
+ENTRY(compat_sys_wait4_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # unsigned int *
lgfr %r4,%r4 # int
llgtr %r5,%r5 # struct rusage *
jg compat_sys_wait4 # branch to system call
- .globl sys32_swapoff_wrapper
-sys32_swapoff_wrapper:
+ENTRY(sys32_swapoff_wrapper)
llgtr %r2,%r2 # const char *
jg sys_swapoff # branch to system call
- .globl compat_sys_sysinfo_wrapper
-compat_sys_sysinfo_wrapper:
+ENTRY(compat_sys_sysinfo_wrapper)
llgtr %r2,%r2 # struct sysinfo_emu31 *
jg compat_sys_sysinfo # branch to system call
- .globl sys32_ipc_wrapper
-sys32_ipc_wrapper:
+ENTRY(sys32_ipc_wrapper)
llgfr %r2,%r2 # uint
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
@@ -529,8 +448,7 @@ sys32_ipc_wrapper:
llgfr %r6,%r6 # u32
jg sys32_ipc # branch to system call
- .globl sys32_fsync_wrapper
-sys32_fsync_wrapper:
+ENTRY(sys32_fsync_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_fsync # branch to system call
@@ -538,97 +456,81 @@ sys32_fsync_wrapper:
#sys32_clone_wrapper # done in clone_glue
- .globl sys32_setdomainname_wrapper
-sys32_setdomainname_wrapper:
+ENTRY(sys32_setdomainname_wrapper)
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_setdomainname # branch to system call
- .globl sys32_newuname_wrapper
-sys32_newuname_wrapper:
+ENTRY(sys32_newuname_wrapper)
llgtr %r2,%r2 # struct new_utsname *
jg sys_newuname # branch to system call
- .globl compat_sys_adjtimex_wrapper
-compat_sys_adjtimex_wrapper:
+ENTRY(compat_sys_adjtimex_wrapper)
llgtr %r2,%r2 # struct compat_timex *
jg compat_sys_adjtimex # branch to system call
- .globl sys32_mprotect_wrapper
-sys32_mprotect_wrapper:
+ENTRY(sys32_mprotect_wrapper)
llgtr %r2,%r2 # unsigned long (actually pointer)
llgfr %r3,%r3 # size_t
llgfr %r4,%r4 # unsigned long
jg sys_mprotect # branch to system call
- .globl compat_sys_sigprocmask_wrapper
-compat_sys_sigprocmask_wrapper:
+ENTRY(compat_sys_sigprocmask_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_old_sigset_t *
llgtr %r4,%r4 # compat_old_sigset_t *
jg compat_sys_sigprocmask # branch to system call
- .globl sys_init_module_wrapper
-sys_init_module_wrapper:
+ENTRY(sys_init_module_wrapper)
llgtr %r2,%r2 # void *
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # char *
jg sys_init_module # branch to system call
- .globl sys_delete_module_wrapper
-sys_delete_module_wrapper:
+ENTRY(sys_delete_module_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned int
jg sys_delete_module # branch to system call
- .globl sys32_quotactl_wrapper
-sys32_quotactl_wrapper:
+ENTRY(sys32_quotactl_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # qid_t
llgtr %r5,%r5 # caddr_t
jg sys_quotactl # branch to system call
- .globl sys32_getpgid_wrapper
-sys32_getpgid_wrapper:
+ENTRY(sys32_getpgid_wrapper)
lgfr %r2,%r2 # pid_t
jg sys_getpgid # branch to system call
- .globl sys32_fchdir_wrapper
-sys32_fchdir_wrapper:
+ENTRY(sys32_fchdir_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_fchdir # branch to system call
- .globl sys32_bdflush_wrapper
-sys32_bdflush_wrapper:
+ENTRY(sys32_bdflush_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # long
jg sys_bdflush # branch to system call
- .globl sys32_sysfs_wrapper
-sys32_sysfs_wrapper:
+ENTRY(sys32_sysfs_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys_sysfs # branch to system call
- .globl sys32_personality_wrapper
-sys32_personality_wrapper:
+ENTRY(sys32_personality_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_s390_personality # branch to system call
- .globl sys32_setfsuid16_wrapper
-sys32_setfsuid16_wrapper:
+ENTRY(sys32_setfsuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
jg sys32_setfsuid16 # branch to system call
- .globl sys32_setfsgid16_wrapper
-sys32_setfsgid16_wrapper:
+ENTRY(sys32_setfsgid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
jg sys32_setfsgid16 # branch to system call
- .globl sys32_llseek_wrapper
-sys32_llseek_wrapper:
+ENTRY(sys32_llseek_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -636,15 +538,13 @@ sys32_llseek_wrapper:
llgfr %r6,%r6 # unsigned int
jg sys_llseek # branch to system call
- .globl sys32_getdents_wrapper
-sys32_getdents_wrapper:
+ENTRY(sys32_getdents_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg compat_sys_getdents # branch to system call
- .globl compat_sys_select_wrapper
-compat_sys_select_wrapper:
+ENTRY(compat_sys_select_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_fd_set *
llgtr %r4,%r4 # compat_fd_set *
@@ -652,112 +552,94 @@ compat_sys_select_wrapper:
llgtr %r6,%r6 # struct compat_timeval *
jg compat_sys_select # branch to system call
- .globl sys32_flock_wrapper
-sys32_flock_wrapper:
+ENTRY(sys32_flock_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_flock # branch to system call
- .globl sys32_msync_wrapper
-sys32_msync_wrapper:
+ENTRY(sys32_msync_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
lgfr %r4,%r4 # int
jg sys_msync # branch to system call
- .globl compat_sys_readv_wrapper
-compat_sys_readv_wrapper:
+ENTRY(compat_sys_readv_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct compat_iovec *
llgfr %r4,%r4 # unsigned long
jg compat_sys_readv # branch to system call
- .globl compat_sys_writev_wrapper
-compat_sys_writev_wrapper:
+ENTRY(compat_sys_writev_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct compat_iovec *
llgfr %r4,%r4 # unsigned long
jg compat_sys_writev # branch to system call
- .globl sys32_getsid_wrapper
-sys32_getsid_wrapper:
+ENTRY(sys32_getsid_wrapper)
lgfr %r2,%r2 # pid_t
jg sys_getsid # branch to system call
- .globl sys32_fdatasync_wrapper
-sys32_fdatasync_wrapper:
+ENTRY(sys32_fdatasync_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_fdatasync # branch to system call
- .globl sys32_mlock_wrapper
-sys32_mlock_wrapper:
+ENTRY(sys32_mlock_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_mlock # branch to system call
- .globl sys32_munlock_wrapper
-sys32_munlock_wrapper:
+ENTRY(sys32_munlock_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_munlock # branch to system call
- .globl sys32_mlockall_wrapper
-sys32_mlockall_wrapper:
+ENTRY(sys32_mlockall_wrapper)
lgfr %r2,%r2 # int
jg sys_mlockall # branch to system call
#sys32_munlockall_wrapper # void
- .globl sys32_sched_setparam_wrapper
-sys32_sched_setparam_wrapper:
+ENTRY(sys32_sched_setparam_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct sched_param *
jg sys_sched_setparam # branch to system call
- .globl sys32_sched_getparam_wrapper
-sys32_sched_getparam_wrapper:
+ENTRY(sys32_sched_getparam_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct sched_param *
jg sys_sched_getparam # branch to system call
- .globl sys32_sched_setscheduler_wrapper
-sys32_sched_setscheduler_wrapper:
+ENTRY(sys32_sched_setscheduler_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct sched_param *
jg sys_sched_setscheduler # branch to system call
- .globl sys32_sched_getscheduler_wrapper
-sys32_sched_getscheduler_wrapper:
+ENTRY(sys32_sched_getscheduler_wrapper)
lgfr %r2,%r2 # pid_t
jg sys_sched_getscheduler # branch to system call
#sys32_sched_yield_wrapper # void
- .globl sys32_sched_get_priority_max_wrapper
-sys32_sched_get_priority_max_wrapper:
+ENTRY(sys32_sched_get_priority_max_wrapper)
lgfr %r2,%r2 # int
jg sys_sched_get_priority_max # branch to system call
- .globl sys32_sched_get_priority_min_wrapper
-sys32_sched_get_priority_min_wrapper:
+ENTRY(sys32_sched_get_priority_min_wrapper)
lgfr %r2,%r2 # int
jg sys_sched_get_priority_min # branch to system call
- .globl sys32_sched_rr_get_interval_wrapper
-sys32_sched_rr_get_interval_wrapper:
+ENTRY(sys32_sched_rr_get_interval_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct compat_timespec *
jg sys32_sched_rr_get_interval # branch to system call
- .globl compat_sys_nanosleep_wrapper
-compat_sys_nanosleep_wrapper:
+ENTRY(compat_sys_nanosleep_wrapper)
llgtr %r2,%r2 # struct compat_timespec *
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_nanosleep # branch to system call
- .globl sys32_mremap_wrapper
-sys32_mremap_wrapper:
+ENTRY(sys32_mremap_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -765,50 +647,43 @@ sys32_mremap_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_mremap # branch to system call
- .globl sys32_setresuid16_wrapper
-sys32_setresuid16_wrapper:
+ENTRY(sys32_setresuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_setresuid16 # branch to system call
- .globl sys32_getresuid16_wrapper
-sys32_getresuid16_wrapper:
+ENTRY(sys32_getresuid16_wrapper)
llgtr %r2,%r2 # __kernel_old_uid_emu31_t *
llgtr %r3,%r3 # __kernel_old_uid_emu31_t *
llgtr %r4,%r4 # __kernel_old_uid_emu31_t *
jg sys32_getresuid16 # branch to system call
- .globl sys32_poll_wrapper
-sys32_poll_wrapper:
+ENTRY(sys32_poll_wrapper)
llgtr %r2,%r2 # struct pollfd *
llgfr %r3,%r3 # unsigned int
lgfr %r4,%r4 # long
jg sys_poll # branch to system call
- .globl compat_sys_nfsservctl_wrapper
-compat_sys_nfsservctl_wrapper:
+ENTRY(compat_sys_nfsservctl_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct compat_nfsctl_arg*
llgtr %r4,%r4 # union compat_nfsctl_res*
jg compat_sys_nfsservctl # branch to system call
- .globl sys32_setresgid16_wrapper
-sys32_setresgid16_wrapper:
+ENTRY(sys32_setresgid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
llgfr %r3,%r3 # __kernel_old_gid_emu31_t
llgfr %r4,%r4 # __kernel_old_gid_emu31_t
jg sys32_setresgid16 # branch to system call
- .globl sys32_getresgid16_wrapper
-sys32_getresgid16_wrapper:
+ENTRY(sys32_getresgid16_wrapper)
llgtr %r2,%r2 # __kernel_old_gid_emu31_t *
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
llgtr %r4,%r4 # __kernel_old_gid_emu31_t *
jg sys32_getresgid16 # branch to system call
- .globl sys32_prctl_wrapper
-sys32_prctl_wrapper:
+ENTRY(sys32_prctl_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -818,51 +693,44 @@ sys32_prctl_wrapper:
#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
- .globl sys32_rt_sigaction_wrapper
-sys32_rt_sigaction_wrapper:
+ENTRY(sys32_rt_sigaction_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct sigaction_emu31 *
llgtr %r4,%r4 # const struct sigaction_emu31 *
llgfr %r5,%r5 # size_t
jg sys32_rt_sigaction # branch to system call
- .globl sys32_rt_sigprocmask_wrapper
-sys32_rt_sigprocmask_wrapper:
+ENTRY(sys32_rt_sigprocmask_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # old_sigset_emu31 *
llgtr %r4,%r4 # old_sigset_emu31 *
llgfr %r5,%r5 # size_t
jg sys32_rt_sigprocmask # branch to system call
- .globl sys32_rt_sigpending_wrapper
-sys32_rt_sigpending_wrapper:
+ENTRY(sys32_rt_sigpending_wrapper)
llgtr %r2,%r2 # sigset_emu31 *
llgfr %r3,%r3 # size_t
jg sys32_rt_sigpending # branch to system call
- .globl compat_sys_rt_sigtimedwait_wrapper
-compat_sys_rt_sigtimedwait_wrapper:
+ENTRY(compat_sys_rt_sigtimedwait_wrapper)
llgtr %r2,%r2 # const sigset_emu31_t *
llgtr %r3,%r3 # siginfo_emu31_t *
llgtr %r4,%r4 # const struct compat_timespec *
llgfr %r5,%r5 # size_t
jg compat_sys_rt_sigtimedwait # branch to system call
- .globl sys32_rt_sigqueueinfo_wrapper
-sys32_rt_sigqueueinfo_wrapper:
+ENTRY(sys32_rt_sigqueueinfo_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # siginfo_emu31_t *
jg sys32_rt_sigqueueinfo # branch to system call
- .globl compat_sys_rt_sigsuspend_wrapper
-compat_sys_rt_sigsuspend_wrapper:
+ENTRY(compat_sys_rt_sigsuspend_wrapper)
llgtr %r2,%r2 # compat_sigset_t *
llgfr %r3,%r3 # compat_size_t
jg compat_sys_rt_sigsuspend
- .globl sys32_pread64_wrapper
-sys32_pread64_wrapper:
+ENTRY(sys32_pread64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
@@ -870,8 +738,7 @@ sys32_pread64_wrapper:
llgfr %r6,%r6 # u32
jg sys32_pread64 # branch to system call
- .globl sys32_pwrite64_wrapper
-sys32_pwrite64_wrapper:
+ENTRY(sys32_pwrite64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # size_t
@@ -879,39 +746,33 @@ sys32_pwrite64_wrapper:
llgfr %r6,%r6 # u32
jg sys32_pwrite64 # branch to system call
- .globl sys32_chown16_wrapper
-sys32_chown16_wrapper:
+ENTRY(sys32_chown16_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
llgfr %r4,%r4 # __kernel_old_gid_emu31_t
jg sys32_chown16 # branch to system call
- .globl sys32_getcwd_wrapper
-sys32_getcwd_wrapper:
+ENTRY(sys32_getcwd_wrapper)
llgtr %r2,%r2 # char *
llgfr %r3,%r3 # unsigned long
jg sys_getcwd # branch to system call
- .globl sys32_capget_wrapper
-sys32_capget_wrapper:
+ENTRY(sys32_capget_wrapper)
llgtr %r2,%r2 # cap_user_header_t
llgtr %r3,%r3 # cap_user_data_t
jg sys_capget # branch to system call
- .globl sys32_capset_wrapper
-sys32_capset_wrapper:
+ENTRY(sys32_capset_wrapper)
llgtr %r2,%r2 # cap_user_header_t
llgtr %r3,%r3 # const cap_user_data_t
jg sys_capset # branch to system call
- .globl sys32_sigaltstack_wrapper
-sys32_sigaltstack_wrapper:
+ENTRY(sys32_sigaltstack_wrapper)
llgtr %r2,%r2 # const stack_emu31_t *
llgtr %r3,%r3 # stack_emu31_t *
jg sys32_sigaltstack
- .globl sys32_sendfile_wrapper
-sys32_sendfile_wrapper:
+ENTRY(sys32_sendfile_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # __kernel_off_emu31_t *
@@ -920,22 +781,19 @@ sys32_sendfile_wrapper:
#sys32_vfork_wrapper # done in vfork_glue
- .globl sys32_truncate64_wrapper
-sys32_truncate64_wrapper:
+ENTRY(sys32_truncate64_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys32_truncate64 # branch to system call
- .globl sys32_ftruncate64_wrapper
-sys32_ftruncate64_wrapper:
+ENTRY(sys32_ftruncate64_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys32_ftruncate64 # branch to system call
- .globl sys32_lchown_wrapper
-sys32_lchown_wrapper:
+ENTRY(sys32_lchown_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
@@ -946,156 +804,131 @@ sys32_lchown_wrapper:
#sys32_geteuid_wrapper # void
#sys32_getegid_wrapper # void
- .globl sys32_setreuid_wrapper
-sys32_setreuid_wrapper:
+ENTRY(sys32_setreuid_wrapper)
llgfr %r2,%r2 # uid_t
llgfr %r3,%r3 # uid_t
jg sys_setreuid # branch to system call
- .globl sys32_setregid_wrapper
-sys32_setregid_wrapper:
+ENTRY(sys32_setregid_wrapper)
llgfr %r2,%r2 # gid_t
llgfr %r3,%r3 # gid_t
jg sys_setregid # branch to system call
- .globl sys32_getgroups_wrapper
-sys32_getgroups_wrapper:
+ENTRY(sys32_getgroups_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # gid_t *
jg sys_getgroups # branch to system call
- .globl sys32_setgroups_wrapper
-sys32_setgroups_wrapper:
+ENTRY(sys32_setgroups_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # gid_t *
jg sys_setgroups # branch to system call
- .globl sys32_fchown_wrapper
-sys32_fchown_wrapper:
+ENTRY(sys32_fchown_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
jg sys_fchown # branch to system call
- .globl sys32_setresuid_wrapper
-sys32_setresuid_wrapper:
+ENTRY(sys32_setresuid_wrapper)
llgfr %r2,%r2 # uid_t
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # uid_t
jg sys_setresuid # branch to system call
- .globl sys32_getresuid_wrapper
-sys32_getresuid_wrapper:
+ENTRY(sys32_getresuid_wrapper)
llgtr %r2,%r2 # uid_t *
llgtr %r3,%r3 # uid_t *
llgtr %r4,%r4 # uid_t *
jg sys_getresuid # branch to system call
- .globl sys32_setresgid_wrapper
-sys32_setresgid_wrapper:
+ENTRY(sys32_setresgid_wrapper)
llgfr %r2,%r2 # gid_t
llgfr %r3,%r3 # gid_t
llgfr %r4,%r4 # gid_t
jg sys_setresgid # branch to system call
- .globl sys32_getresgid_wrapper
-sys32_getresgid_wrapper:
+ENTRY(sys32_getresgid_wrapper)
llgtr %r2,%r2 # gid_t *
llgtr %r3,%r3 # gid_t *
llgtr %r4,%r4 # gid_t *
jg sys_getresgid # branch to system call
- .globl sys32_chown_wrapper
-sys32_chown_wrapper:
+ENTRY(sys32_chown_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
jg sys_chown # branch to system call
- .globl sys32_setuid_wrapper
-sys32_setuid_wrapper:
+ENTRY(sys32_setuid_wrapper)
llgfr %r2,%r2 # uid_t
jg sys_setuid # branch to system call
- .globl sys32_setgid_wrapper
-sys32_setgid_wrapper:
+ENTRY(sys32_setgid_wrapper)
llgfr %r2,%r2 # gid_t
jg sys_setgid # branch to system call
- .globl sys32_setfsuid_wrapper
-sys32_setfsuid_wrapper:
+ENTRY(sys32_setfsuid_wrapper)
llgfr %r2,%r2 # uid_t
jg sys_setfsuid # branch to system call
- .globl sys32_setfsgid_wrapper
-sys32_setfsgid_wrapper:
+ENTRY(sys32_setfsgid_wrapper)
llgfr %r2,%r2 # gid_t
jg sys_setfsgid # branch to system call
- .globl sys32_pivot_root_wrapper
-sys32_pivot_root_wrapper:
+ENTRY(sys32_pivot_root_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_pivot_root # branch to system call
- .globl sys32_mincore_wrapper
-sys32_mincore_wrapper:
+ENTRY(sys32_mincore_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
llgtr %r4,%r4 # unsigned char *
jg sys_mincore # branch to system call
- .globl sys32_madvise_wrapper
-sys32_madvise_wrapper:
+ENTRY(sys32_madvise_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
lgfr %r4,%r4 # int
jg sys_madvise # branch to system call
- .globl sys32_getdents64_wrapper
-sys32_getdents64_wrapper:
+ENTRY(sys32_getdents64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg sys_getdents64 # branch to system call
- .globl compat_sys_fcntl64_wrapper
-compat_sys_fcntl64_wrapper:
+ENTRY(compat_sys_fcntl64_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned long
jg compat_sys_fcntl64 # branch to system call
- .globl sys32_stat64_wrapper
-sys32_stat64_wrapper:
+ENTRY(sys32_stat64_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat64 *
jg sys32_stat64 # branch to system call
- .globl sys32_lstat64_wrapper
-sys32_lstat64_wrapper:
+ENTRY(sys32_lstat64_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat64 *
jg sys32_lstat64 # branch to system call
- .globl sys32_stime_wrapper
-sys32_stime_wrapper:
+ENTRY(sys32_stime_wrapper)
llgtr %r2,%r2 # long *
jg compat_sys_stime # branch to system call
- .globl sys32_sysctl_wrapper
-sys32_sysctl_wrapper:
+ENTRY(sys32_sysctl_wrapper)
llgtr %r2,%r2 # struct compat_sysctl_args *
jg compat_sys_sysctl
- .globl sys32_fstat64_wrapper
-sys32_fstat64_wrapper:
+ENTRY(sys32_fstat64_wrapper)
llgfr %r2,%r2 # unsigned long
llgtr %r3,%r3 # struct stat64 *
jg sys32_fstat64 # branch to system call
- .globl compat_sys_futex_wrapper
-compat_sys_futex_wrapper:
+ENTRY(compat_sys_futex_wrapper)
llgtr %r2,%r2 # u32 *
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
@@ -1105,8 +938,7 @@ compat_sys_futex_wrapper:
stg %r0,160(%r15)
jg compat_sys_futex # branch to system call
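
Wrappers for six-argument calls such as compat_sys_futex cannot rely on
registers alone: in the 64-bit C calling convention the sixth argument
lives on the stack at 160(%r15). The stg visible above stores the widened
value back into that slot; the matching load that widens the 31-bit word
from the low half of the slot falls in the part of the hunk elided here.
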
- .globl sys32_setxattr_wrapper
-sys32_setxattr_wrapper:
+ENTRY(sys32_setxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
@@ -1114,8 +946,7 @@ sys32_setxattr_wrapper:
lgfr %r6,%r6 # int
jg sys_setxattr
- .globl sys32_lsetxattr_wrapper
-sys32_lsetxattr_wrapper:
+ENTRY(sys32_lsetxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
@@ -1123,8 +954,7 @@ sys32_lsetxattr_wrapper:
lgfr %r6,%r6 # int
jg sys_lsetxattr
- .globl sys32_fsetxattr_wrapper
-sys32_fsetxattr_wrapper:
+ENTRY(sys32_fsetxattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
@@ -1132,124 +962,106 @@ sys32_fsetxattr_wrapper:
lgfr %r6,%r6 # int
jg sys_fsetxattr
- .globl sys32_getxattr_wrapper
-sys32_getxattr_wrapper:
+ENTRY(sys32_getxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
llgfr %r5,%r5 # size_t
jg sys_getxattr
- .globl sys32_lgetxattr_wrapper
-sys32_lgetxattr_wrapper:
+ENTRY(sys32_lgetxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
llgfr %r5,%r5 # size_t
jg sys_lgetxattr
- .globl sys32_fgetxattr_wrapper
-sys32_fgetxattr_wrapper:
+ENTRY(sys32_fgetxattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
llgfr %r5,%r5 # size_t
jg sys_fgetxattr
- .globl sys32_listxattr_wrapper
-sys32_listxattr_wrapper:
+ENTRY(sys32_listxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys_listxattr
- .globl sys32_llistxattr_wrapper
-sys32_llistxattr_wrapper:
+ENTRY(sys32_llistxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys_llistxattr
- .globl sys32_flistxattr_wrapper
-sys32_flistxattr_wrapper:
+ENTRY(sys32_flistxattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys_flistxattr
- .globl sys32_removexattr_wrapper
-sys32_removexattr_wrapper:
+ENTRY(sys32_removexattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
jg sys_removexattr
- .globl sys32_lremovexattr_wrapper
-sys32_lremovexattr_wrapper:
+ENTRY(sys32_lremovexattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
jg sys_lremovexattr
- .globl sys32_fremovexattr_wrapper
-sys32_fremovexattr_wrapper:
+ENTRY(sys32_fremovexattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
jg sys_fremovexattr
- .globl sys32_sched_setaffinity_wrapper
-sys32_sched_setaffinity_wrapper:
+ENTRY(sys32_sched_setaffinity_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # unsigned long *
jg compat_sys_sched_setaffinity
- .globl sys32_sched_getaffinity_wrapper
-sys32_sched_getaffinity_wrapper:
+ENTRY(sys32_sched_getaffinity_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # unsigned long *
jg compat_sys_sched_getaffinity
- .globl sys32_exit_group_wrapper
-sys32_exit_group_wrapper:
+ENTRY(sys32_exit_group_wrapper)
lgfr %r2,%r2 # int
jg sys_exit_group # branch to system call
- .globl sys32_set_tid_address_wrapper
-sys32_set_tid_address_wrapper:
+ENTRY(sys32_set_tid_address_wrapper)
llgtr %r2,%r2 # int *
jg sys_set_tid_address # branch to system call
- .globl sys_epoll_create_wrapper
-sys_epoll_create_wrapper:
+ENTRY(sys_epoll_create_wrapper)
lgfr %r2,%r2 # int
jg sys_epoll_create # branch to system call
- .globl sys_epoll_ctl_wrapper
-sys_epoll_ctl_wrapper:
+ENTRY(sys_epoll_ctl_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
llgtr %r5,%r5 # struct epoll_event *
jg sys_epoll_ctl # branch to system call
- .globl sys_epoll_wait_wrapper
-sys_epoll_wait_wrapper:
+ENTRY(sys_epoll_wait_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct epoll_event *
lgfr %r4,%r4 # int
lgfr %r5,%r5 # int
jg sys_epoll_wait # branch to system call
- .globl sys32_lookup_dcookie_wrapper
-sys32_lookup_dcookie_wrapper:
+ENTRY(sys32_lookup_dcookie_wrapper)
sllg %r2,%r2,32 # get high word of 64bit dcookie
or %r2,%r3 # get low word of 64bit dcookie
llgtr %r3,%r4 # char *
llgfr %r4,%r5 # size_t
jg sys_lookup_dcookie
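
sys32_lookup_dcookie_wrapper is the first wrapper here that rebuilds a
64-bit argument from two 32-bit registers with an sllg/or pair. The same
idiom in C (illustrative sketch; the helper name is invented):

	#include <stdint.h>

	/* sllg %rX,%rX,32 shifts the high word up; or %rX,%rY merges the
	 * low word in. */
	static inline uint64_t join_u64(uint32_t high, uint32_t low)
	{
		return ((uint64_t)high << 32) | low;
	}

sys32_fadvise64_wrapper, sys_sync_file_range_wrapper and
sys_fallocate_wrapper below use the same construction for their loff_t
arguments.
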
- .globl sys32_fadvise64_wrapper
-sys32_fadvise64_wrapper:
+ENTRY(sys32_fadvise64_wrapper)
lgfr %r2,%r2 # int
sllg %r3,%r3,32 # get high word of 64bit loff_t
or %r3,%r4 # get low word of 64bit loff_t
@@ -1257,81 +1069,68 @@ sys32_fadvise64_wrapper:
lgfr %r5,%r6 # int
jg sys32_fadvise64
- .globl sys32_fadvise64_64_wrapper
-sys32_fadvise64_64_wrapper:
+ENTRY(sys32_fadvise64_64_wrapper)
llgtr %r2,%r2 # struct fadvise64_64_args *
jg sys32_fadvise64_64
- .globl sys32_clock_settime_wrapper
-sys32_clock_settime_wrapper:
+ENTRY(sys32_clock_settime_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_clock_settime
- .globl sys32_clock_gettime_wrapper
-sys32_clock_gettime_wrapper:
+ENTRY(sys32_clock_gettime_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_clock_gettime
- .globl sys32_clock_getres_wrapper
-sys32_clock_getres_wrapper:
+ENTRY(sys32_clock_getres_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_clock_getres
- .globl sys32_clock_nanosleep_wrapper
-sys32_clock_nanosleep_wrapper:
+ENTRY(sys32_clock_nanosleep_wrapper)
lgfr %r2,%r2 # clockid_t (int)
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct compat_timespec *
llgtr %r5,%r5 # struct compat_timespec *
jg compat_sys_clock_nanosleep
- .globl sys32_timer_create_wrapper
-sys32_timer_create_wrapper:
+ENTRY(sys32_timer_create_wrapper)
lgfr %r2,%r2 # timer_t (int)
llgtr %r3,%r3 # struct compat_sigevent *
llgtr %r4,%r4 # timer_t *
jg compat_sys_timer_create
- .globl sys32_timer_settime_wrapper
-sys32_timer_settime_wrapper:
+ENTRY(sys32_timer_settime_wrapper)
lgfr %r2,%r2 # timer_t (int)
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct compat_itimerspec *
llgtr %r5,%r5 # struct compat_itimerspec *
jg compat_sys_timer_settime
- .globl sys32_timer_gettime_wrapper
-sys32_timer_gettime_wrapper:
+ENTRY(sys32_timer_gettime_wrapper)
lgfr %r2,%r2 # timer_t (int)
llgtr %r3,%r3 # struct compat_itimerspec *
jg compat_sys_timer_gettime
- .globl sys32_timer_getoverrun_wrapper
-sys32_timer_getoverrun_wrapper:
+ENTRY(sys32_timer_getoverrun_wrapper)
lgfr %r2,%r2 # timer_t (int)
jg sys_timer_getoverrun
- .globl sys32_timer_delete_wrapper
-sys32_timer_delete_wrapper:
+ENTRY(sys32_timer_delete_wrapper)
lgfr %r2,%r2 # timer_t (int)
jg sys_timer_delete
- .globl sys32_io_setup_wrapper
-sys32_io_setup_wrapper:
+ENTRY(sys32_io_setup_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # u32 *
jg compat_sys_io_setup
- .globl sys32_io_destroy_wrapper
-sys32_io_destroy_wrapper:
+ENTRY(sys32_io_destroy_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
jg sys_io_destroy
- .globl sys32_io_getevents_wrapper
-sys32_io_getevents_wrapper:
+ENTRY(sys32_io_getevents_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
lgfr %r3,%r3 # long
lgfr %r4,%r4 # long
@@ -1339,49 +1138,42 @@ sys32_io_getevents_wrapper:
llgtr %r6,%r6 # struct compat_timespec *
jg compat_sys_io_getevents
- .globl sys32_io_submit_wrapper
-sys32_io_submit_wrapper:
+ENTRY(sys32_io_submit_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
lgfr %r3,%r3 # long
llgtr %r4,%r4 # struct iocb **
jg compat_sys_io_submit
- .globl sys32_io_cancel_wrapper
-sys32_io_cancel_wrapper:
+ENTRY(sys32_io_cancel_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
llgtr %r3,%r3 # struct iocb *
llgtr %r4,%r4 # struct io_event *
jg sys_io_cancel
- .globl compat_sys_statfs64_wrapper
-compat_sys_statfs64_wrapper:
+ENTRY(compat_sys_statfs64_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # compat_size_t
llgtr %r4,%r4 # struct compat_statfs64 *
jg compat_sys_statfs64
- .globl compat_sys_fstatfs64_wrapper
-compat_sys_fstatfs64_wrapper:
+ENTRY(compat_sys_fstatfs64_wrapper)
llgfr %r2,%r2 # unsigned int fd
llgfr %r3,%r3 # compat_size_t
llgtr %r4,%r4 # struct compat_statfs64 *
jg compat_sys_fstatfs64
- .globl compat_sys_mq_open_wrapper
-compat_sys_mq_open_wrapper:
+ENTRY(compat_sys_mq_open_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
llgfr %r4,%r4 # mode_t
llgtr %r5,%r5 # struct compat_mq_attr *
jg compat_sys_mq_open
- .globl sys32_mq_unlink_wrapper
-sys32_mq_unlink_wrapper:
+ENTRY(sys32_mq_unlink_wrapper)
llgtr %r2,%r2 # const char *
jg sys_mq_unlink
- .globl compat_sys_mq_timedsend_wrapper
-compat_sys_mq_timedsend_wrapper:
+ENTRY(compat_sys_mq_timedsend_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # size_t
@@ -1389,8 +1181,7 @@ compat_sys_mq_timedsend_wrapper:
llgtr %r6,%r6 # const struct compat_timespec *
jg compat_sys_mq_timedsend
- .globl compat_sys_mq_timedreceive_wrapper
-compat_sys_mq_timedreceive_wrapper:
+ENTRY(compat_sys_mq_timedreceive_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
@@ -1398,21 +1189,18 @@ compat_sys_mq_timedreceive_wrapper:
llgtr %r6,%r6 # const struct compat_timespec *
jg compat_sys_mq_timedreceive
- .globl compat_sys_mq_notify_wrapper
-compat_sys_mq_notify_wrapper:
+ENTRY(compat_sys_mq_notify_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # struct compat_sigevent *
jg compat_sys_mq_notify
- .globl compat_sys_mq_getsetattr_wrapper
-compat_sys_mq_getsetattr_wrapper:
+ENTRY(compat_sys_mq_getsetattr_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # struct compat_mq_attr *
llgtr %r4,%r4 # struct compat_mq_attr *
jg compat_sys_mq_getsetattr
- .globl compat_sys_add_key_wrapper
-compat_sys_add_key_wrapper:
+ENTRY(compat_sys_add_key_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
llgtr %r4,%r4 # const void *
@@ -1420,16 +1208,14 @@ compat_sys_add_key_wrapper:
llgfr %r6,%r6 # (key_serial_t) u32
jg sys_add_key
- .globl compat_sys_request_key_wrapper
-compat_sys_request_key_wrapper:
+ENTRY(compat_sys_request_key_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
llgtr %r4,%r4 # const void *
llgfr %r5,%r5 # (key_serial_t) u32
jg sys_request_key
- .globl sys32_remap_file_pages_wrapper
-sys32_remap_file_pages_wrapper:
+ENTRY(sys32_remap_file_pages_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -1437,8 +1223,7 @@ sys32_remap_file_pages_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_remap_file_pages
- .globl compat_sys_waitid_wrapper
-compat_sys_waitid_wrapper:
+ENTRY(compat_sys_waitid_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # pid_t
llgtr %r4,%r4 # siginfo_emu31_t *
@@ -1446,65 +1231,56 @@ compat_sys_waitid_wrapper:
llgtr %r6,%r6 # struct rusage_emu31 *
jg compat_sys_waitid
- .globl compat_sys_kexec_load_wrapper
-compat_sys_kexec_load_wrapper:
+ENTRY(compat_sys_kexec_load_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # struct kexec_segment *
llgfr %r5,%r5 # unsigned long
jg compat_sys_kexec_load
- .globl sys_ioprio_set_wrapper
-sys_ioprio_set_wrapper:
+ENTRY(sys_ioprio_set_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_ioprio_set
- .globl sys_ioprio_get_wrapper
-sys_ioprio_get_wrapper:
+ENTRY(sys_ioprio_get_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_ioprio_get
- .globl sys_inotify_add_watch_wrapper
-sys_inotify_add_watch_wrapper:
+ENTRY(sys_inotify_add_watch_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # u32
jg sys_inotify_add_watch
- .globl sys_inotify_rm_watch_wrapper
-sys_inotify_rm_watch_wrapper:
+ENTRY(sys_inotify_rm_watch_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # u32
jg sys_inotify_rm_watch
- .globl compat_sys_openat_wrapper
-compat_sys_openat_wrapper:
+ENTRY(compat_sys_openat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
lgfr %r5,%r5 # int
jg compat_sys_openat
- .globl sys_mkdirat_wrapper
-sys_mkdirat_wrapper:
+ENTRY(sys_mkdirat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
jg sys_mkdirat
- .globl sys_mknodat_wrapper
-sys_mknodat_wrapper:
+ENTRY(sys_mknodat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
llgfr %r5,%r5 # unsigned int
jg sys_mknodat
- .globl sys_fchownat_wrapper
-sys_fchownat_wrapper:
+ENTRY(sys_fchownat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # uid_t
@@ -1512,38 +1288,33 @@ sys_fchownat_wrapper:
lgfr %r6,%r6 # int
jg sys_fchownat
- .globl compat_sys_futimesat_wrapper
-compat_sys_futimesat_wrapper:
+ENTRY(compat_sys_futimesat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # struct timeval *
jg compat_sys_futimesat
- .globl sys32_fstatat64_wrapper
-sys32_fstatat64_wrapper:
+ENTRY(sys32_fstatat64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # struct stat64 *
lgfr %r5,%r5 # int
jg sys32_fstatat64
- .globl sys_unlinkat_wrapper
-sys_unlinkat_wrapper:
+ENTRY(sys_unlinkat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
jg sys_unlinkat
- .globl sys_renameat_wrapper
-sys_renameat_wrapper:
+ENTRY(sys_renameat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
llgtr %r5,%r5 # const char *
jg sys_renameat
- .globl sys_linkat_wrapper
-sys_linkat_wrapper:
+ENTRY(sys_linkat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
@@ -1551,37 +1322,32 @@ sys_linkat_wrapper:
lgfr %r6,%r6 # int
jg sys_linkat
- .globl sys_symlinkat_wrapper
-sys_symlinkat_wrapper:
+ENTRY(sys_symlinkat_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
llgtr %r4,%r4 # const char *
jg sys_symlinkat
- .globl sys_readlinkat_wrapper
-sys_readlinkat_wrapper:
+ENTRY(sys_readlinkat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgtr %r4,%r4 # char *
lgfr %r5,%r5 # int
jg sys_readlinkat
- .globl sys_fchmodat_wrapper
-sys_fchmodat_wrapper:
+ENTRY(sys_fchmodat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # mode_t
jg sys_fchmodat
- .globl sys_faccessat_wrapper
-sys_faccessat_wrapper:
+ENTRY(sys_faccessat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
jg sys_faccessat
- .globl compat_sys_pselect6_wrapper
-compat_sys_pselect6_wrapper:
+ENTRY(compat_sys_pselect6_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # fd_set *
llgtr %r4,%r4 # fd_set *
@@ -1591,8 +1357,7 @@ compat_sys_pselect6_wrapper:
stg %r0,160(%r15)
jg compat_sys_pselect6
- .globl compat_sys_ppoll_wrapper
-compat_sys_ppoll_wrapper:
+ENTRY(compat_sys_ppoll_wrapper)
llgtr %r2,%r2 # struct pollfd *
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # struct timespec *
@@ -1600,26 +1365,22 @@ compat_sys_ppoll_wrapper:
llgfr %r6,%r6 # size_t
jg compat_sys_ppoll
- .globl sys_unshare_wrapper
-sys_unshare_wrapper:
+ENTRY(sys_unshare_wrapper)
llgfr %r2,%r2 # unsigned long
jg sys_unshare
- .globl compat_sys_set_robust_list_wrapper
-compat_sys_set_robust_list_wrapper:
+ENTRY(compat_sys_set_robust_list_wrapper)
llgtr %r2,%r2 # struct compat_robust_list_head *
llgfr %r3,%r3 # size_t
jg compat_sys_set_robust_list
- .globl compat_sys_get_robust_list_wrapper
-compat_sys_get_robust_list_wrapper:
+ENTRY(compat_sys_get_robust_list_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_uptr_t *
llgtr %r4,%r4 # compat_size_t *
jg compat_sys_get_robust_list
- .globl sys_splice_wrapper
-sys_splice_wrapper:
+ENTRY(sys_splice_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # loff_t *
lgfr %r4,%r4 # int
@@ -1629,8 +1390,7 @@ sys_splice_wrapper:
stg %r0,160(%r15)
jg sys_splice
- .globl sys_sync_file_range_wrapper
-sys_sync_file_range_wrapper:
+ENTRY(sys_sync_file_range_wrapper)
lgfr %r2,%r2 # int
sllg %r3,%r3,32 # get high word of 64bit loff_t
or %r3,%r4 # get low word of 64bit loff_t
@@ -1639,31 +1399,27 @@ sys_sync_file_range_wrapper:
llgf %r5,164(%r15) # unsigned int
jg sys_sync_file_range
- .globl sys_tee_wrapper
-sys_tee_wrapper:
+ENTRY(sys_tee_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgfr %r4,%r4 # size_t
llgfr %r5,%r5 # unsigned int
jg sys_tee
- .globl compat_sys_vmsplice_wrapper
-compat_sys_vmsplice_wrapper:
+ENTRY(compat_sys_vmsplice_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_iovec *
llgfr %r4,%r4 # unsigned int
llgfr %r5,%r5 # unsigned int
jg compat_sys_vmsplice
- .globl sys_getcpu_wrapper
-sys_getcpu_wrapper:
+ENTRY(sys_getcpu_wrapper)
llgtr %r2,%r2 # unsigned *
llgtr %r3,%r3 # unsigned *
llgtr %r4,%r4 # struct getcpu_cache *
jg sys_getcpu
- .globl compat_sys_epoll_pwait_wrapper
-compat_sys_epoll_pwait_wrapper:
+ENTRY(compat_sys_epoll_pwait_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct compat_epoll_event *
lgfr %r4,%r4 # int
@@ -1673,34 +1429,29 @@ compat_sys_epoll_pwait_wrapper:
stg %r0,160(%r15)
jg compat_sys_epoll_pwait
- .globl compat_sys_utimes_wrapper
-compat_sys_utimes_wrapper:
+ENTRY(compat_sys_utimes_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_timeval *
jg compat_sys_utimes
- .globl compat_sys_utimensat_wrapper
-compat_sys_utimensat_wrapper:
+ENTRY(compat_sys_utimensat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # struct compat_timespec *
lgfr %r5,%r5 # int
jg compat_sys_utimensat
- .globl compat_sys_signalfd_wrapper
-compat_sys_signalfd_wrapper:
+ENTRY(compat_sys_signalfd_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_sigset_t *
llgfr %r4,%r4 # compat_size_t
jg compat_sys_signalfd
- .globl sys_eventfd_wrapper
-sys_eventfd_wrapper:
+ENTRY(sys_eventfd_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_eventfd
- .globl sys_fallocate_wrapper
-sys_fallocate_wrapper:
+ENTRY(sys_fallocate_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
sllg %r4,%r4,32 # get high word of 64bit loff_t
@@ -1709,94 +1460,80 @@ sys_fallocate_wrapper:
l %r5,164(%r15) # get low word of 64bit loff_t
jg sys_fallocate
- .globl sys_timerfd_create_wrapper
-sys_timerfd_create_wrapper:
+ENTRY(sys_timerfd_create_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_timerfd_create
- .globl compat_sys_timerfd_settime_wrapper
-compat_sys_timerfd_settime_wrapper:
+ENTRY(compat_sys_timerfd_settime_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct compat_itimerspec *
llgtr %r5,%r5 # struct compat_itimerspec *
jg compat_sys_timerfd_settime
- .globl compat_sys_timerfd_gettime_wrapper
-compat_sys_timerfd_gettime_wrapper:
+ENTRY(compat_sys_timerfd_gettime_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct compat_itimerspec *
jg compat_sys_timerfd_gettime
- .globl compat_sys_signalfd4_wrapper
-compat_sys_signalfd4_wrapper:
+ENTRY(compat_sys_signalfd4_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_sigset_t *
llgfr %r4,%r4 # compat_size_t
lgfr %r5,%r5 # int
jg compat_sys_signalfd4
- .globl sys_eventfd2_wrapper
-sys_eventfd2_wrapper:
+ENTRY(sys_eventfd2_wrapper)
llgfr %r2,%r2 # unsigned int
lgfr %r3,%r3 # int
jg sys_eventfd2
- .globl sys_inotify_init1_wrapper
-sys_inotify_init1_wrapper:
+ENTRY(sys_inotify_init1_wrapper)
lgfr %r2,%r2 # int
jg sys_inotify_init1
- .globl sys_pipe2_wrapper
-sys_pipe2_wrapper:
+ENTRY(sys_pipe2_wrapper)
llgtr %r2,%r2 # u32 *
lgfr %r3,%r3 # int
jg sys_pipe2 # branch to system call
- .globl sys_dup3_wrapper
-sys_dup3_wrapper:
+ENTRY(sys_dup3_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
lgfr %r4,%r4 # int
jg sys_dup3 # branch to system call
- .globl sys_epoll_create1_wrapper
-sys_epoll_create1_wrapper:
+ENTRY(sys_epoll_create1_wrapper)
lgfr %r2,%r2 # int
jg sys_epoll_create1 # branch to system call
- .globl sys32_readahead_wrapper
-sys32_readahead_wrapper:
+ENTRY(sys32_readahead_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # u32
llgfr %r4,%r4 # u32
lgfr %r5,%r5 # s32
jg sys32_readahead # branch to system call
- .globl sys32_sendfile64_wrapper
-sys32_sendfile64_wrapper:
+ENTRY(sys32_sendfile64_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # compat_loff_t *
lgfr %r5,%r5 # s32
jg sys32_sendfile64 # branch to system call
- .globl sys_tkill_wrapper
-sys_tkill_wrapper:
+ENTRY(sys_tkill_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # int
jg sys_tkill # branch to system call
- .globl sys_tgkill_wrapper
-sys_tgkill_wrapper:
+ENTRY(sys_tgkill_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # pid_t
lgfr %r4,%r4 # int
jg sys_tgkill # branch to system call
- .globl compat_sys_keyctl_wrapper
-compat_sys_keyctl_wrapper:
+ENTRY(compat_sys_keyctl_wrapper)
llgfr %r2,%r2 # u32
llgfr %r3,%r3 # u32
llgfr %r4,%r4 # u32
@@ -1804,8 +1541,7 @@ compat_sys_keyctl_wrapper:
llgfr %r6,%r6 # u32
jg compat_sys_keyctl # branch to system call
- .globl compat_sys_preadv_wrapper
-compat_sys_preadv_wrapper:
+ENTRY(compat_sys_preadv_wrapper)
llgfr %r2,%r2 # unsigned long
llgtr %r3,%r3 # compat_iovec *
llgfr %r4,%r4 # unsigned long
@@ -1813,8 +1549,7 @@ compat_sys_preadv_wrapper:
llgfr %r6,%r6 # u32
jg compat_sys_preadv # branch to system call
- .globl compat_sys_pwritev_wrapper
-compat_sys_pwritev_wrapper:
+ENTRY(compat_sys_pwritev_wrapper)
llgfr %r2,%r2 # unsigned long
llgtr %r3,%r3 # compat_iovec *
llgfr %r4,%r4 # unsigned long
@@ -1822,16 +1557,14 @@ compat_sys_pwritev_wrapper:
llgfr %r6,%r6 # u32
jg compat_sys_pwritev # branch to system call
- .globl compat_sys_rt_tgsigqueueinfo_wrapper
-compat_sys_rt_tgsigqueueinfo_wrapper:
+ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper)
lgfr %r2,%r2 # compat_pid_t
lgfr %r3,%r3 # compat_pid_t
lgfr %r4,%r4 # int
llgtr %r5,%r5 # struct compat_siginfo *
jg compat_sys_rt_tgsigqueueinfo # branch to system call
- .globl sys_perf_event_open_wrapper
-sys_perf_event_open_wrapper:
+ENTRY(sys_perf_event_open_wrapper)
llgtr %r2,%r2 # const struct perf_event_attr *
lgfr %r3,%r3 # pid_t
lgfr %r4,%r4 # int
@@ -1839,29 +1572,25 @@ sys_perf_event_open_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_perf_event_open # branch to system call
- .globl sys_clone_wrapper
-sys_clone_wrapper:
+ENTRY(sys_clone_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # int *
llgtr %r5,%r5 # int *
jg sys_clone # branch to system call
- .globl sys32_execve_wrapper
-sys32_execve_wrapper:
+ENTRY(sys32_execve_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # compat_uptr_t *
llgtr %r4,%r4 # compat_uptr_t *
jg sys32_execve # branch to system call
- .globl sys_fanotify_init_wrapper
-sys_fanotify_init_wrapper:
+ENTRY(sys_fanotify_init_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_fanotify_init # branch to system call
- .globl sys_fanotify_mark_wrapper
-sys_fanotify_mark_wrapper:
+ENTRY(sys_fanotify_mark_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned int
sllg %r4,%r4,32 # get high word of 64bit mask
@@ -1870,16 +1599,14 @@ sys_fanotify_mark_wrapper:
llgt %r6,164(%r15) # char *
jg sys_fanotify_mark # branch to system call
- .globl sys_prlimit64_wrapper
-sys_prlimit64_wrapper:
+ENTRY(sys_prlimit64_wrapper)
lgfr %r2,%r2 # pid_t
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # const struct rlimit64 __user *
llgtr %r5,%r5 # struct rlimit64 __user *
jg sys_prlimit64 # branch to system call
- .globl sys_name_to_handle_at_wrapper
-sys_name_to_handle_at_wrapper:
+ENTRY(sys_name_to_handle_at_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char __user *
llgtr %r4,%r4 # struct file_handle __user *
@@ -1887,21 +1614,18 @@ sys_name_to_handle_at_wrapper:
lgfr %r6,%r6 # int
jg sys_name_to_handle_at
- .globl compat_sys_open_by_handle_at_wrapper
-compat_sys_open_by_handle_at_wrapper:
+ENTRY(compat_sys_open_by_handle_at_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct file_handle __user *
lgfr %r4,%r4 # int
jg compat_sys_open_by_handle_at
- .globl compat_sys_clock_adjtime_wrapper
-compat_sys_clock_adjtime_wrapper:
+ENTRY(compat_sys_clock_adjtime_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timex __user *
jg compat_sys_clock_adjtime
- .globl sys_syncfs_wrapper
-sys_syncfs_wrapper:
+ENTRY(sys_syncfs_wrapper)
lgfr %r2,%r2 # int
jg sys_syncfs
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 0476174dfff5..3eab7cfab07c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -9,8 +9,8 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -197,8 +197,7 @@ STACK_SIZE = 1 << STACK_SHIFT
* Returns:
* gpr2 = prev
*/
- .globl __switch_to
-__switch_to:
+ENTRY(__switch_to)
basr %r1,0
0: l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
@@ -224,8 +223,7 @@ __critical_start:
* are executed with interrupts enabled.
*/
- .globl system_call
-system_call:
+ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -388,8 +386,7 @@ sysc_tracenogo:
#
# a new process exits the kernel with ret_from_fork
#
- .globl ret_from_fork
-ret_from_fork:
+ENTRY(ret_from_fork)
l %r13,__LC_SVC_NEW_PSW+4
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
@@ -405,8 +402,7 @@ ret_from_fork:
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
- .globl kernel_execve
-kernel_execve:
+ENTRY(kernel_execve)
stm %r12,%r15,48(%r15)
lr %r14,%r15
l %r13,__LC_SVC_NEW_PSW+4
@@ -438,8 +434,7 @@ kernel_execve:
* Program check handler routine
*/
- .globl pgm_check_handler
-pgm_check_handler:
+ENTRY(pgm_check_handler)
/*
* First we need to check for a special case:
* Single stepping an instruction that disables the PER event mask will
@@ -565,8 +560,7 @@ kernel_per:
* IO interrupt handler routine
*/
- .globl io_int_handler
-io_int_handler:
+ENTRY(io_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -703,8 +697,7 @@ io_notify_resume:
* External interrupt handler routine
*/
- .globl ext_int_handler
-ext_int_handler:
+ENTRY(ext_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -731,8 +724,7 @@ __critical_end:
* Machine check handler routines
*/
- .globl mcck_int_handler
-mcck_int_handler:
+ENTRY(mcck_int_handler)
stck __LC_MCCK_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
@@ -818,8 +810,7 @@ mcck_return:
*/
#ifdef CONFIG_SMP
__CPUINIT
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
spt restart_vtime-restart_base(%r1)
@@ -848,8 +839,7 @@ restart_vtime:
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
lpsw restart_crash-restart_base(%r1)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 17a6f83a2d67..66729eb7bbc5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,10 +5,9 @@
#include <linux/signal.h>
#include <asm/ptrace.h>
-typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long);
-extern pgm_check_handler_t *pgm_check_table[128];
-pgm_check_handler_t do_protection_exception;
-pgm_check_handler_t do_dat_exception;
+void do_protection_exception(struct pt_regs *, long, unsigned long);
+void do_dat_exception(struct pt_regs *, long, unsigned long);
+void do_asce_exception(struct pt_regs *, long, unsigned long);
extern int sysctl_userprocess_debug;
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index d61967e2eab0..7a0fd426ca92 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -9,8 +9,8 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -56,15 +56,28 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
_TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
+_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#define BASED(name) name-system_call(%r13)
+ .macro SPP newpp
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
+ jz .+8
+ .insn s,0xb2800000,\newpp
+#endif
+ .endm
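
The SPP helper above wraps the SET PROGRAM PARAMETER instruction, emitted
as a raw .insn encoding (opcode 0xb280) presumably so the file still
assembles with binutils that lack the mnemonic; the tm/jz guard turns it
into a no-op when the machine flags say the facility is absent.
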
+
.macro HANDLE_SIE_INTERCEPT
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- lg %r3,__LC_SIE_HOOK
- ltgr %r3,%r3
+ tm __TI_flags+6(%r12),_TIF_SIE>>8
jz 0f
- basr %r14,%r3
+ SPP __LC_CMF_HPP # set host id
+ clc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
+ jl 0f
+ clc SP_PSW+8(8,%r15),BASED(.Lsie_done)
+ jhe 0f
+ mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
0:
#endif
.endm
@@ -206,8 +219,7 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
* Returns:
* gpr2 = prev
*/
- .globl __switch_to
-__switch_to:
+ENTRY(__switch_to)
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
@@ -232,8 +244,7 @@ __critical_start:
* are executed with interrupts enabled.
*/
- .globl system_call
-system_call:
+ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -395,8 +406,7 @@ sysc_tracenogo:
#
# a new process exits the kernel with ret_from_fork
#
- .globl ret_from_fork
-ret_from_fork:
+ENTRY(ret_from_fork)
lg %r13,__LC_SVC_NEW_PSW+8
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
@@ -411,8 +421,7 @@ ret_from_fork:
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
- .globl kernel_execve
-kernel_execve:
+ENTRY(kernel_execve)
stmg %r12,%r15,96(%r15)
lgr %r14,%r15
aghi %r15,-SP_SIZE
@@ -442,8 +451,7 @@ kernel_execve:
* Program check handler routine
*/
- .globl pgm_check_handler
-pgm_check_handler:
+ENTRY(pgm_check_handler)
/*
* First we need to check for a special case:
* Single stepping an instruction that disables the PER event mask will
@@ -465,6 +473,7 @@ pgm_check_handler:
xc SP_ILC(4,%r15),SP_ILC(%r15)
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -472,7 +481,6 @@ pgm_check_handler:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
pgm_no_vtime:
- HANDLE_SIE_INTERCEPT
stg %r11,SP_ARGS(%r15)
lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE
@@ -507,6 +515,7 @@ pgm_per_std:
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime2
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -514,7 +523,6 @@ pgm_per_std:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
pgm_no_vtime2:
- HANDLE_SIE_INTERCEPT
lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per
@@ -571,14 +579,14 @@ kernel_per:
/*
* IO interrupt handler routine
*/
- .globl io_int_handler
-io_int_handler:
+ENTRY(io_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
CREATE_STACK_FRAME __LC_SAVE_AREA+40
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz io_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -586,7 +594,6 @@ io_int_handler:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
LAST_BREAK
io_no_vtime:
- HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
@@ -706,14 +713,14 @@ io_notify_resume:
/*
* External interrupt handler routine
*/
- .globl ext_int_handler
-ext_int_handler:
+ENTRY(ext_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
CREATE_STACK_FRAME __LC_SAVE_AREA+40
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz ext_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -721,7 +728,6 @@ ext_int_handler:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
LAST_BREAK
ext_no_vtime:
- HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
lghi %r1,4096
la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -736,8 +742,7 @@ __critical_end:
/*
* Machine check handler routines
*/
- .globl mcck_int_handler
-mcck_int_handler:
+ENTRY(mcck_int_handler)
stck __LC_MCCK_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
@@ -785,6 +790,7 @@ mcck_int_main:
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz mcck_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
@@ -804,7 +810,6 @@ mcck_no_vtime:
stosm __SF_EMPTY(%r15),0x04 # turn dat on
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jno mcck_return
- HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
@@ -823,8 +828,7 @@ mcck_done:
*/
#ifdef CONFIG_SMP
__CPUINIT
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
spt restart_vtime-restart_base(%r1)
@@ -851,8 +855,7 @@ restart_vtime:
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
lpswe restart_crash-restart_base(%r1)
@@ -1036,6 +1039,56 @@ cleanup_io_restore_insn:
.Lcritical_end:
.quad __critical_end
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+ENTRY(sie64a)
+ stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ stg %r2,__SF_EMPTY(%r15) # save control block pointer
+ stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+ oi __TI_flags+6(%r14),_TIF_SIE>>8
+sie_loop:
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+ tm __TI_flags+7(%r14),_TIF_EXIT_SIE
+ jnz sie_exit
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ SPP __SF_EMPTY(%r15) # set guest id
+ sie 0(%r14)
+sie_done:
+ SPP __LC_CMF_HPP # set host id
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+sie_exit:
+ ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lghi %r2,0
+ br %r14
+sie_fault:
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+ ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lghi %r2,-EFAULT
+ br %r14
+
+ .align 8
+.Lsie_loop:
+ .quad sie_loop
+.Lsie_done:
+ .quad sie_done
+
+ .section __ex_table,"a"
+ .quad sie_loop,sie_fault
+ .previous
+#endif
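
A hypothetical caller-side view of the sie64a convention documented above
(the prototype and wrapper below are assumptions for illustration; the
real KVM call site is outside this patch):

	/* %r2 = sie control block, %r3 = guest gpr save area; returns 0 on
	 * a normal interception exit, -EFAULT if the SIE instruction itself
	 * faulted (the sie_fault path above). */
	extern int sie64a(void *sie_control_block, unsigned long *guest_gprs);

	static int run_guest_once(void *scb, unsigned long *gprs)
	{
		/* returns once the guest intercepts or _TIF_EXIT_SIE work
		 * is pending for the host */
		return sie64a(scb, gprs);
	}
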
+
.section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esame
.globl sys_call_table
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fb317bf2c378..2d781bab37bb 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -22,6 +22,7 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
@@ -383,8 +384,7 @@ iplstart:
# doesn't need a builtin ipl record.
#
.org 0x800
- .globl start
-start:
+ENTRY(start)
stm %r0,%r15,0x07b0 # store registers
basr %r12,%r0
.base:
@@ -448,8 +448,7 @@ start:
# or linload or SALIPL
#
.org 0x10000
- .globl startup
-startup:
+ENTRY(startup)
basr %r13,0 # get base
.LPG0:
xc 0x200(256),0x200 # partially clear lowcore
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index b8f8dc126102..f21954b44dc1 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -11,13 +11,13 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
basr %r13,0 # get base
.LPG1:
@@ -45,7 +45,7 @@ startup_continue:
# virtual and never return ...
.align 8
.Lentry:.long 0x00080000,0x80000000 + _stext
-.Lctl: .long 0x04b50002 # cr0: various things
+.Lctl: .long 0x04b50000 # cr0: various things
.long 0 # cr1: primary space segment table
.long .Lduct # cr2: dispatchable unit control table
.long 0 # cr3: instruction authorization
@@ -78,8 +78,7 @@ startup_continue:
.Lbase_cc:
.long sched_clock_base_cc
- .globl _ehead
-_ehead:
+ENTRY(_ehead)
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000 - 0x11000 # head.o ends at 0x11000
@@ -88,8 +87,8 @@ _ehead:
#
# startup-code, running in absolute addressing mode
#
- .globl _stext
-_stext: basr %r13,0 # get base
+ENTRY(_stext)
+ basr %r13,0 # get base
.LPG3:
# check control registers
stctl %c0,%c15,0(%r15)
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index cdef68717416..ae5d492b069e 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -11,13 +11,13 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
larl %r1,sched_clock_base_cc
mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
larl %r13,.LPG1 # get base
@@ -46,7 +46,7 @@ startup_continue:
.align 16
.LPG1:
.Lentry:.quad 0x0000000180000000,_stext
-.Lctl: .quad 0x04350002 # cr0: various things
+.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
@@ -76,8 +76,7 @@ startup_continue:
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
- .globl _ehead
-_ehead:
+ENTRY(_ehead)
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000 - 0x11000 # head.o ends at 0x11000
@@ -86,8 +85,8 @@ _ehead:
#
# startup-code, running in absolute addressing mode
#
- .globl _stext
-_stext: basr %r13,0 # get base
+ENTRY(_stext)
+ basr %r13,0 # get base
.LPG3:
# check control registers
stctg %c0,%c15,0(%r15)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e3264f6a9720..1f4050d45f78 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -88,15 +88,6 @@ int show_interrupts(struct seq_file *p, void *v)
}
/*
- * For compatibilty only. S/390 specific setup of interrupts et al. is done
- * much later in init_channel_subsystem().
- */
-void __init init_IRQ(void)
-{
- /* nothing... */
-}
-
-/*
* Switch to the asynchronous interrupt stack for softirq execution.
*/
asmlinkage void do_softirq(void)
@@ -144,28 +135,45 @@ void init_irq_proc(void)
#endif
/*
- * ext_int_hash[index] is the start of the list for all external interrupts
- * that hash to this index. With the current set of external interrupts
- * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
- * iucv and 0x2603 pfault) this is always the first element.
+ * ext_int_hash[index] is the list head for all external interrupts that hash
+ * to this index.
*/
+static struct list_head ext_int_hash[256];
struct ext_int_info {
- struct ext_int_info *next;
ext_int_handler_t handler;
u16 code;
+ struct list_head entry;
+ struct rcu_head rcu;
};
-static struct ext_int_info *ext_int_hash[256];
+/* ext_int_hash_lock protects the handler lists for external interrupts */
+DEFINE_SPINLOCK(ext_int_hash_lock);
+
+static void __init init_external_interrupts(void)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
+ INIT_LIST_HEAD(&ext_int_hash[idx]);
+}
static inline int ext_hash(u16 code)
{
return (code + (code >> 9)) & 0xff;
}
+static void ext_int_hash_update(struct rcu_head *head)
+{
+ struct ext_int_info *p = container_of(head, struct ext_int_info, rcu);
+
+ kfree(p);
+}
+
int register_external_interrupt(u16 code, ext_int_handler_t handler)
{
struct ext_int_info *p;
+ unsigned long flags;
int index;
p = kmalloc(sizeof(*p), GFP_ATOMIC);
@@ -174,33 +182,27 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler)
p->code = code;
p->handler = handler;
index = ext_hash(code);
- p->next = ext_int_hash[index];
- ext_int_hash[index] = p;
+
+ spin_lock_irqsave(&ext_int_hash_lock, flags);
+ list_add_rcu(&p->entry, &ext_int_hash[index]);
+ spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
EXPORT_SYMBOL(register_external_interrupt);
int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
{
- struct ext_int_info *p, *q;
- int index;
+ struct ext_int_info *p;
+ unsigned long flags;
+ int index = ext_hash(code);
- index = ext_hash(code);
- q = NULL;
- p = ext_int_hash[index];
- while (p) {
- if (p->code == code && p->handler == handler)
- break;
- q = p;
- p = p->next;
- }
- if (!p)
- return -ENOENT;
- if (q)
- q->next = p->next;
- else
- ext_int_hash[index] = p->next;
- kfree(p);
+ spin_lock_irqsave(&ext_int_hash_lock, flags);
+ list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
+ if (p->code == code && p->handler == handler) {
+ list_del_rcu(&p->entry);
+ call_rcu(&p->rcu, ext_int_hash_update);
+ }
+ spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
EXPORT_SYMBOL(unregister_external_interrupt);
@@ -224,15 +226,22 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
if (code != 0x1004)
__get_cpu_var(s390_idle).nohz_delay = 1;
+
index = ext_hash(code);
- for (p = ext_int_hash[index]; p; p = p->next) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
if (likely(p->code == code))
p->handler(ext_int_code, param32, param64);
- }
+ rcu_read_unlock();
irq_exit();
set_irq_regs(old_regs);
}
+void __init init_IRQ(void)
+{
+ init_external_interrupts();
+}
+
static DEFINE_SPINLOCK(sc_irq_lock);
static int sc_irq_refcount;
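The irq.c rework above swaps a hand-rolled singly linked list for the
standard RCU pattern: writers serialize on ext_int_hash_lock and use
list_add_rcu()/list_del_rcu(), the interrupt-time reader walks the bucket
under rcu_read_lock() without taking any lock, and a removed entry is only
freed via call_rcu() once every pre-existing reader has finished. A
self-contained sketch of the same pattern, with hypothetical names:

	/* Hedged sketch of the RCU-protected list pattern adopted above;
	 * every name here is illustrative, none is the kernel's. */
	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_handler {
		u16 code;
		void (*fn)(u16 code);
		struct list_head entry;
		struct rcu_head rcu;
	};

	static LIST_HEAD(demo_list);
	static DEFINE_SPINLOCK(demo_lock);	/* serializes writers only */

	static void demo_register(struct demo_handler *h)
	{
		spin_lock(&demo_lock);
		list_add_rcu(&h->entry, &demo_list);
		spin_unlock(&demo_lock);
	}

	static void demo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct demo_handler, rcu));
	}

	static void demo_unregister(struct demo_handler *h)
	{
		spin_lock(&demo_lock);
		list_del_rcu(&h->entry);
		spin_unlock(&demo_lock);
		call_rcu(&h->rcu, demo_free_rcu);	/* free after grace period */
	}

	static void demo_dispatch(u16 code)
	{
		struct demo_handler *h;

		rcu_read_lock();			/* lockless reader */
		list_for_each_entry_rcu(h, &demo_list, entry)
			if (h->code == code)
				h->fn(code);
		rcu_read_unlock();
	}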
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 1e6a55795628..7e2c38ba1373 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -5,21 +5,19 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
.section .kprobes.text, "ax"
- .globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
br %r14
- .globl _mcount
-_mcount:
+ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
- .globl ftrace_caller
-ftrace_caller:
+ENTRY(ftrace_caller)
#endif
stm %r2,%r5,16(%r15)
bras %r1,2f
@@ -41,8 +39,7 @@ ftrace_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
l %r2,100(%r15)
l %r3,152(%r15)
- .globl ftrace_graph_caller
-ftrace_graph_caller:
+ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
# bras %r14,prepare_ftrace_return
@@ -56,8 +53,7 @@ ftrace_graph_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl return_to_handler
-return_to_handler:
+ENTRY(return_to_handler)
stm %r2,%r5,16(%r15)
st %r14,56(%r15)
lr %r0,%r15
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index e73667286ac0..f70cadec68fc 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -5,21 +5,19 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
.section .kprobes.text, "ax"
- .globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
br %r14
- .globl _mcount
-_mcount:
+ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
- .globl ftrace_caller
-ftrace_caller:
+ENTRY(ftrace_caller)
#endif
larl %r1,function_trace_stop
icm %r1,0xf,0(%r1)
@@ -37,8 +35,7 @@ ftrace_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
lg %r2,168(%r15)
lg %r3,272(%r15)
- .globl ftrace_graph_caller
-ftrace_graph_caller:
+ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
# bras %r14,prepare_ftrace_return
@@ -52,8 +49,7 @@ ftrace_graph_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl return_to_handler
-return_to_handler:
+ENTRY(return_to_handler)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-160
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index f7167ee4604c..dfcb3436bad0 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -45,13 +45,6 @@
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_64BIT */
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
@@ -176,15 +169,6 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
return 0;
}
-int
-apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- unsigned int relsec, struct module *me)
-{
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
static int
apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
struct module *me)
@@ -409,7 +393,3 @@ int module_finalize(const Elf_Ehdr *hdr,
me->arch.syminfo = NULL;
return 0;
}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
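Deleting module_alloc(), apply_relocate() and module_arch_cleanup() here
is safe because kernel/module.c now carries __weak default implementations
of the loader hooks, so an architecture only overrides the ones it
actually customizes. The generic fallbacks look roughly like this
(abridged; see kernel/module.c for the full set):

	/* Roughly the generic __weak defaults that make the deleted s390
	 * stubs redundant; abridged sketch, not the complete set. */
	void * __weak module_alloc(unsigned long size)
	{
		return vmalloc_exec(size);
	}

	void __weak module_arch_cleanup(struct module *mod)
	{
	}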
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index cb899d9f8505..303d961c3bb5 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,14 +6,15 @@
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#
# do_reipl_asm
# Parameter: r2 = schid of reipl device
#
- .globl do_reipl_asm
-do_reipl_asm: basr %r13,0
+ENTRY(do_reipl_asm)
+ basr %r13,0
.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
.Lpg1: # do store status of all registers
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 9eabbc90795d..78eb7cfbd3d1 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,6 +4,7 @@
* Denis Joseph Barrow,
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#
@@ -11,8 +12,8 @@
# Parameter: r2 = schid of reipl device
#
- .globl do_reipl_asm
-do_reipl_asm: basr %r13,0
+ENTRY(do_reipl_asm)
+ basr %r13,0
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
.Lpg1: # do store status of all registers
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index 3b456b80bcee..c91d70aede91 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -8,6 +8,8 @@
*
*/
+#include <linux/linkage.h>
+
/*
* moves the new kernel to its destination...
* %r2 = pointer to first kimage_entry_t
@@ -22,8 +24,7 @@
*/
.text
- .globl relocate_kernel
- relocate_kernel:
+ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
stnsm sys_msk-.base(%r13),0xfb # disable DAT
@@ -112,6 +113,7 @@
.byte 0
.align 8
relocate_kernel_end:
+ .align 8
.globl relocate_kernel_len
relocate_kernel_len:
.quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 1f9ea2067b59..7c3ce589a7f0 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -8,6 +8,8 @@
*
*/
+#include <linux/linkage.h>
+
/*
* moves the new kernel to its destination...
* %r2 = pointer to first kimage_entry_t
@@ -23,8 +25,7 @@
*/
.text
- .globl relocate_kernel
- relocate_kernel:
+ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
stnsm sys_msk-.base(%r13),0xfb # disable DAT
@@ -115,6 +116,7 @@
.byte 0
.align 8
relocate_kernel_end:
+ .align 8
.globl relocate_kernel_len
relocate_kernel_len:
.quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 656fcbb9bd83..57b536649b00 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -1,6 +1,10 @@
#include <linux/module.h>
+#include <linux/kvm_host.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+EXPORT_SYMBOL(sie64a);
+#endif
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 2e82fdd89320..95792d846bb6 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -8,6 +8,8 @@
*
*/
+#include <linux/linkage.h>
+
LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
@@ -260,8 +262,7 @@ _sclp_print:
# R2 = 0 on success, 1 on failure
#
- .globl _sclp_print_early
-_sclp_print_early:
+ENTRY(_sclp_print_early)
stm %r6,%r15,24(%r15) # save registers
ahi %r15,-96 # create stack frame
#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1d55c95f617c..a6d85c0a7f20 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -654,7 +654,8 @@ int __cpu_disable(void)
/* disable all external interrupts */
cr_parms.orvals[0] = 0;
cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
- 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4);
+ 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
+ 1 << 4);
/* disable all I/O interrupts */
cr_parms.orvals[6] = 0;
cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
index 20530dd2eab1..bfe070bc7659 100644
--- a/arch/s390/kernel/switch_cpu.S
+++ b/arch/s390/kernel/switch_cpu.S
@@ -5,6 +5,7 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
@@ -16,9 +17,7 @@
# %r6 - destination cpu
.section .text
- .align 4
- .globl smp_switch_to_cpu
-smp_switch_to_cpu:
+ENTRY(smp_switch_to_cpu)
stm %r6,%r15,__SF_GPRS(%r15)
lr %r1,%r15
ahi %r15,-STACK_FRAME_OVERHEAD
@@ -33,8 +32,7 @@ smp_switch_to_cpu:
brc 2,2b /* busy, try again */
3: j 3b
- .globl smp_restart_cpu
-smp_restart_cpu:
+ENTRY(smp_restart_cpu)
basr %r13,0
0: la %r1,.gprregs_addr-0b(%r13)
l %r1,0(%r1)
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
index 5be3f43898f9..fcc42d799e41 100644
--- a/arch/s390/kernel/switch_cpu64.S
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -5,6 +5,7 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
@@ -16,9 +17,7 @@
# %r6 - destination cpu
.section .text
- .align 4
- .globl smp_switch_to_cpu
-smp_switch_to_cpu:
+ENTRY(smp_switch_to_cpu)
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
@@ -31,8 +30,7 @@ smp_switch_to_cpu:
brc 2,2b /* busy, try again */
3: j 3b
- .globl smp_restart_cpu
-smp_restart_cpu:
+ENTRY(smp_restart_cpu)
larl %r1,.gprregs
lmg %r0,%r15,0(%r1)
1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 1f066e46e83e..51bcdb50a230 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -7,6 +7,7 @@
* Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
+#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -22,9 +23,7 @@
* This function runs with disabled interrupts.
*/
.section .text
- .align 4
- .globl swsusp_arch_suspend
-swsusp_arch_suspend:
+ENTRY(swsusp_arch_suspend)
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
@@ -112,8 +111,7 @@ swsusp_arch_suspend:
* Then we return to the function that called swsusp_arch_suspend().
* swsusp_arch_resume() runs with disabled interrupts.
*/
- .globl swsusp_arch_resume
-swsusp_arch_resume:
+ENTRY(swsusp_arch_resume)
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a65d2e82f61d..e9372c77cced 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/tracehook.h>
+#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -43,14 +43,10 @@
#include <asm/debug.h>
#include "entry.h"
-pgm_check_handler_t *pgm_check_table[128];
+void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
int show_unhandled_signals;
-extern pgm_check_handler_t do_protection_exception;
-extern pgm_check_handler_t do_dat_exception;
-extern pgm_check_handler_t do_asce_exception;
-
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_64BIT
@@ -329,10 +325,17 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
void __kprobes do_per_trap(struct pt_regs *regs)
{
+ siginfo_t info;
+
if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
return;
- if (tracehook_consider_fatal_signal(current, SIGTRAP))
- force_sig(SIGTRAP, current);
+ if (!current->ptrace)
+ return;
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_HWBKPT;
+ info.si_addr = (void *) current->thread.per_event.address;
+ force_sig_info(SIGTRAP, &info, current);
}
static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
@@ -425,9 +428,13 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
- if (tracehook_consider_fatal_signal(current, SIGTRAP))
- force_sig(SIGTRAP, current);
- else
+ if (current->ptrace) {
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_addr = location;
+ force_sig_info(SIGTRAP, &info, current);
+ } else
signal = SIGILL;
#ifdef CONFIG_MATHEMU
} else if (opcode[0] == 0xb3) {
@@ -489,9 +496,8 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
#ifdef CONFIG_MATHEMU
-asmlinkage void specification_exception(struct pt_regs *regs,
- long pgm_int_code,
- unsigned long trans_exc_code)
+void specification_exception(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
__u8 opcode[6];
__u16 __user *location = NULL;
@@ -648,7 +654,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}
-asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
+void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
bust_spinlocks(1);
printk("Kernel stack overflow.\n");
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index f66a1bdbb61d..a21634173a66 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -37,6 +37,5 @@ config KVM
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 860d26514c08..3975722bb19d 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -10,5 +10,5 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
-kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
+kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 03c716a0f01f..c86f6ae43f76 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,5 +1,5 @@
/*
- * gaccess.h - access guest memory
+ * access.h - access guest memory
*
* Copyright IBM Corp. 2008,2009
*
@@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
unsigned long guestaddr)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if (guestaddr < 2 * PAGE_SIZE)
guestaddr += prefix;
else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
guestaddr -= prefix;
- if (guestaddr > memsize)
- return (void __user __force *) ERR_PTR(-EFAULT);
-
- guestaddr += origin;
-
- return (void __user *) guestaddr;
+ return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
@@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
unsigned long guestdest,
- const void *from, unsigned long n)
+ void *from, unsigned long n)
{
int rc;
unsigned long i;
- const u8 *data = from;
+ u8 *data = from;
for (i = 0; i < n; i++) {
rc = put_guest_u8(vcpu, guestdest++, *(data++));
@@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
return 0;
}
+static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
+ unsigned long guestdest,
+ void *from, unsigned long n)
+{
+ int r;
+ void __user *uptr;
+ unsigned long size;
+
+ if (guestdest + n < guestdest)
+ return -EFAULT;
+
+ /* simple case: all within one segment table entry? */
+ if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
+ uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_to_user(uptr, from, n);
+
+ if (r)
+ r = -EFAULT;
+
+ goto out;
+ }
+
+ /* copy first segment */
+ uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ size = PMD_SIZE - (guestdest & ~PMD_MASK);
+
+ r = copy_to_user(uptr, from, size);
+
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ from += size;
+ n -= size;
+ guestdest += size;
+
+ /* copy full segments */
+ while (n >= PMD_SIZE) {
+ uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_to_user(uptr, from, PMD_SIZE);
+
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ from += PMD_SIZE;
+ n -= PMD_SIZE;
+ guestdest += PMD_SIZE;
+ }
+
+ /* copy the tail segment */
+ if (n) {
+ uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_to_user(uptr, from, n);
+
+ if (r)
+ r = -EFAULT;
+ }
+out:
+ return r;
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+ unsigned long guestdest,
+ void *from, unsigned long n)
+{
+ return __copy_to_guest_fast(vcpu, guestdest, from, n);
+}
+
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
- const void *from, unsigned long n)
+ void *from, unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
goto slowpath;
@@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
guestdest -= prefix;
- if (guestdest + n > memsize)
- return -EFAULT;
-
- if (guestdest + n < guestdest)
- return -EFAULT;
-
- guestdest += origin;
-
- return copy_to_user((void __user *) guestdest, from, n);
+ return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
@@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
return 0;
}
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
- unsigned long guestsrc, unsigned long n)
+static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
+ unsigned long guestsrc,
+ unsigned long n)
{
- unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+ int r;
+ void __user *uptr;
+ unsigned long size;
- if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
- goto slowpath;
+ if (guestsrc + n < guestsrc)
+ return -EFAULT;
- if ((guestsrc < prefix) && (guestsrc + n > prefix))
- goto slowpath;
+ /* simple case: all within one segment table entry? */
+ if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
+ uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
- if ((guestsrc < prefix + 2 * PAGE_SIZE)
- && (guestsrc + n > prefix + 2 * PAGE_SIZE))
- goto slowpath;
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
- if (guestsrc < 2 * PAGE_SIZE)
- guestsrc += prefix;
- else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
- guestsrc -= prefix;
+ r = copy_from_user(to, uptr, n);
- if (guestsrc + n > memsize)
- return -EFAULT;
+ if (r)
+ r = -EFAULT;
- if (guestsrc + n < guestsrc)
- return -EFAULT;
+ goto out;
+ }
- guestsrc += origin;
+ /* copy first segment */
+ uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
- return copy_from_user(to, (void __user *) guestsrc, n);
-slowpath:
- return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
- unsigned long guestdest,
- const void *from, unsigned long n)
-{
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+ size = PMD_SIZE - (guestsrc & ~PMD_MASK);
- if (guestdest + n > memsize)
- return -EFAULT;
+ r = copy_from_user(to, uptr, size);
- if (guestdest + n < guestdest)
- return -EFAULT;
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ to += size;
+ n -= size;
+ guestsrc += size;
+
+ /* copy full segments */
+ while (n >= PMD_SIZE) {
+ uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_from_user(to, uptr, PMD_SIZE);
+
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ to += PMD_SIZE;
+ n -= PMD_SIZE;
+ guestsrc += PMD_SIZE;
+ }
+
+ /* copy the tail segment */
+ if (n) {
+ uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
- guestdest += origin;
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
- return copy_to_user((void __user *) guestdest, from, n);
+ r = copy_from_user(to, uptr, n);
+
+ if (r)
+ r = -EFAULT;
+ }
+out:
+ return r;
}
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+ return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+}
- if (guestsrc + n > memsize)
- return -EFAULT;
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+ unsigned long guestsrc, unsigned long n)
+{
+ unsigned long prefix = vcpu->arch.sie_block->prefix;
- if (guestsrc + n < guestsrc)
- return -EFAULT;
+ if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+ goto slowpath;
- guestsrc += origin;
+ if ((guestsrc < prefix) && (guestsrc + n > prefix))
+ goto slowpath;
+
+ if ((guestsrc < prefix + 2 * PAGE_SIZE)
+ && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if (guestsrc < 2 * PAGE_SIZE)
+ guestsrc += prefix;
+ else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+ guestsrc -= prefix;
- return copy_from_user(to, (void __user *) guestsrc, n);
+ return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+slowpath:
+ return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif
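Both fast-path copy helpers above split the range at segment boundaries
because gmap_fault() translates exactly one segment (PMD_SIZE, 1 MB on
s390) at a time. Worked example of the head-chunk arithmetic: for
guestdest = 0x123400, PMD_SIZE - (guestdest & ~PMD_MASK) yields
0x100000 - 0x23400 = 0xdcc00 bytes up to the next 1 MB boundary. A
stand-alone user-space illustration of the same walk:

	/* Stand-alone illustration of the PMD-boundary chunking used by
	 * __copy_to_guest_fast(); SEG_SIZE stands in for s390's PMD_SIZE. */
	#include <stdio.h>

	#define SEG_SIZE 0x100000UL
	#define SEG_MASK (~(SEG_SIZE - 1))

	static void walk_chunks(unsigned long dest, unsigned long n)
	{
		unsigned long size;

		if ((dest & SEG_MASK) == ((dest + n) & SEG_MASK)) {
			printf("single chunk: %#lx+%#lx\n", dest, n);
			return;
		}
		size = SEG_SIZE - (dest & ~SEG_MASK);	/* head chunk */
		printf("head: %#lx+%#lx\n", dest, size);
		dest += size;
		n -= size;
		while (n >= SEG_SIZE) {			/* full segments */
			printf("full: %#lx+%#lx\n", dest, SEG_SIZE);
			dest += SEG_SIZE;
			n -= SEG_SIZE;
		}
		if (n)					/* tail chunk */
			printf("tail: %#lx+%#lx\n", dest, n);
	}

	int main(void)
	{
		walk_chunks(0x123400, 0x250000);
		return 0;
	}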
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index f7b6df45d8be..c7c51898984e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -105,6 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
[0xb7] = handle_lctl,
+ [0xe5] = kvm_s390_handle_e5,
[0xeb] = handle_lctlg,
};
@@ -159,22 +160,42 @@ static int handle_stop(struct kvm_vcpu *vcpu)
static int handle_validity(struct kvm_vcpu *vcpu)
{
+ unsigned long vmaddr;
int viwhy = vcpu->arch.sie_block->ipb >> 16;
int rc;
vcpu->stat.exit_validity++;
- if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
- <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
- rc = fault_in_pages_writeable((char __user *)
- vcpu->arch.sie_block->gmsor +
- vcpu->arch.sie_block->prefix,
- 2*PAGE_SIZE);
- if (rc)
+ if (viwhy == 0x37) {
+ vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
+ vcpu->arch.gmap);
+ if (IS_ERR_VALUE(vmaddr)) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ rc = fault_in_pages_writeable((char __user *) vmaddr,
+ PAGE_SIZE);
+ if (rc) {
+ /* user will receive sigsegv, exit to user */
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
+ vcpu->arch.gmap);
+ if (IS_ERR_VALUE(vmaddr)) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ rc = fault_in_pages_writeable((char __user *) vmaddr,
+ PAGE_SIZE);
+ if (rc) {
/* user will receive sigsegv, exit to user */
rc = -EOPNOTSUPP;
+ goto out;
+ }
} else
rc = -EOPNOTSUPP;
+out:
if (rc)
VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
viwhy);
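For the viwhy == 0x37 case the handler now runs the same
translate-then-pin sequence once per page of the guest's 8 KB prefix
area. The two copies could equally be folded into a loop; a hedged sketch
of that alternative shape (not the committed code):

	/* Sketch only, not the committed code: one iteration per 4 KB page
	 * of the guest's 8 KB prefix area. */
	static int pin_prefix_pages(struct kvm_vcpu *vcpu)
	{
		unsigned long gaddr = vcpu->arch.sie_block->prefix;
		unsigned long vmaddr;
		int i;

		for (i = 0; i < 2; i++, gaddr += PAGE_SIZE) {
			vmaddr = gmap_fault(gaddr, vcpu->arch.gmap);
			if (IS_ERR_VALUE(vmaddr))
				return -EOPNOTSUPP;
			if (fault_in_pages_writeable((char __user *) vmaddr,
						     PAGE_SIZE))
				/* user will receive sigsegv, exit to user */
				return -EOPNOTSUPP;
		}
		return 0;
	}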
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 35c21bf910c5..c9aeb4b4d0b8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -128,6 +128,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
if (rc == -EFAULT)
exception = 1;
+ rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
+ if (rc == -EFAULT)
+ exception = 1;
+
rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 67345ae7ce8d..f17296e4fc89 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
+ { "instruction_tprot", VCPU_STAT(instruction_tprot) },
{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -189,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
+ kvm->arch.gmap = gmap_alloc(current->mm);
+ if (!kvm->arch.gmap)
+ goto out_nogmap;
+
return 0;
+out_nogmap:
+ debug_unregister(kvm->arch.dbf);
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
out_err:
@@ -234,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_free_vcpus(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
+ gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.gmap = vcpu->kvm->arch.gmap;
return 0;
}
@@ -284,8 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
- set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
+ atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
vcpu->arch.sie_block->ecb = 6;
vcpu->arch.sie_block->eca = 0xC1002001U;
vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -453,6 +461,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
local_irq_disable();
kvm_guest_enter();
local_irq_enable();
+ gmap_enable(vcpu->arch.gmap);
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -461,6 +470,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
+ gmap_disable(vcpu->arch.gmap);
local_irq_disable();
kvm_guest_exit();
local_irq_enable();
@@ -474,17 +484,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
sigset_t sigsaved;
rerun_vcpu:
- if (vcpu->requests)
- if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
- kvm_s390_vcpu_set_mem(vcpu);
-
- /* verify, that memory has been registered */
- if (!vcpu->arch.sie_block->gmslm) {
- vcpu_put(vcpu);
- VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
- return -EINVAL;
- }
-
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -545,7 +544,7 @@ rerun_vcpu:
return rc;
}
-static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
unsigned long n, int prefix)
{
if (prefix)
@@ -562,7 +561,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
*/
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
- const unsigned char archmode = 1;
+ unsigned char archmode = 1;
int prefix;
if (addr == KVM_S390_STORE_STATUS_NOADDR) {
@@ -680,10 +679,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr)
return -EINVAL;
- if (mem->userspace_addr & (PAGE_SIZE - 1))
+ if (mem->userspace_addr & 0xffffful)
return -EINVAL;
- if (mem->memory_size & (PAGE_SIZE - 1))
+ if (mem->memory_size & 0xffffful)
return -EINVAL;
if (!user_alloc)
@@ -697,15 +696,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- int i;
- struct kvm_vcpu *vcpu;
+ int rc;
- /* request update of sie control block for all available vcpus */
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
- continue;
- kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
- }
+
+ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+ mem->guest_phys_addr, mem->memory_size);
+ if (rc)
+ printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
+ return;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a7b7586626db..99b0b7597115 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,35 +58,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
-static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.sie_block->gmslm
- - vcpu->arch.sie_block->gmsor
- - VIRTIODESCSPACE + 1ul;
-}
-
-static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
-{
- int idx;
- struct kvm_memory_slot *mem;
- struct kvm_memslots *memslots;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- memslots = kvm_memslots(vcpu->kvm);
-
- mem = &memslots->memslots[0];
-
- vcpu->arch.sie_block->gmsor = mem->userspace_addr;
- vcpu->arch.sie_block->gmslm =
- mem->userspace_addr +
- (mem->npages << PAGE_SHIFT) +
- VIRTIODESCSPACE - 1ul;
-
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
/* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 73c47bd95db3..391626361084 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -326,3 +326,52 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
}
return -EOPNOTSUPP;
}
+
+static int handle_tprot(struct kvm_vcpu *vcpu)
+{
+ int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
+ int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
+ int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
+ int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
+	u64 address1 = disp1 + (base1 ? vcpu->arch.guest_gprs[base1] : 0);
+	u64 address2 = disp2 + (base2 ? vcpu->arch.guest_gprs[base2] : 0);
+ struct vm_area_struct *vma;
+
+ vcpu->stat.instruction_tprot++;
+
+ /* we only handle the Linux memory detection case:
+ * access key == 0
+ * guest DAT == off
+ * everything else goes to userspace. */
+ if (address2 & 0xf0)
+ return -EOPNOTSUPP;
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
+ return -EOPNOTSUPP;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,
+ (unsigned long) __guestaddr_to_user(vcpu, address1));
+ if (!vma) {
+ up_read(&current->mm->mmap_sem);
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ }
+
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
+ vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
+ if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
+ vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+}
+
+int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
+{
+ /* For e5xx... instructions we only handle TPROT */
+ if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
+ return handle_tprot(vcpu);
+ return -EOPNOTSUPP;
+}
+
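handle_tprot() decodes both operands straight out of the 32-bit ipb field:
bits 31-28 hold base1, 27-16 disp1, 15-12 base2 and 11-0 disp2. Worked
example: ipb = 0x50632010 gives base1 = 5, disp1 = 0x063, base2 = 2,
disp2 = 0x010, so address1 = gprs[5] + 0x63 and address2 = gprs[2] + 0x10.
A quick user-space self-check of the masks:

	/* Self-check of the ipb decode used by handle_tprot(); the example
	 * value 0x50632010 is made up for illustration. */
	#include <assert.h>

	int main(void)
	{
		unsigned int ipb = 0x50632010;

		assert(((ipb & 0xf0000000) >> 28) == 0x5);	/* base1 */
		assert(((ipb & 0x0fff0000) >> 16) == 0x063);	/* disp1 */
		assert(((ipb & 0x0000f000) >> 12) == 0x2);	/* base2 */
		assert((ipb & 0x00000fff) == 0x010);		/* disp2 */
		return 0;
	}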
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
deleted file mode 100644
index 5faa1b1b23fa..000000000000
--- a/arch/s390/kvm/sie64a.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * sie64a.S - low level sie call
- *
- * Copyright IBM Corp. 2008,2010
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
- */
-
-#include <linux/errno.h>
-#include <asm/asm-offsets.h>
-#include <asm/setup.h>
-#include <asm/asm-offsets.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
-_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
-
-/*
- * offsets into stackframe
- * SP_ = offsets into stack sie64 is called with
- * SPI_ = offsets into irq stack
- */
-SP_GREGS = __SF_EMPTY
-SP_HOOK = __SF_EMPTY+8
-SP_GPP = __SF_EMPTY+16
-SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-
-
- .macro SPP newpp
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
- jz 0f
- .insn s,0xb2800000,\newpp
-0:
- .endm
-
-sie_irq_handler:
- SPP __LC_CMF_HPP # set host id
- larl %r2,sie_inst
- clg %r2,SPI_PSW+8(0,%r15) # intercepted sie
- jne 1f
- xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
- lg %r2,__LC_THREAD_INFO # pointer thread_info struct
- tm __TI_flags+7(%r2),_TIF_EXIT_SIE
- jz 0f
- larl %r2,sie_exit # work pending, leave sie
- stg %r2,SPI_PSW+8(0,%r15)
- br %r14
-0: larl %r2,sie_reenter # re-enter with guest id
- stg %r2,SPI_PSW+8(0,%r15)
-1: br %r14
-
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
- .globl sie64a
-sie64a:
- stg %r3,SP_GREGS(%r15) # save guest register save area
- stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry
- lgr %r14,%r2 # pointer to sie control block
- larl %r5,sie_irq_handler
- stg %r2,SP_GPP(%r15)
- stg %r5,SP_HOOK(%r15) # save hook target
- lmg %r0,%r13,0(%r3) # load guest gprs 0-13
-sie_reenter:
- mvc __LC_SIE_HOOK(8),SP_HOOK(%r15)
- SPP SP_GPP(%r15) # set guest id
-sie_inst:
- sie 0(%r14)
- xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
- SPP __LC_CMF_HPP # set host id
-sie_exit:
- lg %r14,SP_GREGS(%r15)
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
- lghi %r2,0
- lmg %r6,%r14,__SF_GPRS(%r15)
- br %r14
-
-sie_err:
- xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
- SPP __LC_CMF_HPP # set host id
- lg %r14,SP_GREGS(%r15)
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
- lghi %r2,-EFAULT
- lmg %r6,%r14,__SF_GPRS(%r15)
- br %r14
-
- .section __ex_table,"a"
- .quad sie_inst,sie_err
- .quad sie_exit,sie_err
- .quad sie_reenter,sie_err
- .previous
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 702276f5e2fa..d6a50c1fb2e6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -189,10 +189,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
/* make sure that the new value is valid memory */
address = address & 0x7fffe000u;
- if ((copy_from_user(&tmp, (void __user *)
- (address + vcpu->arch.sie_block->gmsor) , 1)) ||
- (copy_from_user(&tmp, (void __user *)(address +
- vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
+ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
+ copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
*reg |= SIGP_STAT_INVALID_PARAMETER;
return 1; /* invalid parameter */
}
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
index eb1df632e749..d321329130ec 100644
--- a/arch/s390/lib/qrnnd.S
+++ b/arch/s390/lib/qrnnd.S
@@ -1,5 +1,7 @@
# S/390 __udiv_qrnnd
+#include <linux/linkage.h>
+
# r2 : &__r
# r3 : upper half of 64 bit word n
# r4 : lower half of 64 bit word n
@@ -8,8 +10,7 @@
# the quotient q is to be returned
.text
- .globl __udiv_qrnnd
-__udiv_qrnnd:
+ENTRY(__udiv_qrnnd)
 	st %r2,24(%r15) # store pointer to remainder for later
lr %r0,%r3 # reload n
lr %r1,%r4
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe103e891e7a..9564fc779b27 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -299,13 +299,28 @@ static inline int do_exception(struct pt_regs *regs, int access,
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_ALLOW_RETRY;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
-retry:
down_read(&mm->mmap_sem);
+#ifdef CONFIG_PGSTE
+ if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
+ address = gmap_fault(address,
+ (struct gmap *) S390_lowcore.gmap);
+ if (address == -EFAULT) {
+ fault = VM_FAULT_BADMAP;
+ goto out_up;
+ }
+ if (address == -ENOMEM) {
+ fault = VM_FAULT_OOM;
+ goto out_up;
+ }
+ }
+#endif
+
+retry:
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
@@ -345,17 +360,18 @@ retry:
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ down_read(&mm->mmap_sem);
goto retry;
}
}
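Two distinct changes meet in this fault.c hunk: faults taken while a task
runs under SIE are first translated through the gmap before the VMA
lookup, and the retry path re-takes mmap_sem because handle_mm_fault()
drops it when it returns VM_FAULT_RETRY. A sketch of that retry contract
with a hypothetical caller (not the s390 handler itself):

	/* Sketch of the FAULT_FLAG_ALLOW_RETRY locking contract the hunk
	 * above relies on; demo_fault() is a hypothetical helper. */
	static int demo_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long address)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
		int fault;

		down_read(&mm->mmap_sem);
	retry:
		fault = handle_mm_fault(mm, vma, address, flags);
		if (fault & VM_FAULT_RETRY) {
			/* handle_mm_fault() released mmap_sem before waiting
			 * on the page; re-take it and retry exactly once. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			down_read(&mm->mmap_sem);
			goto retry;
		}
		up_read(&mm->mmap_sem);
		return fault;
	}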
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index a4d856db9154..597bb2d27c3c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -35,7 +35,7 @@ int arch_prepare_hugepage(struct page *page)
if (MACHINE_HAS_HPAGE)
return 0;
- ptep = (pte_t *) pte_alloc_one(&init_mm, address);
+ ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
if (!ptep)
return -ENOMEM;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37a23c223705..2adb23938a7f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
+#include <linux/slab.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -133,30 +134,374 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
}
#endif
-static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+#ifdef CONFIG_PGSTE
+
+/**
+ * gmap_alloc - allocate a guest address space
+ * @mm: pointer to the parent mm_struct
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_alloc(struct mm_struct *mm)
{
- unsigned int old, new;
+ struct gmap *gmap;
+ struct page *page;
+ unsigned long *table;
- do {
- old = atomic_read(v);
- new = old ^ bits;
- } while (atomic_cmpxchg(v, old, new) != old);
- return new;
+ gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+ if (!gmap)
+ goto out;
+ INIT_LIST_HEAD(&gmap->crst_list);
+ gmap->mm = mm;
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ if (!page)
+ goto out_free;
+ list_add(&page->lru, &gmap->crst_list);
+ table = (unsigned long *) page_to_phys(page);
+ crst_table_init(table, _REGION1_ENTRY_EMPTY);
+ gmap->table = table;
+ list_add(&gmap->list, &mm->context.gmap_list);
+ return gmap;
+
+out_free:
+ kfree(gmap);
+out:
+ return NULL;
}
+EXPORT_SYMBOL_GPL(gmap_alloc);
-/*
- * page table entry allocation/free routines.
+static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
+{
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct page *page;
+
+ if (*table & _SEGMENT_ENTRY_INV)
+ return 0;
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry(rmap, &mp->mapper, list) {
+ if (rmap->entry != table)
+ continue;
+ list_del(&rmap->list);
+ kfree(rmap);
+ break;
+ }
+ *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ return 1;
+}
+
+static void gmap_flush_tlb(struct gmap *gmap)
+{
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte((unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+}
+
+/**
+ * gmap_free - free a guest address space
+ * @gmap: pointer to the guest address space structure
*/
-#ifdef CONFIG_PGSTE
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
+void gmap_free(struct gmap *gmap)
+{
+ struct page *page, *next;
+ unsigned long *table;
+ int i;
+
+ /* Flush tlb. */
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte((unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+
+ /* Free all segment & region tables. */
+ down_read(&gmap->mm->mmap_sem);
+ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+ table = (unsigned long *) page_to_phys(page);
+ if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
+ /* Remove gmap rmap structures for segment table. */
+ for (i = 0; i < PTRS_PER_PMD; i++, table++)
+ gmap_unlink_segment(gmap, table);
+ __free_pages(page, ALLOC_ORDER);
+ }
+ up_read(&gmap->mm->mmap_sem);
+ list_del(&gmap->list);
+ kfree(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_enable - switch primary space to the guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_enable(struct gmap *gmap)
+{
+ /* Load primary space page table origin. */
+ S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | __pa(gmap->table);
+ asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
+ S390_lowcore.gmap = (unsigned long) gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_enable);
+
+/**
+ * gmap_disable - switch back to the standard primary address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_disable(struct gmap *gmap)
+{
+ /* Load primary space page table origin. */
+ S390_lowcore.user_asce =
+ gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
+ asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
+ S390_lowcore.gmap = 0UL;
+}
+EXPORT_SYMBOL_GPL(gmap_disable);
+
+static int gmap_alloc_table(struct gmap *gmap,
+ unsigned long *table, unsigned long init)
+{
+ struct page *page;
+ unsigned long *new;
+
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ if (!page)
+ return -ENOMEM;
+ new = (unsigned long *) page_to_phys(page);
+ crst_table_init(new, init);
+ down_read(&gmap->mm->mmap_sem);
+ if (*table & _REGION_ENTRY_INV) {
+ list_add(&page->lru, &gmap->crst_list);
+ *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
+ (*table & _REGION_ENTRY_TYPE_MASK);
+ } else
+ __free_pages(page, ALLOC_ORDER);
+ up_read(&gmap->mm->mmap_sem);
+ return 0;
+}
+
+/**
+ * gmap_unmap_segment - unmap segment from the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @to: address in the guest address space
+ * @len: length of the memory area to unmap
+ *
+ * Returns 0 if the unmap succeeded, -EINVAL if not.
+ */
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
+{
+ unsigned long *table;
+ unsigned long off;
+ int flush;
+
+ if ((to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the guest addr space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if (*table & _REGION_ENTRY_INV)
+			goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if (*table & _REGION_ENTRY_INV)
+			goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if (*table & _REGION_ENTRY_INV)
+			goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Clear segment table entry in guest address space. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = _SEGMENT_ENTRY_INV;
+ }
+out:
+	up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_unmap_segment);
+
+/**
+ * gmap_map_segment - map a segment to the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @from: source address in the parent address space
+ * @to: target address in the guest address space
+ * @len: length of the memory area to map
+ *
+ * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
+ */
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+ unsigned long to, unsigned long len)
+{
+ unsigned long *table;
+ unsigned long off;
+ int flush;
+
+ if ((from | to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || from + len > PGDIR_SIZE ||
+ from + len < from || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the gmap address space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INV) &&
+ gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INV) &&
+ gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INV) &&
+ gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Store 'from' address in an invalid segment table entry. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+ }
+ up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+
+out_unmap:
+ up_read(&gmap->mm->mmap_sem);
+ gmap_unmap_segment(gmap, to, len);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gmap_map_segment);
+
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *table, vmaddr, segment;
+ struct mm_struct *mm;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct vm_area_struct *vma;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ current->thread.gmap_addr = address;
+ mm = gmap->mm;
+ /* Walk the gmap address space page table */
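+	/* Four 11-bit indices (>> 53, >> 42, >> 31, >> 20) each select one
+	 * of 2048 eight-byte entries in the region-1, region-2, region-3
+	 * and segment tables; an invalid entry at any level means the
+	 * guest address has no mapping yet. */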
+ table = gmap->table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -EFAULT;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -EFAULT;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -EFAULT;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 20) & 0x7ff);
+
+ /* Convert the gmap address to an mm address. */
+ segment = *table;
+ if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ } else if (segment & _SEGMENT_ENTRY_RO) {
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->entry = table;
+ list_add(&rmap->list, &mp->mapper);
+ /* Set gmap segment table entry to page table. */
+ *table = pmd_val(*pmd) & PAGE_MASK;
+ return vmaddr | (address & ~PMD_MASK);
+ }
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(gmap_fault);
+
+void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry =
+ _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
{
struct page *page;
unsigned long *table;
+ struct gmap_pgtable *mp;
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
+ if (!mp) {
+ __free_page(page);
+ return NULL;
+ }
pgtable_page_ctor(page);
+ mp->vmaddr = vmaddr & PMD_MASK;
+ INIT_LIST_HEAD(&mp->mapper);
+ page->index = (unsigned long) mp;
atomic_set(&page->_mapcount, 3);
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
@@ -167,24 +512,57 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
static inline void page_table_free_pgste(unsigned long *table)
{
struct page *page;
+ struct gmap_pgtable *mp;
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ BUG_ON(!list_empty(&mp->mapper));
 	pgtable_page_dtor(page);
atomic_set(&page->_mapcount, -1);
+ kfree(mp);
__free_page(page);
}
-#endif
-unsigned long *page_table_alloc(struct mm_struct *mm)
+#else /* CONFIG_PGSTE */
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
+{
+	return NULL;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+}
+
+static inline void gmap_unmap_notifier(struct mm_struct *mm,
+ unsigned long *table)
+{
+}
+
+#endif /* CONFIG_PGSTE */
+
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+ unsigned int old, new;
+
+ do {
+ old = atomic_read(v);
+ new = old ^ bits;
+ } while (atomic_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
struct page *page;
unsigned long *table;
unsigned int mask, bit;
-#ifdef CONFIG_PGSTE
if (mm_has_pgste(mm))
- return page_table_alloc_pgste(mm);
-#endif
+ return page_table_alloc_pgste(mm, vmaddr);
/* Allocate fragments of a 4K page as 1K/2K page table */
spin_lock_bh(&mm->context.list_lock);
mask = FRAG_MASK;
@@ -222,10 +600,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned int bit, mask;
-#ifdef CONFIG_PGSTE
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm)) {
+ gmap_unmap_notifier(mm, table);
return page_table_free_pgste(table);
-#endif
+ }
/* Free 1K/2K page table fragment of a 4K page */
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
@@ -249,10 +627,8 @@ static void __page_table_free_rcu(void *table, unsigned bit)
{
struct page *page;
-#ifdef CONFIG_PGSTE
if (bit == FRAG_MASK)
return page_table_free_pgste(table);
-#endif
/* Free 1K/2K page table fragment of a 4K page */
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
@@ -269,13 +645,12 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
unsigned int bit, mask;
mm = tlb->mm;
-#ifdef CONFIG_PGSTE
if (mm_has_pgste(mm)) {
+ gmap_unmap_notifier(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;
}
-#endif
bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock_bh(&mm->context.list_lock);
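Taken together, the new pgtable.c code gives KVM a complete guest address
space API. A condensed lifecycle sketch mirroring the kvm-s390.c call
sites above (error handling elided, values illustrative):

	/* Condensed gmap lifecycle; error handling elided. gmap_demo() is a
	 * hypothetical wrapper, the calls are the API introduced above. */
	static void gmap_demo(unsigned long uaddr, unsigned long guest_size)
	{
		struct gmap *gmap;
		unsigned long vmaddr;

		gmap = gmap_alloc(current->mm);		/* VM creation */

		/* back guest real 0 with a 1 MB-aligned user mapping */
		gmap_map_segment(gmap, uaddr, 0, guest_size);

		gmap_enable(gmap);	/* enter SIE: primary ASCE -> gmap */
		/* ... sie64a() runs the guest here ... */
		gmap_disable(gmap);	/* leave SIE: restore normal ASCE */

		/* host virtual address backing a guest real address */
		vmaddr = gmap_fault(0x1000, gmap);
		(void) vmaddr;

		gmap_unmap_segment(gmap, 0, guest_size);
		gmap_free(gmap);			/* VM destruction */
	}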
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8c1970d1dd91..781ff5169560 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,12 +61,12 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
-static pte_t __ref *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
pte_t *pte;
if (slab_is_available())
- pte = (pte_t *) page_table_alloc(&init_mm);
+ pte = (pte_t *) page_table_alloc(&init_mm, address);
else
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
@@ -120,7 +120,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
}
#endif
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
diff --git a/arch/score/kernel/module.c b/arch/score/kernel/module.c
index 4de8d47becd3..469e3b64e2f2 100644
--- a/arch/score/kernel/module.c
+++ b/arch/score/kernel/module.c
@@ -27,23 +27,6 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
-void *module_alloc(unsigned long size)
-{
- return size ? vmalloc(size) : NULL;
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
-{
- return 0;
-}
-
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relindex,
struct module *me)
@@ -146,6 +129,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
+ /* Non-standard return value: most other architectures return
+ * -ENOEXEC for an unsupported relocation variant.
+ */
return 0;
}
@@ -154,12 +140,3 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
return NULL;
}
-
-/* Put in dbe list if necessary. */
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod) {}
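
The deletions in this module.c (and in the sh, sparc, tile and unicore32 ones
below) share one theme: the generic module loader now supplies default
implementations of module_alloc(), module_free(), module_frob_arch_sections(),
module_finalize(), module_arch_cleanup() and the apply_relocate*() stubs, so
an architecture keeps an override only when it genuinely needs one. A minimal
sketch of the weak-symbol mechanism this relies on (GCC attribute syntax; the
malloc-backed body is illustrative, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>

/* generic layer: a weak default that any architecture can override
 * simply by linking in a non-weak definition of the same symbol */
void * __attribute__((weak)) module_alloc(unsigned long size)
{
	printf("generic module_alloc(%lu)\n", size);
	return size ? malloc(size) : NULL;
}

int main(void)
{
	/* no strong definition is linked here, so the default runs */
	free(module_alloc(64));
	return 0;
}
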
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index bbdeb48bbf8e..748ff1920068 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -897,20 +897,4 @@ source "security/Kconfig"
source "crypto/Kconfig"
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- default n
- ---help---
- Say Y here to get to see options for using your Linux host to run other
- operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-source drivers/virtio/Kconfig
-
-endif # VIRTUALIZATION
-
source "lib/Kconfig"
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
index 4b16bf9b56bd..9670e127b7b2 100644
--- a/arch/sh/include/asm/delay.h
+++ b/arch/sh/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef __ASM_SH_DELAY_H
-#define __ASM_SH_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/sh/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
- __udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
-
-#endif /* __ASM_SH_DELAY_H */
+#include <asm-generic/delay.h>
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
index 748955df018d..fa4f724b295a 100644
--- a/arch/sh/kernel/cpu/sh4/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -180,6 +180,21 @@ static const int sh7750_cache_events
[ C(RESULT_MISS) ] = -1,
},
},
+
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
};
static int sh7750_event_map(int event)
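
The new [ C(NODE) ] rows here (and in the sh4a and sparc tables below) follow
from the perf core growing a node-level cache index: every architecture's
cache-event table must now cover that level, marking each op/result pair as
unsupported (-1, or CACHE_OP_UNSUPPORTED) where the PMU has no such counter.
A toy table with the same designated-initializer shape (all enum names and
event codes below are invented for illustration):

#include <stdio.h>

enum { OP_READ, OP_WRITE, OP_PREFETCH, OP_MAX };
enum { RES_ACCESS, RES_MISS, RES_MAX };
enum { L1D, NODE, LEVEL_MAX };

/* every level must be present, even if only as "unsupported" */
static const int cache_events[LEVEL_MAX][OP_MAX][RES_MAX] = {
	[L1D] = {
		[OP_READ] = { [RES_ACCESS] = 0x12, [RES_MISS] = 0x13 },
	},
	[NODE] = {
		[OP_READ]     = { [RES_ACCESS] = -1, [RES_MISS] = -1 },
		[OP_WRITE]    = { [RES_ACCESS] = -1, [RES_MISS] = -1 },
		[OP_PREFETCH] = { [RES_ACCESS] = -1, [RES_MISS] = -1 },
	},
};

int main(void)
{
	printf("NODE read access: %d\n",
	       cache_events[NODE][OP_READ][RES_ACCESS]);
	return 0;
}
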
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
index 17e6bebfede0..84a2c396ceee 100644
--- a/arch/sh/kernel/cpu/sh4a/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -205,6 +205,21 @@ static const int sh4a_cache_events
[ C(RESULT_MISS) ] = -1,
},
},
+
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
};
static int sh4a_event_map(int event)
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 64c807c39208..bf280c812d2f 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -256,7 +256,7 @@ out:
return ret;
}
-static struct dev_power_domain default_power_domain = {
+static struct dev_pm_domain default_pm_domain = {
.ops = {
.runtime_suspend = default_platform_runtime_suspend,
.runtime_resume = default_platform_runtime_resume,
@@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
hwblk_disable(hwblk_info, hwblk);
/* make sure driver re-inits itself once */
__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
- dev->pwr_domain = &default_power_domain;
+ dev->pm_domain = &default_pm_domain;
break;
/* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
case BUS_NOTIFY_BOUND_DRIVER:
@@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
break;
case BUS_NOTIFY_DEL_DEVICE:
- dev->pwr_domain = NULL;
+ dev->pm_domain = NULL;
break;
}
return 0;
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index 19b1f8826aef..1b525dedd29a 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -34,30 +34,6 @@
#include <asm/unaligned.h>
#include <asm/dwarf.h>
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-
- return vmalloc_exec(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -133,17 +109,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3d7b209b2178..92b3c276339a 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
return 0;
}
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
@@ -91,7 +91,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
attr.bp_len = HW_BREAKPOINT_LEN_2;
attr.bp_type = HW_BREAKPOINT_R;
- bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
+ NULL, tsk);
if (IS_ERR(bp))
return PTR_ERR(bp);
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index b51a17104b5f..d9006f8ffc14 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
*/
if (!expected) {
unaligned_fixups_notify(current, instruction, regs);
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
regs, address);
}
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 6713ca97e553..67110be83fd7 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
return error;
}
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
return error;
}
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
return error;
}
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
return error;
}
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index f76a5090d5d1..977195210653 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index d4c34d757f0d..7bebd044f2a1 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable();
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt, have no user context or are running
@@ -210,11 +210,11 @@ good_area:
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
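
Every perf_sw_event() hunk in these sh and sparc files makes the same
mechanical change: the third argument, an 'int nmi' flag, was removed from
the function's signature, so call sites shrink from (id, nr, 0, regs, addr)
to (id, nr, regs, addr). A self-contained sketch of the new calling
convention in a fault path (the stub perf_sw_event(), pt_regs and event ids
below stand in for the real <linux/perf_event.h> definitions):

#include <stdio.h>

struct pt_regs { unsigned long pc; };	/* stub */

enum sw_ids { SW_PAGE_FAULTS, SW_PAGE_FAULTS_MAJ, SW_PAGE_FAULTS_MIN };

/* new signature: no 'int nmi' between the count and the regs */
static void perf_sw_event(enum sw_ids id, unsigned int nr,
			  struct pt_regs *regs, unsigned long addr)
{
	printf("event %d: +%u at %#lx (pc %#lx)\n",
	       id, nr, addr, regs->pc);
}

static void account_fault(struct pt_regs *regs, unsigned long addr,
			  int major)
{
	perf_sw_event(SW_PAGE_FAULTS, 1, regs, addr);
	perf_sw_event(major ? SW_PAGE_FAULTS_MAJ : SW_PAGE_FAULTS_MIN,
		      1, regs, addr);
}

int main(void)
{
	struct pt_regs regs = { 0x1000 };

	account_fault(&regs, 0xbeef, 0);
	return 0;
}
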
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7f5810f5dfdc..e3430e093d43 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
/* Not an IO address, so reenable interrupts */
local_irq_enable();
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt or have no user
@@ -200,11 +200,11 @@ good_area:
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index 862e3ce92b15..02939abd356c 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -42,9 +42,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif
-struct device_node;
-extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
-
#endif /* __KERNEL__ */
#ifndef CONFIG_LEON_PCI
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 948b686ec089..2614d96141c9 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -91,9 +91,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return PCI_IRQ_NONE;
}
-struct device_node;
-extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
-
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index c7ad3fe2b252..b928b31424b1 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -205,6 +205,7 @@ do { current_thread_info()->syscall_noerror = 1; \
} while (0)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
+#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
#ifdef CONFIG_SMP
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 99ba5baa9497..da0c6c70ccb2 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -68,12 +68,6 @@ void *module_alloc(unsigned long size)
return ret;
}
-/* Free memory returned from module_core_alloc/module_init_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -107,17 +101,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -239,15 +222,4 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-#else
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
#endif /* CONFIG_SPARC64 */
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 713dc91020a6..80a87e2a3e7c 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -284,7 +284,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->sysdata = node;
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
- dev->dev.of_node = node;
+ dev->dev.of_node = of_node_get(node);
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
set_pcie_port_type(dev);
@@ -1021,12 +1021,6 @@ void arch_teardown_msi_irq(unsigned int irq)
}
#endif /* !(CONFIG_PCI_MSI) */
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
- return pdev->dev.of_node;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
struct pci_dev *ali_isa_bridge;
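
The of_create_pci_dev() hunk above is a reference-counting fix: storing a
device_node pointer in dev.of_node without taking a reference lets the node
be freed while the pci_dev still points at it. of_node_get() bumps the node's
refcount, to be dropped with of_node_put() when the device goes away. A toy
model of that get/put discipline (struct node and the helper names are
illustrative stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcount;
};

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	if (n && --n->refcount == 0) {
		printf("freeing node\n");
		free(n);
	}
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	n->refcount = 1;			/* creator's reference */
	struct node *of_node = node_get(n);	/* device takes its own */
	node_put(n);		/* creator drops out: node survives */
	node_put(of_node);	/* device teardown: freed here */
	return 0;
}
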
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 948601a066ff..a19f04195478 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -885,14 +885,6 @@ int pcibios_assign_resource(struct pci_dev *pdev, int resource)
return -ENXIO;
}
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
- struct pcidev_cookie *pc = pdev->sysdata;
-
- return pc->prom_node;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
/*
* This probably belongs here rather than ioport.c because
* we do not want this crud linked into SBus kernels.
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2cb0e1c001e2..62a034318b18 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -246,6 +246,20 @@ static const cache_map_t ultra3_cache_map = {
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+ [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+ [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+ [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+},
};
static const struct sparc_pmu ultra3_pmu = {
@@ -361,6 +375,20 @@ static const cache_map_t niagara1_cache_map = {
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+ [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+ [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+ [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+},
};
static const struct sparc_pmu niagara1_pmu = {
@@ -473,6 +501,20 @@ static const cache_map_t niagara2_cache_map = {
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+ [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+ [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+ [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+ },
+},
};
static const struct sparc_pmu niagara2_pmu = {
@@ -1277,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_event_overflow(event, 1, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
sparc_pmu_stop(event, 0);
}
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 4491f4cb2695..7efbb2f9e77f 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
unsigned long addr = compute_effective_address(regs, insn);
int err;
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
}
addr = compute_effective_address(regs, insn);
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index b2b019ea8caa..35cff1673aa4 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
@@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
@@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
@@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 36357717d691..32b626c9d815 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
BUG_ON(regs->tstate & TSTATE_PRIV);
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index a3fccde894ec..aa4d55b0bdf0 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
int retcode = 0; /* assume all succeed */
unsigned long insn;
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
#ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 56d2c44747b8..e575bd2fe381 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (tstate & TSTATE_PRIV)
die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7543ddbdadb2..aa1c1b1ce5cc 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
if (in_atomic() || !mm)
goto no_context;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
down_read(&mm->mmap_sem);
@@ -301,12 +301,10 @@ good_area:
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
- regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
} else {
current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
- regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
up_read(&mm->mmap_sem);
return;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index f92ce56a8b22..504c0622f729 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
if (in_atomic() || !mm)
goto intr_or_no_mm;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (!down_read_trylock(&mm->mmap_sem)) {
if ((regs->tstate & TSTATE_PRIV) &&
@@ -433,12 +433,10 @@ good_area:
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
- regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
} else {
current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
- regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
up_read(&mm->mmap_sem);
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index f68df69f1f67..28fa6ece9d3a 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -98,25 +98,6 @@ void module_free(struct module *mod, void *module_region)
*/
}
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- pr_err("module %s: .rel relocation unsupported\n", me->name);
- return -ENOEXEC;
-}
-
#ifdef __tilegx__
/*
* Validate that the high 16 bits of "value" is just the sign-extension of
@@ -249,15 +230,3 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
}
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- /* FIXME: perhaps remove the "writable" bit from the TLB? */
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index b88f9c047781..669fcdba31ea 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -33,6 +33,5 @@ config KVM
If unsure, say N.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index b1da91c1b200..87b659dadf3f 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
obj-$(CONFIG_BINFMT_ELF) += elfcore.o
-subarch-obj-y = lib/semaphore_32.o lib/string_32.o
+subarch-obj-y = lib/string_32.o
+subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index c1ea9eb04466..61fc99a42e10 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -9,7 +9,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
sysrq.o ksyms.o tls.o
subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
- lib/rwsem_64.o
+ lib/rwsem.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
ldt-y = ../sys-i386/ldt.o
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index 3e5a38d71a1e..8fbe8577f5e6 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -37,19 +37,6 @@ void *module_alloc(unsigned long size)
return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
}
-void module_free(struct module *module, void *region)
-{
- vfree(region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned int relindex, struct module *module)
@@ -128,25 +115,3 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
}
return 0;
}
-
-int
-apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *module)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
-int
-module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *module)
-{
- return 0;
-}
-
-void
-module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 37357a599dca..a67e014e4e44 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -20,6 +20,7 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
select HAVE_OPROFILE
+ select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_IRQ_WORK
select HAVE_IOREMAP_PROT
@@ -70,6 +71,7 @@ config X86
select IRQ_FORCED_THREADING
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_BPF_JIT if (X86_64 && NET)
+ select CLKEVT_I8253
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)
@@ -93,6 +95,10 @@ config CLOCKSOURCE_WATCHDOG
config GENERIC_CLOCKEVENTS
def_bool y
+config ARCH_CLOCKSOURCE_DATA
+ def_bool y
+ depends on X86_64
+
config GENERIC_CLOCKEVENTS_BROADCAST
def_bool y
depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
@@ -384,12 +390,21 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
+config X86_INTEL_MID
+ bool "Intel MID platform support"
+ depends on X86_32
+ depends on X86_EXTENDED_PLATFORM
+ ---help---
+ Select to build a kernel capable of supporting Intel MID platform
+ systems which do not have the PCI legacy interfaces (Moorestown,
+ Medfield). If you are building for a PC class system, say N here.
+
+if X86_INTEL_MID
+
config X86_MRST
bool "Moorestown MID platform"
depends on PCI
depends on PCI_GOANY
- depends on X86_32
- depends on X86_EXTENDED_PLATFORM
depends on X86_IO_APIC
select APB_TIMER
select I2C
@@ -404,6 +419,8 @@ config X86_MRST
nor standard legacy replacement devices/features. e.g. Moorestown does
not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+endif
+
config X86_RDC321X
bool "RDC R-321x SoC"
depends on X86_32
@@ -512,6 +529,18 @@ menuconfig PARAVIRT_GUEST
if PARAVIRT_GUEST
+config PARAVIRT_TIME_ACCOUNTING
+ bool "Paravirtual steal time accounting"
+ select PARAVIRT
+ default n
+ ---help---
+ Select this option to enable fine granularity task steal time
+ accounting. Time spent executing other tasks in parallel with
+ the current vCPU is discounted from the vCPU power. Doing this
+ accounting can have a small performance impact.
+
+ If in doubt, say N here.
+
source "arch/x86/xen/Kconfig"
config KVM_CLOCK
@@ -617,6 +646,7 @@ config HPET_EMULATE_RTC
config APB_TIMER
def_bool y if MRST
prompt "Langwell APB Timer Support" if X86_MRST
+ select DW_APB_TIMER
help
APB timer is the replacement for 8254, HPET on X86 MID platforms.
The APBT provides a stable time base on SMP
@@ -680,33 +710,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
Calgary anyway, pass 'iommu=calgary' on the kernel command line.
If unsure, say Y.
-config AMD_IOMMU
- bool "AMD IOMMU support"
- select SWIOTLB
- select PCI_MSI
- select PCI_IOV
- depends on X86_64 && PCI && ACPI
- ---help---
- With this option you can enable support for AMD IOMMU hardware in
- your system. An IOMMU is a hardware component which provides
- remapping of DMA memory accesses from devices. With an AMD IOMMU you
- can isolate the the DMA memory of different devices and protect the
- system from misbehaving device drivers or hardware.
-
- You can find out if your system has an AMD IOMMU if you look into
- your BIOS for an option to enable it or if you have an IVRS ACPI
- table.
-
-config AMD_IOMMU_STATS
- bool "Export AMD IOMMU statistics to debugfs"
- depends on AMD_IOMMU
- select DEBUG_FS
- ---help---
- This option enables code in the AMD IOMMU driver to collect various
- statistics about whats happening in the driver and exports that
- information to userspace via debugfs.
- If unsure, say N.
-
# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
def_bool y if X86_64
@@ -720,9 +723,6 @@ config SWIOTLB
config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
-config IOMMU_API
- def_bool (AMD_IOMMU || DMAR)
-
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
@@ -1942,55 +1942,6 @@ config PCI_CNB20LE_QUIRK
You should say N unless you know you need this.
-config DMAR
- bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
- depends on PCI_MSI && ACPI && EXPERIMENTAL
- help
- DMA remapping (DMAR) devices support enables independent address
- translations for Direct Memory Access (DMA) from devices.
- These DMA remapping devices are reported via ACPI tables
- and include PCI device scope covered by these DMA
- remapping devices.
-
-config DMAR_DEFAULT_ON
- def_bool y
- prompt "Enable DMA Remapping Devices by default"
- depends on DMAR
- help
- Selecting this option will enable a DMAR device at boot time if
- one is found. If this option is not selected, DMAR support can
- be enabled by passing intel_iommu=on to the kernel. It is
- recommended you say N here while the DMAR code remains
- experimental.
-
-config DMAR_BROKEN_GFX_WA
- bool "Workaround broken graphics drivers (going away soon)"
- depends on DMAR && BROKEN
- ---help---
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA, at least until this
- option is removed in the 2.6.32 kernel.
-
-config DMAR_FLOPPY_WA
- def_bool y
- depends on DMAR
- ---help---
- Floppy disk drivers are known to bypass DMA API calls
- thereby failing to work when IOMMU is enabled. This
- workaround will setup a 1:1 mapping for the first
- 16MiB to make floppy (an ISA device) work.
-
-config INTR_REMAP
- bool "Support for Interrupt Remapping (EXPERIMENTAL)"
- depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
- ---help---
- Supports Interrupt remapping for IO-APIC and MSI devices.
- To use x2apic mode in the CPU's which support x2APIC enhancements or
- to support platforms with CPU's having > 8 bit APIC ID, say Y.
-
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 6a7cfdf8ff69..e3ca7e0d858c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -312,6 +312,9 @@ config X86_CMPXCHG
config CMPXCHG_LOCAL
def_bool X86_64 || (X86_32 && !M386)
+config CMPXCHG_DOUBLE
+ def_bool y
+
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || MPSC
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index f7cb086b4add..95365a82b6a0 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,12 +9,6 @@
# Changed by many, many contributors over the years.
#
-# ROOT_DEV specifies the default root-device when making the image.
-# This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
-# the default of FLOPPY is used by 'build'.
-
-ROOT_DEV := CURRENT
-
# If you want to preset the SVGA mode, uncomment the next line and
# set SVGA_MODE to whatever number you want.
# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
@@ -75,8 +69,7 @@ GCOV_PROFILE := n
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
quiet_cmd_image = BUILD $@
-cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
- $(ROOT_DEV) > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index ee3a4ea923ac..fdc60a0b3c20 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -130,7 +130,7 @@ static void die(const char * str, ...)
static void usage(void)
{
- die("Usage: build setup system [rootdev] [> image]");
+ die("Usage: build setup system [> image]");
}
int main(int argc, char ** argv)
@@ -138,39 +138,14 @@ int main(int argc, char ** argv)
unsigned int i, sz, setup_sectors;
int c;
u32 sys_size;
- u8 major_root, minor_root;
struct stat sb;
FILE *file;
int fd;
void *kernel;
u32 crc = 0xffffffffUL;
- if ((argc < 3) || (argc > 4))
+ if (argc != 3)
usage();
- if (argc > 3) {
- if (!strcmp(argv[3], "CURRENT")) {
- if (stat("/", &sb)) {
- perror("/");
- die("Couldn't stat /");
- }
- major_root = major(sb.st_dev);
- minor_root = minor(sb.st_dev);
- } else if (strcmp(argv[3], "FLOPPY")) {
- if (stat(argv[3], &sb)) {
- perror(argv[3]);
- die("Couldn't stat root device.");
- }
- major_root = major(sb.st_rdev);
- minor_root = minor(sb.st_rdev);
- } else {
- major_root = 0;
- minor_root = 0;
- }
- } else {
- major_root = DEFAULT_MAJOR_ROOT;
- minor_root = DEFAULT_MINOR_ROOT;
- }
- fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root);
/* Copy the setup code */
file = fopen(argv[1], "r");
@@ -193,8 +168,8 @@ int main(int argc, char ** argv)
memset(buf+c, 0, i-c);
/* Set the default root device */
- buf[508] = minor_root;
- buf[509] = major_root;
+ buf[508] = DEFAULT_MINOR_ROOT;
+ buf[509] = DEFAULT_MAJOR_ROOT;
fprintf(stderr, "Setup is %d bytes (padded to %d bytes).\n", c, i);
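
With the ROOT_DEV plumbing gone, the image builder no longer takes a root
device argument; it always writes the compiled-in defaults into the boot
sector, whose layout keeps the root minor at byte 508 and the major at byte
509. A small sketch of that layout (the zero defaults are placeholders; the
real build.c takes DEFAULT_MAJOR_ROOT/DEFAULT_MINOR_ROOT from the boot
headers):

#include <stdio.h>
#include <string.h>

#define DEFAULT_MAJOR_ROOT 0	/* placeholder values */
#define DEFAULT_MINOR_ROOT 0

int main(void)
{
	unsigned char buf[512];

	memset(buf, 0, sizeof(buf));
	/* root device lives just below the 510/511 boot signature */
	buf[508] = DEFAULT_MINOR_ROOT;
	buf[509] = DEFAULT_MAJOR_ROOT;
	printf("root dev (%d, %d)\n", buf[509], buf[508]);
	return 0;
}
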
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 7a6e68e4f748..976aa64d9a20 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -245,7 +245,7 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
& CRYPTO_TFM_RES_MASK);
- return 0;
+ return err;
}
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
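
The ghash-clmulni change is a one-line error-propagation fix: the function
computed err from the child setkey, copied the result flags, and then
returned 0 unconditionally, so a rejected key was reported as success. The
pattern in miniature (child_setkey() and its key-length check are invented
for the example):

#include <errno.h>
#include <stdio.h>

static int child_setkey(const unsigned char *key, unsigned int len)
{
	(void)key;
	return len >= 16 ? 0 : -EINVAL;	/* reject short keys */
}

static int setkey_buggy(const unsigned char *key, unsigned int len)
{
	int err = child_setkey(key, len);

	/* ... copy result flags from the child here ... */
	(void)err;	/* BUG: error computed but thrown away */
	return 0;
}

static int setkey_fixed(const unsigned char *key, unsigned int len)
{
	int err = child_setkey(key, len);

	/* ... copy result flags from the child here ... */
	return err;	/* propagate the child's failure */
}

int main(void)
{
	unsigned char key[8] = { 0 };

	printf("buggy: %d, fixed: %d\n",
	       setkey_buggy(key, sizeof(key)),
	       setkey_fixed(key, sizeof(key)));
	return 0;
}
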
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 588a7aa937e1..65577698cab2 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -127,15 +127,17 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
+
set_restore_sigmask();
return -ERESTARTNOHAND;
}
@@ -279,10 +281,7 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
goto badframe;
@@ -308,10 +307,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
goto badframe;
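
The ia32 signal hunks replace three copies of the open-coded sequence (take
siglock, assign current->blocked, call recalc_sigpending(), drop siglock)
with one call to set_current_blocked(), which owns that sequence. A toy
version of the same centralization using a pthread mutex (all names below are
illustrative, not the kernel implementation):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long blocked;

static void recalc_sigpending(void) { /* recompute pending state */ }

/* the helper owns the whole lock/assign/recalc/unlock sequence */
static void set_blocked(unsigned long newset)
{
	pthread_mutex_lock(&siglock);
	blocked = newset;
	recalc_sigpending();
	pthread_mutex_unlock(&siglock);
}

int main(void)
{
	set_blocked(0x2);	/* one call per former open-coded block */
	printf("blocked = %#lx\n", blocked);
	return 0;
}
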
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index c1870dddd322..a0e866d233ee 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -143,7 +143,7 @@ ENTRY(ia32_sysenter_target)
CFI_REL_OFFSET rip,0
pushq_cfi %rax
cld
- SAVE_ARGS 0,0,1
+ SAVE_ARGS 0,1,0
/* no need to do an access_ok check here because rbp has been
32bit zero extended */
1: movl (%rbp),%ebp
@@ -173,7 +173,7 @@ sysexit_from_sys_call:
andl $~0x200,EFLAGS-R11(%rsp)
movl RIP-R11(%rsp),%edx /* User %eip */
CFI_REGISTER rip,rdx
- RESTORE_ARGS 1,24,1,1,1,1
+ RESTORE_ARGS 0,24,0,0,0,0
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
@@ -289,7 +289,7 @@ ENTRY(ia32_cstar_target)
* disabled irqs and here we enable it straight after entry:
*/
ENABLE_INTERRUPTS(CLBR_NONE)
- SAVE_ARGS 8,1,1
+ SAVE_ARGS 8,0,0
movl %eax,%eax /* zero extension */
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
@@ -328,7 +328,7 @@ cstar_dispatch:
jnz sysretl_audit
sysretl_from_sys_call:
andl $~TS_COMPAT,TI_status(%r10)
- RESTORE_ARGS 1,-ARG_SKIP,1,1,1
+ RESTORE_ARGS 0,-ARG_SKIP,0,0,0
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
movl EFLAGS-ARGOFFSET(%rsp),%r11d
@@ -419,7 +419,7 @@ ENTRY(ia32_syscall)
cld
/* note the registers are not zero extended to the sf.
this could be a problem. */
- SAVE_ARGS 0,0,1
+ SAVE_ARGS 0,1,0
GET_THREAD_INFO(%r10)
orl $TS_COMPAT,TI_status(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 94d420b360d1..4554cc6fb96a 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -17,8 +17,8 @@
.macro altinstruction_entry orig alt feature orig_len alt_len
.align 8
- .quad \orig
- .quad \alt
+ .long \orig - .
+ .long \alt - .
.word \feature
.byte \orig_len
.byte \alt_len
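
altinstruction_entry now emits ".long \orig - ." instead of ".quad \orig":
each table entry stores a 32-bit offset relative to its own position rather
than a 64-bit absolute pointer, which halves the entry size on x86-64 and
keeps the table position-independent. The matching struct alt_instr change
follows in the next hunk. Resolving such an offset means adding it back to
the address of the field that stores it, roughly as below (user-space sketch;
both symbols are kept static so the 32-bit delta fits, and the cross-object
pointer subtraction is for illustration only):

#include <stdint.h>
#include <stdio.h>

struct alt_instr {
	int32_t instr_offset;	/* ".long 661b - ." */
	int32_t repl_offset;	/* ".long 663f - ." */
	uint16_t cpuid;
	uint8_t instrlen;
	uint8_t replacementlen;
};

static uint8_t code[16];	/* stands in for the 661: label */
static struct alt_instr entry;

/* add the self-relative offset back to its own storage address */
static uint8_t *resolve(int32_t *field)
{
	return (uint8_t *)field + *field;
}

int main(void)
{
	/* emulate what the assembler stores at build time */
	entry.instr_offset =
		(int32_t)(code - (uint8_t *)&entry.instr_offset);
	printf("resolved %p, expected %p\n",
	       (void *)resolve(&entry.instr_offset), (void *)code);
	return 0;
}
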
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index bf535f947e8c..23fb6d79f209 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -43,8 +43,8 @@
#endif
struct alt_instr {
- u8 *instr; /* original instruction */
- u8 *replacement;
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
@@ -84,8 +84,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
"661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
- _ASM_PTR "661b\n" /* label */ \
- _ASM_PTR "663f\n" /* new instruction */ \
+ " .long 661b - .\n" /* label */ \
+ " .long 663f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
deleted file mode 100644
index a6863a2dec1f..000000000000
--- a/arch/x86/include/asm/amd_iommu.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- * Leo Duran <leo.duran@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_X86_AMD_IOMMU_H
-#define _ASM_X86_AMD_IOMMU_H
-
-#include <linux/irqreturn.h>
-
-#ifdef CONFIG_AMD_IOMMU
-
-extern int amd_iommu_detect(void);
-
-#else
-
-static inline int amd_iommu_detect(void) { return -ENODEV; }
-
-#endif
-
-#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
deleted file mode 100644
index 55d95eb789b3..000000000000
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
-#define _ASM_X86_AMD_IOMMU_PROTO_H
-
-#include <asm/amd_iommu_types.h>
-
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
-extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
-extern void amd_iommu_init_api(void);
-#ifndef CONFIG_AMD_IOMMU_STATS
-
-static inline void amd_iommu_stats_init(void) { }
-
-#endif /* !CONFIG_AMD_IOMMU_STATS */
-
-static inline bool is_rd890_iommu(struct pci_dev *pdev)
-{
- return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
- (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
-}
-
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
-{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
-}
-
-#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
deleted file mode 100644
index 4c9982995414..000000000000
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- * Leo Duran <leo.duran@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
-#define _ASM_X86_AMD_IOMMU_TYPES_H
-
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-/*
- * Maximum number of IOMMUs supported
- */
-#define MAX_IOMMUS 32
-
-/*
- * some size calculation constants
- */
-#define DEV_TABLE_ENTRY_SIZE 32
-#define ALIAS_TABLE_ENTRY_SIZE 2
-#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
-
-/* Length of the MMIO region for the AMD IOMMU */
-#define MMIO_REGION_LENGTH 0x4000
-
-/* Capability offsets used by the driver */
-#define MMIO_CAP_HDR_OFFSET 0x00
-#define MMIO_RANGE_OFFSET 0x0c
-#define MMIO_MISC_OFFSET 0x10
-
-/* Masks, shifts and macros to parse the device range capability */
-#define MMIO_RANGE_LD_MASK 0xff000000
-#define MMIO_RANGE_FD_MASK 0x00ff0000
-#define MMIO_RANGE_BUS_MASK 0x0000ff00
-#define MMIO_RANGE_LD_SHIFT 24
-#define MMIO_RANGE_FD_SHIFT 16
-#define MMIO_RANGE_BUS_SHIFT 8
-#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
-#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
-#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
-#define MMIO_MSI_NUM(x) ((x) & 0x1f)
-
-/* Flag masks for the AMD IOMMU exclusion range */
-#define MMIO_EXCL_ENABLE_MASK 0x01ULL
-#define MMIO_EXCL_ALLOW_MASK 0x02ULL
-
-/* Used offsets into the MMIO space */
-#define MMIO_DEV_TABLE_OFFSET 0x0000
-#define MMIO_CMD_BUF_OFFSET 0x0008
-#define MMIO_EVT_BUF_OFFSET 0x0010
-#define MMIO_CONTROL_OFFSET 0x0018
-#define MMIO_EXCL_BASE_OFFSET 0x0020
-#define MMIO_EXCL_LIMIT_OFFSET 0x0028
-#define MMIO_EXT_FEATURES 0x0030
-#define MMIO_CMD_HEAD_OFFSET 0x2000
-#define MMIO_CMD_TAIL_OFFSET 0x2008
-#define MMIO_EVT_HEAD_OFFSET 0x2010
-#define MMIO_EVT_TAIL_OFFSET 0x2018
-#define MMIO_STATUS_OFFSET 0x2020
-
-
-/* Extended Feature Bits */
-#define FEATURE_PREFETCH (1ULL<<0)
-#define FEATURE_PPR (1ULL<<1)
-#define FEATURE_X2APIC (1ULL<<2)
-#define FEATURE_NX (1ULL<<3)
-#define FEATURE_GT (1ULL<<4)
-#define FEATURE_IA (1ULL<<6)
-#define FEATURE_GA (1ULL<<7)
-#define FEATURE_HE (1ULL<<8)
-#define FEATURE_PC (1ULL<<9)
-
-/* MMIO status bits */
-#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
-
-/* event logging constants */
-#define EVENT_ENTRY_SIZE 0x10
-#define EVENT_TYPE_SHIFT 28
-#define EVENT_TYPE_MASK 0xf
-#define EVENT_TYPE_ILL_DEV 0x1
-#define EVENT_TYPE_IO_FAULT 0x2
-#define EVENT_TYPE_DEV_TAB_ERR 0x3
-#define EVENT_TYPE_PAGE_TAB_ERR 0x4
-#define EVENT_TYPE_ILL_CMD 0x5
-#define EVENT_TYPE_CMD_HARD_ERR 0x6
-#define EVENT_TYPE_IOTLB_INV_TO 0x7
-#define EVENT_TYPE_INV_DEV_REQ 0x8
-#define EVENT_DEVID_MASK 0xffff
-#define EVENT_DEVID_SHIFT 0
-#define EVENT_DOMID_MASK 0xffff
-#define EVENT_DOMID_SHIFT 0
-#define EVENT_FLAGS_MASK 0xfff
-#define EVENT_FLAGS_SHIFT 0x10
-
-/* feature control bits */
-#define CONTROL_IOMMU_EN 0x00ULL
-#define CONTROL_HT_TUN_EN 0x01ULL
-#define CONTROL_EVT_LOG_EN 0x02ULL
-#define CONTROL_EVT_INT_EN 0x03ULL
-#define CONTROL_COMWAIT_EN 0x04ULL
-#define CONTROL_PASSPW_EN 0x08ULL
-#define CONTROL_RESPASSPW_EN 0x09ULL
-#define CONTROL_COHERENT_EN 0x0aULL
-#define CONTROL_ISOC_EN 0x0bULL
-#define CONTROL_CMDBUF_EN 0x0cULL
-#define CONTROL_PPFLOG_EN 0x0dULL
-#define CONTROL_PPFINT_EN 0x0eULL
-
-/* command specific defines */
-#define CMD_COMPL_WAIT 0x01
-#define CMD_INV_DEV_ENTRY 0x02
-#define CMD_INV_IOMMU_PAGES 0x03
-#define CMD_INV_IOTLB_PAGES 0x04
-#define CMD_INV_ALL 0x08
-
-#define CMD_COMPL_WAIT_STORE_MASK 0x01
-#define CMD_COMPL_WAIT_INT_MASK 0x02
-#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
-#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
-
-#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
-
-/* macros and definitions for device table entries */
-#define DEV_ENTRY_VALID 0x00
-#define DEV_ENTRY_TRANSLATION 0x01
-#define DEV_ENTRY_IR 0x3d
-#define DEV_ENTRY_IW 0x3e
-#define DEV_ENTRY_NO_PAGE_FAULT 0x62
-#define DEV_ENTRY_EX 0x67
-#define DEV_ENTRY_SYSMGT1 0x68
-#define DEV_ENTRY_SYSMGT2 0x69
-#define DEV_ENTRY_INIT_PASS 0xb8
-#define DEV_ENTRY_EINT_PASS 0xb9
-#define DEV_ENTRY_NMI_PASS 0xba
-#define DEV_ENTRY_LINT0_PASS 0xbe
-#define DEV_ENTRY_LINT1_PASS 0xbf
-#define DEV_ENTRY_MODE_MASK 0x07
-#define DEV_ENTRY_MODE_SHIFT 0x09
-
-/* constants to configure the command buffer */
-#define CMD_BUFFER_SIZE 8192
-#define CMD_BUFFER_UNINITIALIZED 1
-#define CMD_BUFFER_ENTRIES 512
-#define MMIO_CMD_SIZE_SHIFT 56
-#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
-
-/* constants for event buffer handling */
-#define EVT_BUFFER_SIZE 8192 /* 512 entries */
-#define EVT_LEN_MASK (0x9ULL << 56)
-
-#define PAGE_MODE_NONE 0x00
-#define PAGE_MODE_1_LEVEL 0x01
-#define PAGE_MODE_2_LEVEL 0x02
-#define PAGE_MODE_3_LEVEL 0x03
-#define PAGE_MODE_4_LEVEL 0x04
-#define PAGE_MODE_5_LEVEL 0x05
-#define PAGE_MODE_6_LEVEL 0x06
-
-#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
-#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
- ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
- (0xffffffffffffffffULL))
-#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
-#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
-#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
- IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)
-
-#define PM_MAP_4k 0
-#define PM_ADDR_MASK 0x000ffffffffff000ULL
-#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
- (~((1ULL << (12 + ((lvl) * 9))) - 1)))
-#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
-
-/*
- * Returns the page table level to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_LEVEL(pagesize) \
- ((__ffs(pagesize) - 12) / 9)
-/*
- * Returns the number of ptes to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_PTE_COUNT(pagesize) \
- (1ULL << ((__ffs(pagesize) - 12) % 9))
-
-/*
- * Aligns a given io-virtual address to a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_ALIGN(address, pagesize) \
- ((address) & ~((pagesize) - 1))
-/*
- * Creates an IOMMU PTE for an address an a given pagesize
- * The PTE has no permission bits set
- * Pagesize is expected to be a power-of-two larger than 4096
- */
-#define PAGE_SIZE_PTE(address, pagesize) \
- (((address) | ((pagesize) - 1)) & \
- (~(pagesize >> 1)) & PM_ADDR_MASK)
-
-/*
- * Takes a PTE value with mode=0x07 and returns the page size it maps
- */
-#define PTE_PAGE_SIZE(pte) \
- (1ULL << (1 + ffz(((pte) | 0xfffULL))))
-
-#define IOMMU_PTE_P (1ULL << 0)
-#define IOMMU_PTE_TV (1ULL << 1)
-#define IOMMU_PTE_U (1ULL << 59)
-#define IOMMU_PTE_FC (1ULL << 60)
-#define IOMMU_PTE_IR (1ULL << 61)
-#define IOMMU_PTE_IW (1ULL << 62)
-
-#define DTE_FLAG_IOTLB 0x01
-
-#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
-#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
-#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
-#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
-
-#define IOMMU_PROT_MASK 0x03
-#define IOMMU_PROT_IR 0x01
-#define IOMMU_PROT_IW 0x02
-
-/* IOMMU capabilities */
-#define IOMMU_CAP_IOTLB 24
-#define IOMMU_CAP_NPCACHE 26
-#define IOMMU_CAP_EFR 27
-
-#define MAX_DOMAIN_ID 65536
-
-/* FIXME: move this macro to <linux/pci.h> */
-#define PCI_BUS(x) (((x) >> 8) & 0xff)
-
-/* Protection domain flags */
-#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
-#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
- domain for an IOMMU */
-#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
- translation */
-
-extern bool amd_iommu_dump;
-#define DUMP_printk(format, arg...) \
- do { \
- if (amd_iommu_dump) \
- printk(KERN_INFO "AMD-Vi: " format, ## arg); \
- } while(0);
-
-/* global flag if IOMMUs cache non-present entries */
-extern bool amd_iommu_np_cache;
-/* Only true if all IOMMUs support device IOTLBs */
-extern bool amd_iommu_iotlb_sup;
-
-/*
- * Make iterating over all IOMMUs easier
- */
-#define for_each_iommu(iommu) \
- list_for_each_entry((iommu), &amd_iommu_list, list)
-#define for_each_iommu_safe(iommu, next) \
- list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
-
-#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
-#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
-#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
-#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
-#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
-#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-
-/*
- * This structure contains generic data for IOMMU protection domains
- * independent of their use.
- */
-struct protection_domain {
- struct list_head list; /* for list of all protection domains */
- struct list_head dev_list; /* List of all devices in this domain */
- spinlock_t lock; /* mostly used to lock the page table*/
- struct mutex api_lock; /* protect page tables in the iommu-api path */
- u16 id; /* the domain id written to the device table */
- int mode; /* paging mode (0-6 levels) */
- u64 *pt_root; /* page table root pointer */
- unsigned long flags; /* flags to find out type of domain */
- bool updated; /* complete domain flush required */
- unsigned dev_cnt; /* devices assigned to this domain */
- unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
- void *priv; /* private data */
-
-};
-
-/*
- * This struct contains device specific data for the IOMMU
- */
-struct iommu_dev_data {
- struct list_head list; /* For domain->dev_list */
- struct device *dev; /* Device this data belong to */
- struct device *alias; /* The Alias Device */
- struct protection_domain *domain; /* Domain the device is bound to */
- atomic_t bind; /* Domain attach reverent count */
-};
-
-/*
- * For dynamic growth the aperture size is split into ranges of 128MB of
- * DMA address space each. This struct represents one such range.
- */
-struct aperture_range {
-
- /* address allocation bitmap */
- unsigned long *bitmap;
-
- /*
- * Array of PTE pages for the aperture. In this array we save all the
- * leaf pages of the domain page table used for the aperture. This way
- * we don't need to walk the page table to find a specific PTE. We can
- * just calculate its address in constant time.
- */
- u64 *pte_pages[64];
-
- unsigned long offset;
-};
-
-/*
- * Data container for a dma_ops specific protection domain
- */
-struct dma_ops_domain {
- struct list_head list;
-
- /* generic protection domain information */
- struct protection_domain domain;
-
- /* size of the aperture for the mappings */
- unsigned long aperture_size;
-
- /* address we start to search for free addresses */
- unsigned long next_address;
-
- /* address space relevant data */
- struct aperture_range *aperture[APERTURE_MAX_RANGES];
-
- /* This will be set to true when TLB needs to be flushed */
- bool need_flush;
-
- /*
- * if this is a preallocated domain, keep the device for which it was
- * preallocated in this variable
- */
- u16 target_dev;
-};
-
-/*
- * Structure where we save information about one hardware AMD IOMMU in the
- * system.
- */
-struct amd_iommu {
- struct list_head list;
-
- /* Index within the IOMMU array */
- int index;
-
- /* locks the accesses to the hardware */
- spinlock_t lock;
-
- /* Pointer to PCI device of this IOMMU */
- struct pci_dev *dev;
-
- /* physical address of MMIO space */
- u64 mmio_phys;
- /* virtual address of MMIO space */
- u8 *mmio_base;
-
- /* capabilities of that IOMMU read from ACPI */
- u32 cap;
-
- /* flags read from acpi table */
- u8 acpi_flags;
-
- /* Extended features */
- u64 features;
-
- /*
- * Capability pointer. There could be more than one IOMMU per PCI
- * device function if there is more than one AMD IOMMU capability
- * pointer.
- */
- u16 cap_ptr;
-
- /* pci domain of this IOMMU */
- u16 pci_seg;
-
- /* first device this IOMMU handles. read from PCI */
- u16 first_device;
- /* last device this IOMMU handles. read from PCI */
- u16 last_device;
-
- /* start of exclusion range of that IOMMU */
- u64 exclusion_start;
- /* length of exclusion range of that IOMMU */
- u64 exclusion_length;
-
- /* command buffer virtual address */
- u8 *cmd_buf;
- /* size of command buffer */
- u32 cmd_buf_size;
-
- /* size of event buffer */
- u32 evt_buf_size;
- /* event buffer virtual address */
- u8 *evt_buf;
- /* MSI number for event interrupt */
- u16 evt_msi_num;
-
- /* true if interrupts for this IOMMU are already enabled */
- bool int_enabled;
-
- /* true if we need to send a completion wait command */
- bool need_sync;
-
- /* default dma_ops domain for that IOMMU */
- struct dma_ops_domain *default_dom;
-
- /*
- * We can't rely on the BIOS to restore all values on reinit, so we
- * need to stash them
- */
-
- /* The iommu BAR */
- u32 stored_addr_lo;
- u32 stored_addr_hi;
-
- /*
- * Each iommu has 6 l1s, each of which is documented as having 0x12
- * registers
- */
- u32 stored_l1[6][0x12];
-
- /* The l2 indirect registers */
- u32 stored_l2[0x83];
-};
-
-/*
- * List with all IOMMUs in the system. This list is not locked because it is
- * only written and read at driver initialization or suspend time
- */
-extern struct list_head amd_iommu_list;
-
-/*
- * Array with pointers to each IOMMU struct
- * The indices are referenced in the protection domains
- */
-extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
-
-/* Number of IOMMUs present in the system */
-extern int amd_iommus_present;
-
-/*
- * Declarations for the global list of all protection domains
- */
-extern spinlock_t amd_iommu_pd_lock;
-extern struct list_head amd_iommu_pd_list;
-
-/*
- * Structure defining one entry in the device table
- */
-struct dev_table_entry {
- u32 data[8];
-};
-
-/*
- * One entry for unity mappings parsed out of the ACPI table.
- */
-struct unity_map_entry {
- struct list_head list;
-
- /* starting device id this entry is used for (inclusive) */
- u16 devid_start;
- /* end device id this entry is used for (inclusive) */
- u16 devid_end;
-
- /* start address to unity map (inclusive) */
- u64 address_start;
- /* end address to unity map (inclusive) */
- u64 address_end;
-
- /* required protection */
- int prot;
-};
-
-/*
- * List of all unity mappings. It is not locked because at runtime it is only
- * read. It is created at ACPI table parsing time.
- */
-extern struct list_head amd_iommu_unity_map;
-
-/*
- * Data structures for device handling
- */
-
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to map requestor ids to device ids. Not locked because only
- * read at runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
-/*
- * Reverse lookup table to find the IOMMU which translates a specific device.
- */
-extern struct amd_iommu **amd_iommu_rlookup_table;
-
-/* size of the dma_ops aperture as power of 2 */
-extern unsigned amd_iommu_aperture_order;
-
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
-/* allocation bitmap for domain ids */
-extern unsigned long *amd_iommu_pd_alloc_bitmap;
-
-/*
- * If true, the addresses will be flushed at unmap time, not when
- * they are reused
- */
-extern bool amd_iommu_unmap_flush;
-
-/* takes bus and device/function and returns the device id
- * FIXME: should that be in generic PCI code? */
-static inline u16 calc_devid(u8 bus, u8 devfn)
-{
- return (((u16)bus) << 8) | devfn;
-}
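calc_devid() is just the standard PCI bus/devfn packing; a hedged userspace illustration (the bus and devfn values are invented):

#include <stdio.h>
#include <stdint.h>

static inline uint16_t calc_devid(uint8_t bus, uint8_t devfn)
{
	return (((uint16_t)bus) << 8) | devfn;
}

int main(void)
{
	/* devfn packs device (5 bits) and function (3 bits), as lspci
	 * shows them: 00:14.3 -> devfn = (0x14 << 3) | 3 */
	uint8_t bus = 0x00, devfn = (0x14 << 3) | 3;

	printf("devid = 0x%04x\n", calc_devid(bus, devfn)); /* 0x00a3 */
	return 0;
}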
-
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-struct __iommu_counter {
- char *name;
- struct dentry *dent;
- u64 value;
-};
-
-#define DECLARE_STATS_COUNTER(nm) \
- static struct __iommu_counter nm = { \
- .name = #nm, \
- }
-
-#define INC_STATS_COUNTER(name) name.value += 1
-#define ADD_STATS_COUNTER(name, x) name.value += (x)
-#define SUB_STATS_COUNTER(name, x) name.value -= (x)
-
-#else /* CONFIG_AMD_IOMMU_STATS */
-
-#define DECLARE_STATS_COUNTER(name)
-#define INC_STATS_COUNTER(name)
-#define ADD_STATS_COUNTER(name, x)
-#define SUB_STATS_COUNTER(name, x)
-
-#endif /* CONFIG_AMD_IOMMU_STATS */
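A sketch of how the counter macros are meant to be used in the driver proper; the counter name cnt_map_pages is hypothetical:

/* In a driver .c file (sketch; counter name is hypothetical): */
DECLARE_STATS_COUNTER(cnt_map_pages);

static void example_map_path(unsigned long npages)
{
	/* Both compile away to nothing when CONFIG_AMD_IOMMU_STATS
	 * is unset. */
	INC_STATS_COUNTER(cnt_map_pages);
	ADD_STATS_COUNTER(cnt_map_pages, npages);
}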
-
-#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index af60d8a2e288..0acbac299e49 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -18,24 +18,6 @@
#ifdef CONFIG_APB_TIMER
-/* Langwell DW APB timer registers */
-#define APBTMR_N_LOAD_COUNT 0x00
-#define APBTMR_N_CURRENT_VALUE 0x04
-#define APBTMR_N_CONTROL 0x08
-#define APBTMR_N_EOI 0x0c
-#define APBTMR_N_INT_STATUS 0x10
-
-#define APBTMRS_INT_STATUS 0xa0
-#define APBTMRS_EOI 0xa4
-#define APBTMRS_RAW_INT_STATUS 0xa8
-#define APBTMRS_COMP_VERSION 0xac
-#define APBTMRS_REG_SIZE 0x14
-
-/* register bits */
-#define APBTMR_CONTROL_ENABLE (1<<0)
-#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
-#define APBTMR_CONTROL_INT (1<<2)
-
/* default memory mapped register base */
#define LNW_SCU_ADDR 0xFF100000
#define LNW_EXT_TIMER_OFFSET 0x1B800
@@ -43,14 +25,13 @@
#define LNW_EXT_TIMER_PGOFFSET 0x800
/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
-#define APBT_MAX_FREQ 50
-#define APBT_MIN_FREQ 1
+#define APBT_MAX_FREQ 50000000
+#define APBT_MIN_FREQ 1000000
#define APBT_MMAP_SIZE 1024
#define APBT_DEV_USED 1
extern void apbt_time_init(void);
-extern struct clock_event_device *global_clock_event;
extern unsigned long apbt_quick_calibrate(void);
extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
extern void apbt_setup_secondary_clock(void);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index b3ed1e1460ff..9412d6558c88 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,9 +3,11 @@
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
+# define __ASM_FORM_COMMA(x) x,
# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
+# define __ASM_FORM_COMMA(x) " " #x ","
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
#endif
@@ -15,7 +17,8 @@
# define __ASM_SEL(a,b) __ASM_FORM(b)
#endif
-#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
+#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
+ inst##q##__VA_ARGS__)
#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
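For reference, a sketch of how the reworked macros expand under the two word sizes (derived from the __ASM_SEL definitions above):

/* Illustrative expansions (sketch):
 *   __ASM_SIZE(push)       -> pushl      (32-bit) / pushq      (64-bit)
 *   __ASM_SIZE(push, _cfi) -> pushl_cfi  (32-bit) / pushq_cfi  (64-bit)
 *   __ASM_REG(bp)          -> ebp        (32-bit) / rbp        (64-bit)
 * In C context each expansion is additionally wrapped as " x " by
 * __ASM_FORM. The _COMMA variant lets a macro argument carry its own
 * trailing comma into an asm template; the FRAME macro in asm/frame.h
 * below uses the variadic __ASM_SIZE form.
 */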
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 30af5a832163..a9e3a740f697 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,6 +46,7 @@ For 32-bit we have the following conventions - kernel is built with
*/
+#include "dwarf2.h"
/*
* 64-bit system call stack frame layout defines and helpers, for
@@ -84,72 +85,57 @@ For 32-bit we have the following conventions - kernel is built with
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
- .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0
+ .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
subq $9*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET 9*8+\addskip
- movq %rdi, 8*8(%rsp)
- CFI_REL_OFFSET rdi, 8*8
- movq %rsi, 7*8(%rsp)
- CFI_REL_OFFSET rsi, 7*8
- movq %rdx, 6*8(%rsp)
- CFI_REL_OFFSET rdx, 6*8
- .if \norcx
- .else
- movq %rcx, 5*8(%rsp)
- CFI_REL_OFFSET rcx, 5*8
+ movq_cfi rdi, 8*8
+ movq_cfi rsi, 7*8
+ movq_cfi rdx, 6*8
+
+ .if \save_rcx
+ movq_cfi rcx, 5*8
.endif
- movq %rax, 4*8(%rsp)
- CFI_REL_OFFSET rax, 4*8
- .if \nor891011
- .else
- movq %r8, 3*8(%rsp)
- CFI_REL_OFFSET r8, 3*8
- movq %r9, 2*8(%rsp)
- CFI_REL_OFFSET r9, 2*8
- movq %r10, 1*8(%rsp)
- CFI_REL_OFFSET r10, 1*8
- movq %r11, (%rsp)
- CFI_REL_OFFSET r11, 0*8
+
+ movq_cfi rax, 4*8
+
+ .if \save_r891011
+ movq_cfi r8, 3*8
+ movq_cfi r9, 2*8
+ movq_cfi r10, 1*8
+ movq_cfi r11, 0*8
.endif
+
.endm
#define ARG_SKIP (9*8)
- .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \
- skipr8910=0, skiprdx=0
- .if \skipr11
- .else
- movq (%rsp), %r11
- CFI_RESTORE r11
+ .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
+ rstor_r8910=1, rstor_rdx=1
+ .if \rstor_r11
+ movq_cfi_restore 0*8, r11
.endif
- .if \skipr8910
- .else
- movq 1*8(%rsp), %r10
- CFI_RESTORE r10
- movq 2*8(%rsp), %r9
- CFI_RESTORE r9
- movq 3*8(%rsp), %r8
- CFI_RESTORE r8
+
+ .if \rstor_r8910
+ movq_cfi_restore 1*8, r10
+ movq_cfi_restore 2*8, r9
+ movq_cfi_restore 3*8, r8
.endif
- .if \skiprax
- .else
- movq 4*8(%rsp), %rax
- CFI_RESTORE rax
+
+ .if \rstor_rax
+ movq_cfi_restore 4*8, rax
.endif
- .if \skiprcx
- .else
- movq 5*8(%rsp), %rcx
- CFI_RESTORE rcx
+
+ .if \rstor_rcx
+ movq_cfi_restore 5*8, rcx
.endif
- .if \skiprdx
- .else
- movq 6*8(%rsp), %rdx
- CFI_RESTORE rdx
+
+ .if \rstor_rdx
+ movq_cfi_restore 6*8, rdx
.endif
- movq 7*8(%rsp), %rsi
- CFI_RESTORE rsi
- movq 8*8(%rsp), %rdi
- CFI_RESTORE rdi
+
+ movq_cfi_restore 7*8, rsi
+ movq_cfi_restore 8*8, rdi
+
.if ARG_SKIP+\addskip > 0
addq $ARG_SKIP+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
@@ -176,33 +162,21 @@ For 32-bit we have the following conventions - kernel is built with
.macro SAVE_REST
subq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET REST_SKIP
- movq %rbx, 5*8(%rsp)
- CFI_REL_OFFSET rbx, 5*8
- movq %rbp, 4*8(%rsp)
- CFI_REL_OFFSET rbp, 4*8
- movq %r12, 3*8(%rsp)
- CFI_REL_OFFSET r12, 3*8
- movq %r13, 2*8(%rsp)
- CFI_REL_OFFSET r13, 2*8
- movq %r14, 1*8(%rsp)
- CFI_REL_OFFSET r14, 1*8
- movq %r15, (%rsp)
- CFI_REL_OFFSET r15, 0*8
+ movq_cfi rbx, 5*8
+ movq_cfi rbp, 4*8
+ movq_cfi r12, 3*8
+ movq_cfi r13, 2*8
+ movq_cfi r14, 1*8
+ movq_cfi r15, 0*8
.endm
.macro RESTORE_REST
- movq (%rsp), %r15
- CFI_RESTORE r15
- movq 1*8(%rsp), %r14
- CFI_RESTORE r14
- movq 2*8(%rsp), %r13
- CFI_RESTORE r13
- movq 3*8(%rsp), %r12
- CFI_RESTORE r12
- movq 4*8(%rsp), %rbp
- CFI_RESTORE rbp
- movq 5*8(%rsp), %rbx
- CFI_RESTORE rbx
+ movq_cfi_restore 0*8, r15
+ movq_cfi_restore 1*8, r14
+ movq_cfi_restore 2*8, r13
+ movq_cfi_restore 3*8, r12
+ movq_cfi_restore 4*8, rbp
+ movq_cfi_restore 5*8, rbx
addq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm
@@ -214,7 +188,7 @@ For 32-bit we have the following conventions - kernel is built with
.macro RESTORE_ALL addskip=0
RESTORE_REST
- RESTORE_ARGS 0, \addskip
+ RESTORE_ARGS 1, \addskip
.endm
.macro icebp
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
new file mode 100644
index 000000000000..0bdbbb3b9ce7
--- /dev/null
+++ b/arch/x86/include/asm/clocksource.h
@@ -0,0 +1,18 @@
+/* x86-specific clocksource additions */
+
+#ifndef _ASM_X86_CLOCKSOURCE_H
+#define _ASM_X86_CLOCKSOURCE_H
+
+#ifdef CONFIG_X86_64
+
+#define VCLOCK_NONE 0 /* No vDSO clock available. */
+#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
+#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
+
+struct arch_clocksource_data {
+ int vclock_mode;
+};
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_CLOCKSOURCE_H */
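A hedged sketch of the intended consumer: a clocksource flags itself as vDSO-readable through the new field. This assumes struct clocksource grows an archdata member of this type, as wired up elsewhere in the series; the layout is abbreviated.

/* Sketch only: a clocksource advertising that the vDSO may read it
 * via vread_tsc. Other required members (.read, .mask, ...) elided. */
static struct clocksource clocksource_tsc_sketch = {
	.name		= "tsc",
	.rating		= 300,
	.archdata	= { .vclock_mode = VCLOCK_TSC },
};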
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 284a6e8f7ce1..3deb7250624c 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -280,4 +280,52 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
#endif
+#define cmpxchg8b(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __dummy; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \
+ : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
+ : "a" (__old1), "d"(__old2), \
+ "b" (__new1), "c" (__new2) \
+ : "memory"); \
+ __ret; })
+
+
+#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __dummy; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile("cmpxchg8b %2; setz %1" \
+ : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
+ : "a" (__old), "d"(__old2), \
+ "b" (__new1), "c" (__new2), \
+ : "memory"); \
+ __ret; })
+
+
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ VM_BUG_ON((unsigned long)(ptr) % 8); \
+ cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ VM_BUG_ON((unsigned long)(ptr) % 8); \
+ cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define system_has_cmpxchg_double() cpu_has_cx8
+
#endif /* _ASM_X86_CMPXCHG_32_H */
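A hedged sketch of driving the 32-bit double-word primitive, e.g. for an ABA-safe list head. The struct and function names are invented; the pair must be 8-byte aligned per the VM_BUG_ON above.

/* Sketch: atomically swap an 8-byte {pointer, counter} pair on 32-bit.
 * Bumping the sequence counter on every update defeats the ABA problem. */
struct head {
	void		*ptr;	/* o1/n1: first 4-byte word */
	unsigned int	seq;	/* o2/n2: second 4-byte word */
} __aligned(8);

static bool push_sketch(struct head *h, void *old_ptr, unsigned int old_seq,
			void *new_ptr)
{
	/* Succeeds only if both words still read {old_ptr, old_seq}. */
	return cmpxchg_double(&h->ptr, old_ptr, old_seq,
			      new_ptr, old_seq + 1);
}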
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 423ae58aa020..7cf5c0a24434 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -151,4 +151,49 @@ extern void __cmpxchg_wrong_size(void);
cmpxchg_local((ptr), (o), (n)); \
})
+#define cmpxchg16b(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __junk; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \
+ : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
+ : "b"(__new1), "c"(__new2), \
+ "a"(__old1), "d"(__old2)); \
+ __ret; })
+
+
+#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __junk; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile("cmpxchg16b %2;setz %1" \
+ : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
+ : "b"(__new1), "c"(__new2), \
+ "a"(__old1), "d"(__old2)); \
+ __ret; })
+
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ VM_BUG_ON((unsigned long)(ptr) % 16); \
+ cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ VM_BUG_ON((unsigned long)(ptr) % 16); \
+ cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define system_has_cmpxchg_double() cpu_has_cx16
+
#endif /* _ASM_X86_CMPXCHG_64_H */
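The 64-bit variant needs a runtime feature check, since the earliest x86-64 CPUs lack cmpxchg16b; a sketch with invented names (the pair must be 16-byte aligned per the VM_BUG_ON above):

/* Sketch: update a 16-byte {lo, hi} pair atomically if the CPU can. */
static bool try_update_pair(u64 *pair /* 16-byte aligned */,
			    u64 old_lo, u64 old_hi, u64 new_lo, u64 new_hi)
{
	if (!system_has_cmpxchg_double())
		return false;	/* caller falls back to a lock */

	return cmpxchg_double(pair, old_lo, old_hi, new_lo, new_hi);
}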
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 71cc3800712c..4258aac99a6e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -288,6 +288,8 @@ extern const char * const x86_power_flags[32];
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
+#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
@@ -331,8 +333,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
"2:\n"
".section .altinstructions,\"a\"\n"
_ASM_ALIGN "\n"
- _ASM_PTR "1b\n"
- _ASM_PTR "0\n" /* no replacement */
+ " .long 1b - .\n"
+ " .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
@@ -349,8 +351,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
"2:\n"
".section .altinstructions,\"a\"\n"
_ASM_ALIGN "\n"
- _ASM_PTR "1b\n"
- _ASM_PTR "3f\n"
+ " .long 1b - .\n"
+ " .long 3f - .\n"
" .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h
index 409a649204aa..9b3b4f2754c7 100644
--- a/arch/x86/include/asm/delay.h
+++ b/arch/x86/include/asm/delay.h
@@ -1,30 +1,7 @@
#ifndef _ASM_X86_DELAY_H
#define _ASM_X86_DELAY_H
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/x86/lib/delay.c
- */
-
-/* Undefined functions to get compile-time errors */
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
- __udelay(n))
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
+#include <asm-generic/delay.h>
void use_tsc_delay(void);
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 1cd6d26a0a8d..0baa628e330c 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -53,8 +53,4 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
#endif
-#ifdef CONFIG_X86_MCE
-BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR)
-#endif
-
#endif
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4729b2b63117..460c74e4852c 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -78,6 +78,7 @@ enum fixed_addresses {
VSYSCALL_LAST_PAGE,
VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+ ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+ VVAR_PAGE,
VSYSCALL_HPET,
#endif
FIX_DBGP_BASE,
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 2c6fc9e62812..3b629f47eb65 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,5 +1,6 @@
#ifdef __ASSEMBLY__
+#include <asm/asm.h>
#include <asm/dwarf2.h>
/* The annotation hides the frame from the unwinder and makes it look
@@ -7,13 +8,13 @@
frame pointer later */
#ifdef CONFIG_FRAME_POINTER
.macro FRAME
- pushl_cfi %ebp
- CFI_REL_OFFSET ebp,0
- movl %esp,%ebp
+ __ASM_SIZE(push,_cfi) %__ASM_REG(bp)
+ CFI_REL_OFFSET __ASM_REG(bp), 0
+ __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
.endm
.macro ENDFRAME
- popl_cfi %ebp
- CFI_RESTORE ebp
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(bp)
+ CFI_RESTORE __ASM_REG(bp)
.endm
#else
.macro FRAME
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index bb9efe8706e2..13f5504c76c0 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -34,7 +34,6 @@ extern void irq_work_interrupt(void);
extern void spurious_interrupt(void);
extern void thermal_interrupt(void);
extern void reschedule_interrupt(void);
-extern void mce_self_interrupt(void);
extern void invalidate_interrupt(void);
extern void invalidate_interrupt0(void);
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
deleted file mode 100644
index 65aaa91d5850..000000000000
--- a/arch/x86/include/asm/i8253.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_X86_I8253_H
-#define _ASM_X86_I8253_H
-
-/* i8253A PIT registers */
-#define PIT_MODE 0x43
-#define PIT_CH0 0x40
-#define PIT_CH2 0x42
-
-#define PIT_LATCH LATCH
-
-extern raw_spinlock_t i8253_lock;
-
-extern struct clock_event_device *global_clock_event;
-
-extern void setup_pit_timer(void);
-
-#define inb_pit inb_p
-#define outb_pit outb_p
-
-#endif /* _ASM_X86_I8253_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6e976ee3b3ef..f9a320984a10 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -17,7 +17,8 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
- * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
+ * Vector 204 : legacy x86_64 vsyscall emulation
+ * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
* Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
@@ -50,6 +51,9 @@
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
#endif
+#ifdef CONFIG_X86_64
+# define VSYSCALL_EMU_VECTOR 0xcc
+#endif
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
@@ -109,11 +113,6 @@
#define UV_BAU_MESSAGE 0xf5
-/*
- * Self IPI vector for machine checks
- */
-#define MCE_SELF_VECTOR 0xf4
-
/* Xen vector callback to receive events in a HVM domain */
#define XEN_HVM_EVTCHN_CALLBACK 0xf3
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 5745ce8bf108..bba3cf88e624 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -60,23 +60,24 @@ static inline void native_halt(void)
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
+#include <linux/types.h>
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
return native_save_fl();
}
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
{
native_restore_fl(flags);
}
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
native_irq_disable();
}
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
{
native_irq_enable();
}
@@ -102,7 +103,7 @@ static inline void halt(void)
/*
* For spinlocks, etc:
*/
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags = arch_local_save_flags();
arch_local_irq_disable();
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0049211959c0..6040d115ef51 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -229,7 +229,26 @@ struct read_cache {
unsigned long end;
};
-struct decode_cache {
+struct x86_emulate_ctxt {
+ struct x86_emulate_ops *ops;
+
+ /* Register state before/after emulation. */
+ unsigned long eflags;
+ unsigned long eip; /* eip before instruction emulation */
+ /* Emulated execution mode, represented by an X86EMUL_MODE value. */
+ int mode;
+
+ /* interruptibility state, as a result of execution of STI or MOV SS */
+ int interruptibility;
+
+ bool guest_mode; /* guest running a nested guest */
+ bool perm_ok; /* do not check permissions if true */
+ bool only_vendor_specific_insn;
+
+ bool have_exception;
+ struct x86_exception exception;
+
+ /* decode cache */
u8 twobyte;
u8 b;
u8 intercept;
@@ -246,8 +265,6 @@ struct decode_cache {
unsigned int d;
int (*execute)(struct x86_emulate_ctxt *ctxt);
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
- unsigned long regs[NR_VCPU_REGS];
- unsigned long eip;
/* modrm */
u8 modrm;
u8 modrm_mod;
@@ -255,34 +272,14 @@ struct decode_cache {
u8 modrm_rm;
u8 modrm_seg;
bool rip_relative;
+ unsigned long _eip;
+ /* Fields above regs are cleared together. */
+ unsigned long regs[NR_VCPU_REGS];
struct fetch_cache fetch;
struct read_cache io_read;
struct read_cache mem_read;
};
-struct x86_emulate_ctxt {
- struct x86_emulate_ops *ops;
-
- /* Register state before/after emulation. */
- unsigned long eflags;
- unsigned long eip; /* eip before instruction emulation */
- /* Emulated execution mode, represented by an X86EMUL_MODE value. */
- int mode;
-
- /* interruptibility state, as a result of execution of STI or MOV SS */
- int interruptibility;
-
- bool guest_mode; /* guest running a nested guest */
- bool perm_ok; /* do not check permissions if true */
- bool only_vendor_specific_insn;
-
- bool have_exception;
- struct x86_exception exception;
-
- /* decode cache */
- struct decode_cache decode;
-};
-
/* Repeat String Operation Prefix */
#define REPE_PREFIX 0xf3
#define REPNE_PREFIX 0xf2
@@ -373,6 +370,5 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int reason,
bool has_error_code, u32 error_code);
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int irq);
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
#endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d2ac8e2ee897..dd51c83aa5de 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -48,7 +48,7 @@
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXSAVE \
+ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -205,6 +205,7 @@ union kvm_mmu_page_role {
unsigned invalid:1;
unsigned nxe:1;
unsigned cr0_wp:1;
+ unsigned smep_andnot_wp:1;
};
};
@@ -227,15 +228,17 @@ struct kvm_mmu_page {
* in this shadow page.
*/
DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
- bool multimapped; /* More than one parent_pte? */
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
- union {
- u64 *parent_pte; /* !multimapped */
- struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
- };
+ unsigned long parent_ptes; /* Reverse mapping for parent_pte */
DECLARE_BITMAP(unsync_child_bitmap, 512);
+
+#ifdef CONFIG_X86_32
+ int clear_spte_count;
+#endif
+
+ struct rcu_head rcu;
};
struct kvm_pv_mmu_op_buffer {
@@ -269,8 +272,6 @@ struct kvm_mmu {
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
struct x86_exception *exception);
gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
- void (*prefetch_page)(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *page);
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
@@ -346,8 +347,7 @@ struct kvm_vcpu_arch {
* put it here to avoid allocation */
struct kvm_pv_mmu_op_buffer mmu_op_buffer;
- struct kvm_mmu_memory_cache mmu_pte_chain_cache;
- struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+ struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
struct kvm_mmu_memory_cache mmu_page_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;
@@ -393,6 +393,15 @@ struct kvm_vcpu_arch {
unsigned int hw_tsc_khz;
unsigned int time_offset;
struct page *time_page;
+
+ struct {
+ u64 msr_val;
+ u64 last_steal;
+ u64 accum_steal;
+ struct gfn_to_hva_cache stime;
+ struct kvm_steal_time steal;
+ } st;
+
u64 last_guest_tsc;
u64 last_kernel_ns;
u64 last_tsc_nsec;
@@ -419,6 +428,11 @@ struct kvm_vcpu_arch {
u64 mcg_ctl;
u64 *mce_banks;
+ /* Cache MMIO info */
+ u64 mmio_gva;
+ unsigned access;
+ gfn_t mmio_gfn;
+
/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;
@@ -441,6 +455,7 @@ struct kvm_arch {
unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
+ unsigned int indirect_shadow_pages;
atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
@@ -477,6 +492,8 @@ struct kvm_arch {
u64 hv_guest_os_id;
u64 hv_hypercall;
+ atomic_t reader_counter;
+
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
#endif
@@ -559,7 +576,7 @@ struct kvm_x86_ops {
void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
- void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+ int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
@@ -636,7 +653,6 @@ void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
-void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);
@@ -830,11 +846,12 @@ enum {
asmlinkage void kvm_spurious_fault(void);
extern bool kvm_rebooting;
-#define __kvm_handle_fault_on_reboot(insn) \
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
"666: " insn "\n\t" \
"668: \n\t" \
".pushsection .fixup, \"ax\" \n" \
"667: \n\t" \
+ cleanup_insn "\n\t" \
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
@@ -844,6 +861,9 @@ extern bool kvm_rebooting;
_ASM_PTR " 666b, 667b \n\t" \
".popsection"
+#define __kvm_handle_fault_on_reboot(insn) \
+ ____kvm_handle_fault_on_reboot(insn, "")
+
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index a427bf77a93d..734c3767cfac 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -21,6 +21,7 @@
*/
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4
+#define KVM_FEATURE_STEAL_TIME 5
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
@@ -30,10 +31,23 @@
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
+#define KVM_MSR_ENABLED 1
/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
+#define MSR_KVM_STEAL_TIME 0x4b564d03
+
+struct kvm_steal_time {
+ __u64 steal;
+ __u32 version;
+ __u32 flags;
+ __u32 pad[12];
+};
+
+#define KVM_STEAL_ALIGNMENT_BITS 5
+#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
+#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1) << 1)
#define KVM_MAX_MMU_OP_BATCH 32
@@ -178,6 +192,7 @@ void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
+extern void kvm_disable_steal_time(void);
#else
#define kvm_guest_init() do { } while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
@@ -186,6 +201,11 @@ static inline u32 kvm_read_and_reset_pf_reason(void)
{
return 0;
}
+
+static inline void kvm_disable_steal_time(void)
+{
+ return;
+}
#endif
#endif /* __KERNEL__ */
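Guest-side enablement of the new steal-time MSR follows the usual publish-a-physical-address pattern; a hedged sketch (the percpu variable name is illustrative; the area must be 64-byte aligned, per KVM_STEAL_ALIGNMENT_BITS):

/* Sketch: the guest hands the host the physical address of a per-cpu
 * kvm_steal_time area, with KVM_MSR_ENABLED set in bit 0. */
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

static void sketch_enable_steal_time(void)
{
	u64 pa = __pa(this_cpu_ptr(&steal_time)) | KVM_MSR_ENABLED;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		wrmsrl(MSR_KVM_STEAL_TIME, pa);
}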
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index b60f2924c413..879fd7d33877 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -61,6 +61,7 @@ hcall(unsigned long call,
: "memory");
return call;
}
+/*:*/
/* Can't use our min() macro here: needs to be a constant */
#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 021979a6e23f..716b48af7863 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -8,6 +8,7 @@
* Machine Check support for x86
*/
+/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
@@ -17,10 +18,12 @@
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+/* MCi_STATUS register defines */
#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
@@ -31,12 +34,14 @@
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
-/* MISC register defines */
-#define MCM_ADDR_SEGOFF 0 /* segment offset */
-#define MCM_ADDR_LINEAR 1 /* linear address */
-#define MCM_ADDR_PHYS 2 /* physical address */
-#define MCM_ADDR_MEM 3 /* memory address */
-#define MCM_ADDR_GENERIC 7 /* generic */
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
+#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
+#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
+#define MCI_MISC_ADDR_PHYS 2 /* physical address */
+#define MCI_MISC_ADDR_MEM 3 /* memory address */
+#define MCI_MISC_ADDR_GENERIC 7 /* generic */
/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN (1ULL << 30)
@@ -144,7 +149,7 @@ static inline void enable_p5_mce(void) {}
void mce_setup(struct mce *m);
void mce_log(struct mce *m);
-DECLARE_PER_CPU(struct sys_device, mce_dev);
+DECLARE_PER_CPU(struct sys_device, mce_sysdev);
/*
* Maximum banks number.
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index ffa037f28d39..55728e121473 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -34,15 +34,15 @@ static inline void resume_map_numa_kva(pgd_t *pgd) {}
* 64Gb / 4096bytes/page = 16777216 pages
*/
#define MAX_NR_PAGES 16777216
-#define MAX_ELEMENTS 1024
-#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
+#define MAX_SECTIONS 1024
+#define PAGES_PER_SECTION (MAX_NR_PAGES/MAX_SECTIONS)
extern s8 physnode_map[];
static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
- return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]);
+ return((int) physnode_map[(pfn) / PAGES_PER_SECTION]);
#else
return 0;
#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 485b4f1f079b..d52609aeeab8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -259,6 +259,9 @@
#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
+#define ENERGY_PERF_BIAS_PERFORMANCE 0
+#define ENERGY_PERF_BIAS_NORMAL 6
+#define ENERGY_PERF_BIAS_POWERSAVE 15
#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
@@ -438,6 +441,18 @@
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
+
+/* VMX_BASIC bits and bitmasks */
+#define VMX_BASIC_VMCS_SIZE_SHIFT 32
+#define VMX_BASIC_64 0x0001000000000000LLU
+#define VMX_BASIC_MEM_TYPE_SHIFT 50
+#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
+#define VMX_BASIC_MEM_TYPE_WB 6LLU
+#define VMX_BASIC_INOUT 0x0040000000000000LLU
/* AMD-V MSRs */
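The new EPB constants slot into the usual rdmsr/modify/wrmsr pattern; a hedged sketch (kernel context, error handling elided; the policy lives in the low four bits of the MSR):

/* Sketch: select the "normal" energy/performance policy on CPUs that
 * advertise MSR_IA32_ENERGY_PERF_BIAS. */
static void sketch_set_energy_bias(void)
{
	u64 epb;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	epb = (epb & ~0xfULL) | ENERGY_PERF_BIAS_NORMAL;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}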
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ebbc4d8ab170..a7d2db9a74fb 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -230,6 +230,15 @@ static inline unsigned long long paravirt_sched_clock(void)
return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
+struct jump_label_key;
+extern struct jump_label_key paravirt_steal_enabled;
+extern struct jump_label_key paravirt_steal_rq_enabled;
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
+}
+
static inline unsigned long long paravirt_read_pmc(int counter)
{
return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 82885099c869..2c7652163111 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -89,6 +89,7 @@ struct pv_lazy_ops {
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
+ unsigned long long (*steal_clock)(int cpu);
unsigned long (*get_tsc_khz)(void);
};
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index a0a9779084d1..3470c9d0ebba 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -388,12 +388,9 @@ do { \
#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-/*
- * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
- * faster than an xchg with forced lock semantics.
- */
-#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -485,6 +482,8 @@ do { \
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index d9d4dae305f6..094fb30817ab 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -152,6 +152,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
(regs)->bp = caller_frame_pointer(); \
(regs)->cs = __KERNEL_CS; \
regs->flags = 0; \
+ asm volatile( \
+ _ASM_MOV "%%"_ASM_SP ", %0\n" \
+ : "=m" ((regs)->sp) \
+ :: "memory" \
+ ); \
}
#else
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 56fd9e3abbda..4f7e67e2345e 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -102,6 +102,14 @@
#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
/*
+ * If an event has an alias it should be marked
+ * with a special bit. (Don't forget to check
+ * P4_PEBS_CONFIG_MASK and related bits on
+ * modification.)
+ */
+#define P4_CONFIG_ALIASABLE (1 << 9)
+
+/*
* The bits we allow to pass for RAW events
*/
#define P4_CONFIG_MASK_ESCR \
@@ -123,6 +131,31 @@
(p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \
(p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
+/*
+ * In case of event aliasing we need to preserve some
+ * caller bits, otherwise the mapping won't be complete.
+ */
+#define P4_CONFIG_EVENT_ALIAS_MASK \
+ (p4_config_pack_escr(P4_CONFIG_MASK_ESCR) | \
+ p4_config_pack_cccr(P4_CCCR_EDGE | \
+ P4_CCCR_THRESHOLD_MASK | \
+ P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE))
+
+#define P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS \
+ ((P4_CONFIG_HT) | \
+ p4_config_pack_escr(P4_ESCR_T0_OS | \
+ P4_ESCR_T0_USR | \
+ P4_ESCR_T1_OS | \
+ P4_ESCR_T1_USR) | \
+ p4_config_pack_cccr(P4_CCCR_OVF | \
+ P4_CCCR_CASCADE | \
+ P4_CCCR_FORCE_OVF | \
+ P4_CCCR_THREAD_ANY | \
+ P4_CCCR_OVF_PMI_T0 | \
+ P4_CCCR_OVF_PMI_T1 | \
+ P4_CONFIG_ALIASABLE))
+
static inline bool p4_is_event_cascaded(u64 config)
{
u32 cccr = p4_config_unpack_cccr(config);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index d56187c6b838..013286a10c2c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -107,7 +107,8 @@
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
+#define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -129,7 +130,8 @@
#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR)
+#define PAGE_KERNEL_VVAR_NOCACHE __pgprot(__PAGE_KERNEL_VVAR_NOCACHE)
#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 59ab4dffa377..2dddb317bb39 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -59,6 +59,7 @@
#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
+#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 971e0b46446e..df1287019e6d 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -30,17 +30,6 @@ extern void add_dtb(u64 data);
extern void x86_add_irq_domains(void);
void __cpuinit x86_of_pci_init(void);
void x86_dtb_init(void);
-
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
- return pdev ? pdev->dev.of_node : NULL;
-}
-
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- return pci_device_to_OF_node(bus->self);
-}
-
#else
static inline void add_dtb(u64 data) { }
static inline void x86_add_irq_domains(void) { }
diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h
index 6a8c0d645108..a5370a03d90c 100644
--- a/arch/x86/include/asm/rwlock.h
+++ b/arch/x86/include/asm/rwlock.h
@@ -1,7 +1,48 @@
#ifndef _ASM_X86_RWLOCK_H
#define _ASM_X86_RWLOCK_H
-#define RW_LOCK_BIAS 0x01000000
+#include <asm/asm.h>
+
+#if CONFIG_NR_CPUS <= 2048
+
+#ifndef __ASSEMBLY__
+typedef union {
+ s32 lock;
+ s32 write;
+} arch_rwlock_t;
+#endif
+
+#define RW_LOCK_BIAS 0x00100000
+#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##l)
+#define READ_LOCK_ATOMIC(n) atomic_##n
+#define WRITE_LOCK_ADD(n) __ASM_FORM_COMMA(addl n)
+#define WRITE_LOCK_SUB(n) __ASM_FORM_COMMA(subl n)
+#define WRITE_LOCK_CMP RW_LOCK_BIAS
+
+#else /* CONFIG_NR_CPUS > 2048 */
+
+#include <linux/const.h>
+
+#ifndef __ASSEMBLY__
+typedef union {
+ s64 lock;
+ struct {
+ u32 read;
+ s32 write;
+ };
+} arch_rwlock_t;
+#endif
+
+#define RW_LOCK_BIAS (_AC(1,L) << 32)
+#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##q)
+#define READ_LOCK_ATOMIC(n) atomic64_##n
+#define WRITE_LOCK_ADD(n) __ASM_FORM(incl)
+#define WRITE_LOCK_SUB(n) __ASM_FORM(decl)
+#define WRITE_LOCK_CMP 1
+
+#endif /* CONFIG_NR_CPUS */
+
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
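A back-of-envelope note on the two layouts selected above (a sketch, not authoritative):

/* - NR_CPUS <= 2048: a single 32-bit word; RW_LOCK_BIAS = 0x00100000
 *   leaves room for up to 2^20 simultaneous readers.
 * - NR_CPUS >  2048: a 64-bit word; the reader count occupies the low
 *   32 bits and the writer bias sits at bit 32 (RW_LOCK_BIAS = 1 << 32),
 *   so even a huge reader count can no longer collide with the bias.
 */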
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index cd84f7208f76..5e641715c3fe 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -162,7 +162,7 @@
#define GDT_ENTRY_DEFAULT_USER32_CS 4
#define GDT_ENTRY_DEFAULT_USER_DS 5
#define GDT_ENTRY_DEFAULT_USER_CS 6
-#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
+#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
#define __USER32_DS __USER_DS
#define GDT_ENTRY_TSS 8 /* needs two entries */
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 725b77831993..49adfd7bb4a4 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -10,7 +10,11 @@ static inline void smpboot_clear_io_apic_irqs(void)
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
CMOS_WRITE(0xa, 0xf);
+ spin_unlock_irqrestore(&rtc_lock, flags);
local_flush_tlb();
pr_debug("1.\n");
*((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
@@ -23,6 +27,8 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
static inline void smpboot_restore_warm_reset_vector(void)
{
+ unsigned long flags;
+
/*
* Install writable page 0 entry to set BIOS data area.
*/
@@ -32,7 +38,9 @@ static inline void smpboot_restore_warm_reset_vector(void)
* Paranoid: Set warm reset code and vector here back
* to default values.
*/
+ spin_lock_irqsave(&rtc_lock, flags);
CMOS_WRITE(0, 0xf);
+ spin_unlock_irqrestore(&rtc_lock, flags);
*((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 3089f70c0c52..e9e51f710e6c 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -2,7 +2,6 @@
#define _ASM_X86_SPINLOCK_H
#include <asm/atomic.h>
-#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
@@ -234,7 +233,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
*/
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
- return (int)(lock)->lock > 0;
+ return lock->lock > 0;
}
/**
@@ -243,12 +242,12 @@ static inline int arch_read_can_lock(arch_rwlock_t *lock)
*/
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
- return (lock)->lock == RW_LOCK_BIAS;
+ return lock->write == WRITE_LOCK_CMP;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
"jns 1f\n"
"call __read_lock_failed\n\t"
"1:\n"
@@ -257,47 +256,55 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
+ asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
"jz 1f\n"
"call __write_lock_failed\n\t"
"1:\n"
- ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
+ ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
+ : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
- atomic_t *count = (atomic_t *)lock;
+ READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;
- if (atomic_dec_return(count) >= 0)
+ if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
return 1;
- atomic_inc(count);
+ READ_LOCK_ATOMIC(inc)(count);
return 0;
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
- atomic_t *count = (atomic_t *)lock;
+ atomic_t *count = (atomic_t *)&lock->write;
- if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+ if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
return 1;
- atomic_add(RW_LOCK_BIAS, count);
+ atomic_add(WRITE_LOCK_CMP, count);
return 0;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
+ :"+m" (rw->lock) : : "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX "addl %1, %0"
- : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
+ : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+#undef READ_LOCK_SIZE
+#undef READ_LOCK_ATOMIC
+#undef WRITE_LOCK_ADD
+#undef WRITE_LOCK_SUB
+#undef WRITE_LOCK_CMP
+
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index dcb48b2edc11..7c7a486fcb68 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -11,10 +11,6 @@ typedef struct arch_spinlock {
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
-typedef struct {
- unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#include <asm/rwlock.h>
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/arch/x86/include/asm/time.h b/arch/x86/include/asm/time.h
index 7bdec4e9b739..92b8aec06970 100644
--- a/arch/x86/include/asm/time.h
+++ b/arch/x86/include/asm/time.h
@@ -1,10 +1,12 @@
#ifndef _ASM_X86_TIME_H
#define _ASM_X86_TIME_H
-extern void hpet_time_init(void);
-
+#include <linux/clocksource.h>
#include <asm/mc146818rtc.h>
+extern void hpet_time_init(void);
extern void time_init(void);
+extern struct clock_event_device *global_clock_event;
+
#endif /* _ASM_X86_TIME_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0310da67307f..2bae0a513b40 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_TRAPS_H
#define _ASM_X86_TRAPS_H
+#include <linux/kprobes.h>
+
#include <asm/debugreg.h>
#include <asm/siginfo.h> /* TRAP_TRACE, ... */
@@ -38,6 +40,7 @@ asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
#endif /* CONFIG_X86_MCE */
asmlinkage void simd_coprocessor_error(void);
+asmlinkage void emulate_vsyscall(void);
dotraplinkage void do_divide_error(struct pt_regs *, long);
dotraplinkage void do_debug(struct pt_regs *, long);
@@ -64,6 +67,7 @@ dotraplinkage void do_alignment_check(struct pt_regs *, long);
dotraplinkage void do_machine_check(struct pt_regs *, long);
#endif
dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
+dotraplinkage void do_emulate_vsyscall(struct pt_regs *, long);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *, long);
#endif
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 9db5583b6d38..83e2efd181e2 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -51,10 +51,6 @@ extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern unsigned long native_calibrate_tsc(void);
-#ifdef CONFIG_X86_64
-extern cycles_t vread_tsc(void);
-#endif
-
/*
* Boot-time check whether the TSCs are synchronized across
* all CPUs/cores:
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 99ddd148a760..36361bf6fdd1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -555,6 +555,9 @@ struct __large_struct { unsigned long buf[100]; };
#endif /* CONFIG_X86_WP_WORKS_OK */
+extern unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index a291c40efd43..37d369859c8e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -67,7 +67,7 @@
* we're using 655us, similar to UV1: 65 units of 10us
*/
#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
-#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (65*10UL)
+#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
@@ -106,12 +106,20 @@
#define DS_SOURCE_TIMEOUT 3
/*
* bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
- * values 1 and 5 will not occur
+ * values 1 and 3 will not occur
+ * Decoded meaning ERROR BUSY AUX ERR
+ * ------------------------------- ---- ----- -------
+ * IDLE 0 0 0
+ * BUSY (active) 0 1 0
+ * SW Ack Timeout (destination) 1 0 0
+ * SW Ack INTD rejected (strong NACK) 1 0 1
+ * Source Side Time Out Detected 1 1 0
+ * Destination Side PUT Failed 1 1 1
*/
#define UV2H_DESC_IDLE 0
-#define UV2H_DESC_DEST_TIMEOUT 2
-#define UV2H_DESC_DEST_STRONG_NACK 3
-#define UV2H_DESC_BUSY 4
+#define UV2H_DESC_BUSY 2
+#define UV2H_DESC_DEST_TIMEOUT 4
+#define UV2H_DESC_DEST_STRONG_NACK 5
#define UV2H_DESC_SOURCE_TIMEOUT 6
#define UV2H_DESC_DEST_PUT_ERR 7
@@ -183,7 +191,7 @@
* 'base_dest_nasid' field of the header corresponds to the
* destination nodeID associated with that specified bit.
*/
-struct bau_targ_hubmask {
+struct pnmask {
unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};
@@ -314,7 +322,7 @@ struct bau_msg_header {
* Should be 64 bytes
*/
struct bau_desc {
- struct bau_targ_hubmask distribution;
+ struct pnmask distribution;
/*
* message template, consisting of header and payload:
*/
@@ -488,6 +496,7 @@ struct bau_control {
struct bau_control *uvhub_master;
struct bau_control *socket_master;
struct ptc_stats *statp;
+ cpumask_t *cpumask;
unsigned long timeout_interval;
unsigned long set_bau_on_time;
atomic_t active_descriptor_count;
@@ -526,90 +535,90 @@ struct bau_control {
struct hub_and_pnode *thp;
};
-static unsigned long read_mmr_uv2_status(void)
+static inline unsigned long read_mmr_uv2_status(void)
{
return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
}
-static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
+static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
}
-static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
+static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
}
-static void write_mmr_activation(unsigned long index)
+static inline void write_mmr_activation(unsigned long index)
{
write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
}
-static void write_gmmr_activation(int pnode, unsigned long mmr_image)
+static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
}
-static void write_mmr_payload_first(int pnode, unsigned long mmr_image)
+static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
}
-static void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
+static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
}
-static void write_mmr_payload_last(int pnode, unsigned long mmr_image)
+static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
}
-static void write_mmr_misc_control(int pnode, unsigned long mmr_image)
+static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
}
-static unsigned long read_mmr_misc_control(int pnode)
+static inline unsigned long read_mmr_misc_control(int pnode)
{
return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
}
-static void write_mmr_sw_ack(unsigned long mr)
+static inline void write_mmr_sw_ack(unsigned long mr)
{
uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
}
-static unsigned long read_mmr_sw_ack(void)
+static inline unsigned long read_mmr_sw_ack(void)
{
return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
}
-static unsigned long read_gmmr_sw_ack(int pnode)
+static inline unsigned long read_gmmr_sw_ack(int pnode)
{
return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
}
-static void write_mmr_data_config(int pnode, unsigned long mr)
+static inline void write_mmr_data_config(int pnode, unsigned long mr)
{
uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
}
-static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp)
+static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp)
{
return constant_test_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
+static inline void bau_uvhub_set(int pnode, struct pnmask *dstp)
{
__set_bit(pnode, &dstp->bits[0]);
}
-static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp,
+static inline void bau_uvhubs_clear(struct pnmask *dstp,
int nbits)
{
bitmap_zero(&dstp->bits[0], nbits);
}
-static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp)
+static inline int bau_uvhub_weight(struct pnmask *dstp)
{
return bitmap_weight((unsigned long *)&dstp->bits[0],
UV_DISTRIBUTION_SIZE);
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 4be52c863448..10474fb1185d 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -61,1689 +61,2016 @@
/* Compat: if this #define is present, UV headers support UV2 */
#define UV2_HUB_IS_SUPPORTED 1
-/* KABI compat: if this #define is present, KABI hacks are present */
-#define UV2_HUB_KABI_HACKS 1
-
/* ========================================================================= */
/* UVH_BAU_DATA_BROADCAST */
/* ========================================================================= */
-#define UVH_BAU_DATA_BROADCAST 0x61688UL
-#define UVH_BAU_DATA_BROADCAST_32 0x440
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x440
-#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
-#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
+#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
union uvh_bau_data_broadcast_u {
- unsigned long v;
- struct uvh_bau_data_broadcast_s {
- unsigned long enable : 1; /* RW */
- unsigned long rsvd_1_63: 63; /* */
- } s;
+ unsigned long v;
+ struct uvh_bau_data_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s;
};
/* ========================================================================= */
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
-#define UVH_BAU_DATA_CONFIG 0x61680UL
-#define UVH_BAU_DATA_CONFIG_32 0x438
-
-#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
-#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
-#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
-#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
-#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_BAU_DATA_CONFIG_P_SHFT 13
-#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_BAU_DATA_CONFIG_T_SHFT 15
-#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_BAU_DATA_CONFIG_M_SHFT 16
-#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
-#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x438
+
+#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
+#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
+#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
+#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
+#define UVH_BAU_DATA_CONFIG_P_SHFT 13
+#define UVH_BAU_DATA_CONFIG_T_SHFT 15
+#define UVH_BAU_DATA_CONFIG_M_SHFT 16
+#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
+#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_bau_data_config_u {
- unsigned long v;
- struct uvh_bau_data_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_bau_data_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
};
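The SHFT/MASK regrouping above is purely cosmetic; the register keeps the usual APIC redirection-entry shape. A minimal programming sketch (hypothetical helper and field values; write_mmr_data_config is the wrapper from the earlier hunk):

	static inline void bau_set_data_config(int pnode, u8 vector, u32 apicid)
	{
		union uvh_bau_data_config_u cfg = { .v = 0UL };

		cfg.s.vector_ = vector;	/* vector to deliver */
		cfg.s.dm = 0;		/* fixed delivery mode (assumed) */
		cfg.s.apic_id = apicid;	/* destination APIC id */
		write_mmr_data_config(pnode, cfg.v);
	}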
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0 */
/* ========================================================================= */
-#define UVH_EVENT_OCCURRED0 0x70000UL
-#define UVH_EVENT_OCCURRED0_32 0x5e8
-
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
-#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
-#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
-#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
-#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
-#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
-#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
-#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
-#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
-#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
-#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
-#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
-#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
-#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
-#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
-#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
-#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
-#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
-#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
-#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
-#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
-#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
-#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
-#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
-#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
-#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
-#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
-#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
-#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
-#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
-#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
-#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
-#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
-#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
-#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
-#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
-#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
-#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
-#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
-#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
-#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
-#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
-#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
-#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
-#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
-#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
-#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
-#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
-#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
-#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
-#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
-#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
-#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
-#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
-#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
-
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
-#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
-#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x5e8
+
+#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
+#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
+#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+
+#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
+#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
+#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
+#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
+#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
+#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
+#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
+#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
+#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
union uvh_event_occurred0_u {
- unsigned long v;
- struct uv1h_event_occurred0_s {
- unsigned long lb_hcerr : 1; /* RW, W1C */
- unsigned long gr0_hcerr : 1; /* RW, W1C */
- unsigned long gr1_hcerr : 1; /* RW, W1C */
- unsigned long lh_hcerr : 1; /* RW, W1C */
- unsigned long rh_hcerr : 1; /* RW, W1C */
- unsigned long xn_hcerr : 1; /* RW, W1C */
- unsigned long si_hcerr : 1; /* RW, W1C */
- unsigned long lb_aoerr0 : 1; /* RW, W1C */
- unsigned long gr0_aoerr0 : 1; /* RW, W1C */
- unsigned long gr1_aoerr0 : 1; /* RW, W1C */
- unsigned long lh_aoerr0 : 1; /* RW, W1C */
- unsigned long rh_aoerr0 : 1; /* RW, W1C */
- unsigned long xn_aoerr0 : 1; /* RW, W1C */
- unsigned long si_aoerr0 : 1; /* RW, W1C */
- unsigned long lb_aoerr1 : 1; /* RW, W1C */
- unsigned long gr0_aoerr1 : 1; /* RW, W1C */
- unsigned long gr1_aoerr1 : 1; /* RW, W1C */
- unsigned long lh_aoerr1 : 1; /* RW, W1C */
- unsigned long rh_aoerr1 : 1; /* RW, W1C */
- unsigned long xn_aoerr1 : 1; /* RW, W1C */
- unsigned long si_aoerr1 : 1; /* RW, W1C */
- unsigned long rh_vpi_int : 1; /* RW, W1C */
- unsigned long system_shutdown_int : 1; /* RW, W1C */
- unsigned long lb_irq_int_0 : 1; /* RW, W1C */
- unsigned long lb_irq_int_1 : 1; /* RW, W1C */
- unsigned long lb_irq_int_2 : 1; /* RW, W1C */
- unsigned long lb_irq_int_3 : 1; /* RW, W1C */
- unsigned long lb_irq_int_4 : 1; /* RW, W1C */
- unsigned long lb_irq_int_5 : 1; /* RW, W1C */
- unsigned long lb_irq_int_6 : 1; /* RW, W1C */
- unsigned long lb_irq_int_7 : 1; /* RW, W1C */
- unsigned long lb_irq_int_8 : 1; /* RW, W1C */
- unsigned long lb_irq_int_9 : 1; /* RW, W1C */
- unsigned long lb_irq_int_10 : 1; /* RW, W1C */
- unsigned long lb_irq_int_11 : 1; /* RW, W1C */
- unsigned long lb_irq_int_12 : 1; /* RW, W1C */
- unsigned long lb_irq_int_13 : 1; /* RW, W1C */
- unsigned long lb_irq_int_14 : 1; /* RW, W1C */
- unsigned long lb_irq_int_15 : 1; /* RW, W1C */
- unsigned long l1_nmi_int : 1; /* RW, W1C */
- unsigned long stop_clock : 1; /* RW, W1C */
- unsigned long asic_to_l1 : 1; /* RW, W1C */
- unsigned long l1_to_asic : 1; /* RW, W1C */
- unsigned long ltc_int : 1; /* RW, W1C */
- unsigned long la_seq_trigger : 1; /* RW, W1C */
- unsigned long ipi_int : 1; /* RW, W1C */
- unsigned long extio_int0 : 1; /* RW, W1C */
- unsigned long extio_int1 : 1; /* RW, W1C */
- unsigned long extio_int2 : 1; /* RW, W1C */
- unsigned long extio_int3 : 1; /* RW, W1C */
- unsigned long profile_int : 1; /* RW, W1C */
- unsigned long rtc0 : 1; /* RW, W1C */
- unsigned long rtc1 : 1; /* RW, W1C */
- unsigned long rtc2 : 1; /* RW, W1C */
- unsigned long rtc3 : 1; /* RW, W1C */
- unsigned long bau_data : 1; /* RW, W1C */
- unsigned long power_management_req : 1; /* RW, W1C */
- unsigned long rsvd_57_63 : 7; /* */
- } s1;
- struct uv2h_event_occurred0_s {
- unsigned long lb_hcerr : 1; /* RW */
- unsigned long qp_hcerr : 1; /* RW */
- unsigned long rh_hcerr : 1; /* RW */
- unsigned long lh0_hcerr : 1; /* RW */
- unsigned long lh1_hcerr : 1; /* RW */
- unsigned long gr0_hcerr : 1; /* RW */
- unsigned long gr1_hcerr : 1; /* RW */
- unsigned long ni0_hcerr : 1; /* RW */
- unsigned long ni1_hcerr : 1; /* RW */
- unsigned long lb_aoerr0 : 1; /* RW */
- unsigned long qp_aoerr0 : 1; /* RW */
- unsigned long rh_aoerr0 : 1; /* RW */
- unsigned long lh0_aoerr0 : 1; /* RW */
- unsigned long lh1_aoerr0 : 1; /* RW */
- unsigned long gr0_aoerr0 : 1; /* RW */
- unsigned long gr1_aoerr0 : 1; /* RW */
- unsigned long xb_aoerr0 : 1; /* RW */
- unsigned long rt_aoerr0 : 1; /* RW */
- unsigned long ni0_aoerr0 : 1; /* RW */
- unsigned long ni1_aoerr0 : 1; /* RW */
- unsigned long lb_aoerr1 : 1; /* RW */
- unsigned long qp_aoerr1 : 1; /* RW */
- unsigned long rh_aoerr1 : 1; /* RW */
- unsigned long lh0_aoerr1 : 1; /* RW */
- unsigned long lh1_aoerr1 : 1; /* RW */
- unsigned long gr0_aoerr1 : 1; /* RW */
- unsigned long gr1_aoerr1 : 1; /* RW */
- unsigned long xb_aoerr1 : 1; /* RW */
- unsigned long rt_aoerr1 : 1; /* RW */
- unsigned long ni0_aoerr1 : 1; /* RW */
- unsigned long ni1_aoerr1 : 1; /* RW */
- unsigned long system_shutdown_int : 1; /* RW */
- unsigned long lb_irq_int_0 : 1; /* RW */
- unsigned long lb_irq_int_1 : 1; /* RW */
- unsigned long lb_irq_int_2 : 1; /* RW */
- unsigned long lb_irq_int_3 : 1; /* RW */
- unsigned long lb_irq_int_4 : 1; /* RW */
- unsigned long lb_irq_int_5 : 1; /* RW */
- unsigned long lb_irq_int_6 : 1; /* RW */
- unsigned long lb_irq_int_7 : 1; /* RW */
- unsigned long lb_irq_int_8 : 1; /* RW */
- unsigned long lb_irq_int_9 : 1; /* RW */
- unsigned long lb_irq_int_10 : 1; /* RW */
- unsigned long lb_irq_int_11 : 1; /* RW */
- unsigned long lb_irq_int_12 : 1; /* RW */
- unsigned long lb_irq_int_13 : 1; /* RW */
- unsigned long lb_irq_int_14 : 1; /* RW */
- unsigned long lb_irq_int_15 : 1; /* RW */
- unsigned long l1_nmi_int : 1; /* RW */
- unsigned long stop_clock : 1; /* RW */
- unsigned long asic_to_l1 : 1; /* RW */
- unsigned long l1_to_asic : 1; /* RW */
- unsigned long la_seq_trigger : 1; /* RW */
- unsigned long ipi_int : 1; /* RW */
- unsigned long extio_int0 : 1; /* RW */
- unsigned long extio_int1 : 1; /* RW */
- unsigned long extio_int2 : 1; /* RW */
- unsigned long extio_int3 : 1; /* RW */
- unsigned long profile_int : 1; /* RW */
- unsigned long rsvd_59_63 : 5; /* */
- } s2;
+ unsigned long v;
+ struct uv1h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW, W1C */
+ unsigned long gr0_hcerr:1; /* RW, W1C */
+ unsigned long gr1_hcerr:1; /* RW, W1C */
+ unsigned long lh_hcerr:1; /* RW, W1C */
+ unsigned long rh_hcerr:1; /* RW, W1C */
+ unsigned long xn_hcerr:1; /* RW, W1C */
+ unsigned long si_hcerr:1; /* RW, W1C */
+ unsigned long lb_aoerr0:1; /* RW, W1C */
+ unsigned long gr0_aoerr0:1; /* RW, W1C */
+ unsigned long gr1_aoerr0:1; /* RW, W1C */
+ unsigned long lh_aoerr0:1; /* RW, W1C */
+ unsigned long rh_aoerr0:1; /* RW, W1C */
+ unsigned long xn_aoerr0:1; /* RW, W1C */
+ unsigned long si_aoerr0:1; /* RW, W1C */
+ unsigned long lb_aoerr1:1; /* RW, W1C */
+ unsigned long gr0_aoerr1:1; /* RW, W1C */
+ unsigned long gr1_aoerr1:1; /* RW, W1C */
+ unsigned long lh_aoerr1:1; /* RW, W1C */
+ unsigned long rh_aoerr1:1; /* RW, W1C */
+ unsigned long xn_aoerr1:1; /* RW, W1C */
+ unsigned long si_aoerr1:1; /* RW, W1C */
+ unsigned long rh_vpi_int:1; /* RW, W1C */
+ unsigned long system_shutdown_int:1; /* RW, W1C */
+ unsigned long lb_irq_int_0:1; /* RW, W1C */
+ unsigned long lb_irq_int_1:1; /* RW, W1C */
+ unsigned long lb_irq_int_2:1; /* RW, W1C */
+ unsigned long lb_irq_int_3:1; /* RW, W1C */
+ unsigned long lb_irq_int_4:1; /* RW, W1C */
+ unsigned long lb_irq_int_5:1; /* RW, W1C */
+ unsigned long lb_irq_int_6:1; /* RW, W1C */
+ unsigned long lb_irq_int_7:1; /* RW, W1C */
+ unsigned long lb_irq_int_8:1; /* RW, W1C */
+ unsigned long lb_irq_int_9:1; /* RW, W1C */
+ unsigned long lb_irq_int_10:1; /* RW, W1C */
+ unsigned long lb_irq_int_11:1; /* RW, W1C */
+ unsigned long lb_irq_int_12:1; /* RW, W1C */
+ unsigned long lb_irq_int_13:1; /* RW, W1C */
+ unsigned long lb_irq_int_14:1; /* RW, W1C */
+ unsigned long lb_irq_int_15:1; /* RW, W1C */
+ unsigned long l1_nmi_int:1; /* RW, W1C */
+ unsigned long stop_clock:1; /* RW, W1C */
+ unsigned long asic_to_l1:1; /* RW, W1C */
+ unsigned long l1_to_asic:1; /* RW, W1C */
+ unsigned long ltc_int:1; /* RW, W1C */
+ unsigned long la_seq_trigger:1; /* RW, W1C */
+ unsigned long ipi_int:1; /* RW, W1C */
+ unsigned long extio_int0:1; /* RW, W1C */
+ unsigned long extio_int1:1; /* RW, W1C */
+ unsigned long extio_int2:1; /* RW, W1C */
+ unsigned long extio_int3:1; /* RW, W1C */
+ unsigned long profile_int:1; /* RW, W1C */
+ unsigned long rtc0:1; /* RW, W1C */
+ unsigned long rtc1:1; /* RW, W1C */
+ unsigned long rtc2:1; /* RW, W1C */
+ unsigned long rtc3:1; /* RW, W1C */
+ unsigned long bau_data:1; /* RW, W1C */
+ unsigned long power_management_req:1; /* RW, W1C */
+ unsigned long rsvd_57_63:7;
+ } s1;
+ struct uv2h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long qp_hcerr:1; /* RW */
+ unsigned long rh_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long gr0_hcerr:1; /* RW */
+ unsigned long gr1_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long qp_aoerr0:1; /* RW */
+ unsigned long rh_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long gr0_aoerr0:1; /* RW */
+ unsigned long gr1_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rt_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long qp_aoerr1:1; /* RW */
+ unsigned long rh_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long gr0_aoerr1:1; /* RW */
+ unsigned long gr1_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rt_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long rsvd_59_63:5;
+ } s2;
};
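Decode sketch for the event register above (hypothetical helper, not part of this patch). The mask macros and the per-generation bitfield views name the same bits, so either form works:

	static inline int uv1_lb_irq_int_0_pending(unsigned long v)
	{
		/*
		 * Equivalent bitfield form:
		 * ((union uvh_event_occurred0_u){ .v = v }).s1.lb_irq_int_0
		 */
		return (v & UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK) != 0;
	}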
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0_ALIAS */
/* ========================================================================= */
-#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
-
-#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
-#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
-#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
-#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
-#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr0_tlb_int0_config_u {
- unsigned long v;
- struct uvh_gr0_tlb_int0_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_GR0_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
-
-#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
-#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
-#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
-#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
-#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
-#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
-#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr0_tlb_int1_config_u {
- unsigned long v;
- struct uvh_gr0_tlb_int1_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_MMR_CONTROL */
+/* ========================================================================= */
+#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
+#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
+#define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ? \
+ UV1H_GR0_TLB_MMR_CONTROL : \
+ UV2H_GR0_TLB_MMR_CONTROL)
+
+#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+
+#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT 54
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT 56
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT 60
+#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK 0x0040000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+
+#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+
+union uvh_gr0_tlb_mmr_control_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_63:32;
+ } s;
+ struct uv1h_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_47:16;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53:1;
+ unsigned long mmr_inj_tlbpgsize:1; /* RW */
+ unsigned long rsvd_55:1;
+ unsigned long mmr_inj_tlbrreg:1; /* RW */
+ unsigned long rsvd_57_59:3;
+ unsigned long mmr_inj_tlblruv:1; /* RW */
+ unsigned long rsvd_61_63:3;
+ } s1;
+ struct uv2h_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53_63:11;
+ } s2;
+};
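UVH_GR0_TLB_MMR_CONTROL above resolves to a per-generation offset through is_uv1_hub(). A write sketch using only the fields common to both generations (hypothetical helper; write_gmmr is the accessor from the earlier hunk, and the actual TLB-walk protocol is not defined by this header):

	static inline void gr0_tlb_request_read(int pnode, int index)
	{
		union uvh_gr0_tlb_mmr_control_u c = { .v = 0UL };

		c.s.index = index;	/* TLB entry to access */
		c.s.mmr_read = 1;	/* WP field: trigger the read op */
		write_gmmr(pnode, UVH_GR0_TLB_MMR_CONTROL, c.v);
	}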
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_MMR_READ_DATA_HI */
+/* ========================================================================= */
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \
+ UV1H_GR0_TLB_MMR_READ_DATA_HI : \
+ UV2H_GR0_TLB_MMR_READ_DATA_HI)
+
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+union uvh_gr0_tlb_mmr_read_data_hi_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_MMR_READ_DATA_LO */
+/* ========================================================================= */
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \
+ UV1H_GR0_TLB_MMR_READ_DATA_LO : \
+ UV2H_GR0_TLB_MMR_READ_DATA_LO)
+
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+union uvh_gr0_tlb_mmr_read_data_lo_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s;
};
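A matching decode sketch for the READ_DATA_HI/LO pair (hypothetical helper; PFN_SHFT is 0, so the shift is shown only to keep the mask-then-shift idiom explicit):

	static inline unsigned long gr0_tlb_read_pfn(int pnode)
	{
		unsigned long hi = read_gmmr(pnode, UVH_GR0_TLB_MMR_READ_DATA_HI);

		return (hi & UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK)
			>> UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT;
	}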
/* ========================================================================= */
/* UVH_GR1_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
-
-#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
-#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
-#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
-#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
-#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr1_tlb_int0_config_u {
- unsigned long v;
- struct uvh_gr1_tlb_int0_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_GR1_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
-
-#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
-#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
-#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
-#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
-#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
-#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
-#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr1_tlb_int1_config_u {
- unsigned long v;
- struct uvh_gr1_tlb_int1_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_MMR_CONTROL */
+/* ========================================================================= */
+#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
+#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
+#define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ? \
+ UV1H_GR1_TLB_MMR_CONTROL : \
+ UV2H_GR1_TLB_MMR_CONTROL)
+
+#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+
+#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT 54
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT 56
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT 60
+#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK 0x0040000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+
+#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+
+union uvh_gr1_tlb_mmr_control_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_63:32;
+ } s;
+ struct uv1h_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_47:16;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53:1;
+ unsigned long mmr_inj_tlbpgsize:1; /* RW */
+ unsigned long rsvd_55:1;
+ unsigned long mmr_inj_tlbrreg:1; /* RW */
+ unsigned long rsvd_57_59:3;
+ unsigned long mmr_inj_tlblruv:1; /* RW */
+ unsigned long rsvd_61_63:3;
+ } s1;
+ struct uv2h_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53_63:11;
+ } s2;
+};
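
/*
 * Sketch of why the union carries per-hub views: fields common to both
 * hubs go through .s, while UV2-only bits such as mmr_op_done go
 * through .s2, gated on the same is_uv1_hub() test the offset macros
 * use. Polling mmr_op_done as a completion handshake is an assumption
 * here, and the helper is hypothetical; uv_read_local_mmr() and
 * uv_write_local_mmr() are the <asm/uv/uv_hub.h> accessors.
 */
static inline void uv_gr1_tlb_mmr_read(int index)
{
	union uvh_gr1_tlb_mmr_control_u ctl;

	ctl.v = 0;
	ctl.s.index = index;		/* TLB slot to access */
	ctl.s.mmr_read = 1;		/* WP: pulse a read */
	uv_write_local_mmr(UVH_GR1_TLB_MMR_CONTROL, ctl.v);

	if (!is_uv1_hub()) {
		/* mmr_op_done exists only in the UV2 layout (.s2) */
		do {
			ctl.v = uv_read_local_mmr(UVH_GR1_TLB_MMR_CONTROL);
		} while (!ctl.s2.mmr_op_done);
	}
}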
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_MMR_READ_DATA_HI */
+/* ========================================================================= */
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \
+ UV1H_GR1_TLB_MMR_READ_DATA_HI : \
+ UV2H_GR1_TLB_MMR_READ_DATA_HI)
+
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+union uvh_gr1_tlb_mmr_read_data_hi_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_MMR_READ_DATA_LO */
+/* ========================================================================= */
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \
+ UV1H_GR1_TLB_MMR_READ_DATA_LO : \
+ UV2H_GR1_TLB_MMR_READ_DATA_LO)
+
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+union uvh_gr1_tlb_mmr_read_data_lo_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s;
};
/* ========================================================================= */
/* UVH_INT_CMPB */
/* ========================================================================= */
-#define UVH_INT_CMPB 0x22080UL
+#define UVH_INT_CMPB 0x22080UL
-#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
-#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
+#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
union uvh_int_cmpb_u {
- unsigned long v;
- struct uvh_int_cmpb_s {
- unsigned long real_time_cmpb : 56; /* RW */
- unsigned long rsvd_56_63 : 8; /* */
- } s;
+ unsigned long v;
+ struct uvh_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
};
/* ========================================================================= */
/* UVH_INT_CMPC */
/* ========================================================================= */
-#define UVH_INT_CMPC 0x22100UL
+#define UVH_INT_CMPC 0x22100UL
-#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT (is_uv1_hub() ? \
- UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT : \
- UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT)
-#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
-#define UV2H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
-#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK (is_uv1_hub() ? \
- UV1H_INT_CMPC_REAL_TIME_CMPC_MASK : \
- UV2H_INT_CMPC_REAL_TIME_CMPC_MASK)
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
union uvh_int_cmpc_u {
- unsigned long v;
- struct uvh_int_cmpc_s {
- unsigned long real_time_cmpc : 56; /* RW */
- unsigned long rsvd_56_63 : 8; /* */
- } s;
+ unsigned long v;
+ struct uvh_int_cmpc_s {
+ unsigned long real_time_cmpc:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
};
/* ========================================================================= */
/* UVH_INT_CMPD */
/* ========================================================================= */
-#define UVH_INT_CMPD 0x22180UL
+#define UVH_INT_CMPD 0x22180UL
-#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT (is_uv1_hub() ? \
- UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT : \
- UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT)
-#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
-#define UV2H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
-#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK (is_uv1_hub() ? \
- UV1H_INT_CMPD_REAL_TIME_CMPD_MASK : \
- UV2H_INT_CMPD_REAL_TIME_CMPD_MASK)
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
union uvh_int_cmpd_u {
- unsigned long v;
- struct uvh_int_cmpd_s {
- unsigned long real_time_cmpd : 56; /* RW */
- unsigned long rsvd_56_63 : 8; /* */
- } s;
+ unsigned long v;
+ struct uvh_int_cmpd_s {
+ unsigned long real_time_cmpd:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
};
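
/*
 * The three RTC comparators are programmed identically; a sketch for
 * CMPB (CMPC and CMPD follow the same pattern), showing the bitfield
 * form and the equivalent SHFT/MASK form. 'expires' is an illustrative
 * 56-bit RTC compare value; uv_write_local_mmr() is the uv_hub.h
 * accessor.
 */
static inline void uv_set_rtc_cmpb(unsigned long expires)
{
	union uvh_int_cmpb_u cmpb;

	cmpb.v = 0;
	cmpb.s.real_time_cmpb = expires;	/* truncated to 56 bits */
	uv_write_local_mmr(UVH_INT_CMPB, cmpb.v);

	/* the same store, written with the mask/shift macros */
	uv_write_local_mmr(UVH_INT_CMPB,
			   (expires << UVH_INT_CMPB_REAL_TIME_CMPB_SHFT) &
			   UVH_INT_CMPB_REAL_TIME_CMPB_MASK);
}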
/* ========================================================================= */
/* UVH_IPI_INT */
/* ========================================================================= */
-#define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x348
+#define UVH_IPI_INT 0x60500UL
+#define UVH_IPI_INT_32 0x348
-#define UVH_IPI_INT_VECTOR_SHFT 0
-#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
-#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
-#define UVH_IPI_INT_DESTMODE_SHFT 11
-#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_IPI_INT_APIC_ID_SHFT 16
-#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
-#define UVH_IPI_INT_SEND_SHFT 63
-#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
+#define UVH_IPI_INT_VECTOR_SHFT 0
+#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
+#define UVH_IPI_INT_DESTMODE_SHFT 11
+#define UVH_IPI_INT_APIC_ID_SHFT 16
+#define UVH_IPI_INT_SEND_SHFT 63
+#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
+#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
+#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
union uvh_ipi_int_u {
- unsigned long v;
- struct uvh_ipi_int_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long delivery_mode : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long rsvd_12_15 : 4; /* */
- unsigned long apic_id : 32; /* RW */
- unsigned long rsvd_48_62 : 15; /* */
- unsigned long send : 1; /* WP */
- } s;
+ unsigned long v;
+ struct uvh_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
+ } s;
};
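
/*
 * Sketch of composing an IPI doorbell word from the SHFT macros; the
 * WP send bit fires the interrupt as part of the write itself.
 * uv_write_global_mmr64() is the uv_hub.h accessor for addressing a
 * remote pnode; the helper and its parameters are illustrative.
 */
static inline void uv_ipi_send(int pnode, int apicid, int vector, int dmode)
{
	unsigned long val;

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |		/* WP: fire */
	      ((unsigned long)apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      ((unsigned long)dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
	      ((unsigned long)vector << UVH_IPI_INT_VECTOR_SHFT);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}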
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
union uvh_lb_bau_intd_payload_queue_first_u {
- unsigned long v;
- struct uvh_lb_bau_intd_payload_queue_first_s {
- unsigned long rsvd_0_3: 4; /* */
- unsigned long address : 39; /* RW */
- unsigned long rsvd_43_48: 6; /* */
- unsigned long node_id : 14; /* RW */
- unsigned long rsvd_63 : 1; /* */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_intd_payload_queue_first_s {
+ unsigned long rsvd_0_3:4;
+ unsigned long address:39; /* RW */
+ unsigned long rsvd_43_48:6;
+ unsigned long node_id:14; /* RW */
+ unsigned long rsvd_63:1;
+ } s;
};
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
union uvh_lb_bau_intd_payload_queue_last_u {
- unsigned long v;
- struct uvh_lb_bau_intd_payload_queue_last_s {
- unsigned long rsvd_0_3: 4; /* */
- unsigned long address : 39; /* RW */
- unsigned long rsvd_43_63: 21; /* */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_intd_payload_queue_last_s {
+ unsigned long rsvd_0_3:4;
+ unsigned long address:39; /* RW */
+ unsigned long rsvd_43_63:21;
+ } s;
};
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
union uvh_lb_bau_intd_payload_queue_tail_u {
- unsigned long v;
- struct uvh_lb_bau_intd_payload_queue_tail_s {
- unsigned long rsvd_0_3: 4; /* */
- unsigned long address : 39; /* RW */
- unsigned long rsvd_43_63: 21; /* */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_intd_payload_queue_tail_s {
+ unsigned long rsvd_0_3:4;
+ unsigned long address:39; /* RW */
+ unsigned long rsvd_43_63:21;
+ } s;
};
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
union uvh_lb_bau_intd_software_acknowledge_u {
- unsigned long v;
- struct uvh_lb_bau_intd_software_acknowledge_s {
- unsigned long pending_0 : 1; /* RW, W1C */
- unsigned long pending_1 : 1; /* RW, W1C */
- unsigned long pending_2 : 1; /* RW, W1C */
- unsigned long pending_3 : 1; /* RW, W1C */
- unsigned long pending_4 : 1; /* RW, W1C */
- unsigned long pending_5 : 1; /* RW, W1C */
- unsigned long pending_6 : 1; /* RW, W1C */
- unsigned long pending_7 : 1; /* RW, W1C */
- unsigned long timeout_0 : 1; /* RW, W1C */
- unsigned long timeout_1 : 1; /* RW, W1C */
- unsigned long timeout_2 : 1; /* RW, W1C */
- unsigned long timeout_3 : 1; /* RW, W1C */
- unsigned long timeout_4 : 1; /* RW, W1C */
- unsigned long timeout_5 : 1; /* RW, W1C */
- unsigned long timeout_6 : 1; /* RW, W1C */
- unsigned long timeout_7 : 1; /* RW, W1C */
- unsigned long rsvd_16_63: 48; /* */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_intd_software_acknowledge_s {
+ unsigned long pending_0:1; /* RW, W1C */
+ unsigned long pending_1:1; /* RW, W1C */
+ unsigned long pending_2:1; /* RW, W1C */
+ unsigned long pending_3:1; /* RW, W1C */
+ unsigned long pending_4:1; /* RW, W1C */
+ unsigned long pending_5:1; /* RW, W1C */
+ unsigned long pending_6:1; /* RW, W1C */
+ unsigned long pending_7:1; /* RW, W1C */
+ unsigned long timeout_0:1; /* RW, W1C */
+ unsigned long timeout_1:1; /* RW, W1C */
+ unsigned long timeout_2:1; /* RW, W1C */
+ unsigned long timeout_3:1; /* RW, W1C */
+ unsigned long timeout_4:1; /* RW, W1C */
+ unsigned long timeout_5:1; /* RW, W1C */
+ unsigned long timeout_6:1; /* RW, W1C */
+ unsigned long timeout_7:1; /* RW, W1C */
+ unsigned long rsvd_16_63:48;
+ } s;
};
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
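
/*
 * The pending_*/timeout_* bits above are RW, W1C (write one to clear).
 * A sketch of clearing a single source; treating the _ALIAS offset as
 * the write-to-clear alias of the register is an assumption here, and
 * the helper is hypothetical.
 */
static inline void uv_clear_swack_timeout0(void)
{
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
		UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK);
}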
/* ========================================================================= */
/* UVH_LB_BAU_MISC_CONTROL */
/* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
-
-#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
+
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
union uvh_lb_bau_misc_control_u {
- unsigned long v;
- struct uvh_lb_bau_misc_control_s {
- unsigned long rejection_delay : 8; /* RW */
- unsigned long apic_mode : 1; /* RW */
- unsigned long force_broadcast : 1; /* RW */
- unsigned long force_lock_nop : 1; /* RW */
- unsigned long qpi_agent_presence_vector : 3; /* RW */
- unsigned long descriptor_fetch_mode : 1; /* RW */
- unsigned long enable_intd_soft_ack_mode : 1; /* RW */
- unsigned long intd_soft_ack_timeout_period : 4; /* RW */
- unsigned long enable_dual_mapping_mode : 1; /* RW */
- unsigned long vga_io_port_decode_enable : 1; /* RW */
- unsigned long vga_io_port_16_bit_decode : 1; /* RW */
- unsigned long suppress_dest_registration : 1; /* RW */
- unsigned long programmed_initial_priority : 3; /* RW */
- unsigned long use_incoming_priority : 1; /* RW */
- unsigned long enable_programmed_initial_priority : 1; /* RW */
- unsigned long rsvd_29_63 : 35;
- } s;
- struct uv1h_lb_bau_misc_control_s {
- unsigned long rejection_delay : 8; /* RW */
- unsigned long apic_mode : 1; /* RW */
- unsigned long force_broadcast : 1; /* RW */
- unsigned long force_lock_nop : 1; /* RW */
- unsigned long qpi_agent_presence_vector : 3; /* RW */
- unsigned long descriptor_fetch_mode : 1; /* RW */
- unsigned long enable_intd_soft_ack_mode : 1; /* RW */
- unsigned long intd_soft_ack_timeout_period : 4; /* RW */
- unsigned long enable_dual_mapping_mode : 1; /* RW */
- unsigned long vga_io_port_decode_enable : 1; /* RW */
- unsigned long vga_io_port_16_bit_decode : 1; /* RW */
- unsigned long suppress_dest_registration : 1; /* RW */
- unsigned long programmed_initial_priority : 3; /* RW */
- unsigned long use_incoming_priority : 1; /* RW */
- unsigned long enable_programmed_initial_priority : 1; /* RW */
- unsigned long rsvd_29_47 : 19; /* */
- unsigned long fun : 16; /* RW */
- } s1;
- struct uv2h_lb_bau_misc_control_s {
- unsigned long rejection_delay : 8; /* RW */
- unsigned long apic_mode : 1; /* RW */
- unsigned long force_broadcast : 1; /* RW */
- unsigned long force_lock_nop : 1; /* RW */
- unsigned long qpi_agent_presence_vector : 3; /* RW */
- unsigned long descriptor_fetch_mode : 1; /* RW */
- unsigned long enable_intd_soft_ack_mode : 1; /* RW */
- unsigned long intd_soft_ack_timeout_period : 4; /* RW */
- unsigned long enable_dual_mapping_mode : 1; /* RW */
- unsigned long vga_io_port_decode_enable : 1; /* RW */
- unsigned long vga_io_port_16_bit_decode : 1; /* RW */
- unsigned long suppress_dest_registration : 1; /* RW */
- unsigned long programmed_initial_priority : 3; /* RW */
- unsigned long use_incoming_priority : 1; /* RW */
- unsigned long enable_programmed_initial_priority : 1; /* RW */
- unsigned long enable_automatic_apic_mode_selection : 1; /* RW */
- unsigned long apic_mode_status : 1; /* RO */
- unsigned long suppress_interrupts_to_self : 1; /* RW */
- unsigned long enable_lock_based_system_flush : 1; /* RW */
- unsigned long enable_extended_sb_status : 1; /* RW */
- unsigned long suppress_int_prio_udt_to_self : 1; /* RW */
- unsigned long use_legacy_descriptor_formats : 1; /* RW */
- unsigned long rsvd_36_47 : 12; /* */
- unsigned long fun : 16; /* RW */
- } s2;
+ unsigned long v;
+ struct uvh_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long rsvd_29_63:35;
+ } s;
+ struct uv1h_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long rsvd_29_47:19;
+ unsigned long fun:16; /* RW */
+ } s1;
+ struct uv2h_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long rsvd_36_47:12;
+ unsigned long fun:16; /* RW */
+ } s2;
};
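
/*
 * Most MISC_CONTROL fields are RW, so the usual pattern is a
 * read-modify-write through the union. A sketch (hypothetical helper;
 * uv_hub.h accessors assumed):
 */
static inline void uv_bau_enable_soft_ack(unsigned int period)
{
	union uvh_lb_bau_misc_control_u mc;

	mc.v = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
	mc.s.enable_intd_soft_ack_mode = 1;
	mc.s.intd_soft_ack_timeout_period = period & 0xf; /* 4-bit field */
	uv_write_local_mmr(UVH_LB_BAU_MISC_CONTROL, mc.v);
}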
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
union uvh_lb_bau_sb_activation_control_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_control_s {
- unsigned long index : 6; /* RW */
- unsigned long rsvd_6_61: 56; /* */
- unsigned long push : 1; /* WP */
- unsigned long init : 1; /* WP */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_sb_activation_control_s {
+ unsigned long index:6; /* RW */
+ unsigned long rsvd_6_61:56;
+ unsigned long push:1; /* WP */
+ unsigned long init:1; /* WP */
+ } s;
};
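
/*
 * push and init are write-pulse (WP) bits, so the register is composed
 * from zero rather than read back. A sketch of kicking descriptor
 * 'index' (illustrative helper):
 */
static inline void uv_bau_activate(int index)
{
	union uvh_lb_bau_sb_activation_control_u ac;

	ac.v = 0;
	ac.s.index = index;	/* which descriptor to activate */
	ac.s.push = 1;		/* WP: pulse the activation */
	uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, ac.v);
}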
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
union uvh_lb_bau_sb_activation_status_0_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_status_0_s {
- unsigned long status : 64; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_sb_activation_status_0_s {
+ unsigned long status:64; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
union uvh_lb_bau_sb_activation_status_1_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_status_1_s {
- unsigned long status : 64; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_sb_activation_status_1_s {
+ unsigned long status:64; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
union uvh_lb_bau_sb_descriptor_base_u {
- unsigned long v;
- struct uvh_lb_bau_sb_descriptor_base_s {
- unsigned long rsvd_0_11 : 12; /* */
- unsigned long page_address : 31; /* RW */
- unsigned long rsvd_43_48 : 6; /* */
- unsigned long node_id : 14; /* RW */
- unsigned long rsvd_63 : 1; /* */
- } s;
+ unsigned long v;
+ struct uvh_lb_bau_sb_descriptor_base_s {
+ unsigned long rsvd_0_11:12;
+ unsigned long page_address:31; /* RW */
+ unsigned long rsvd_43_48:6;
+ unsigned long node_id:14; /* RW */
+ unsigned long rsvd_63:1;
+ } s;
};
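
/*
 * Sketch of programming the descriptor base from a physical address:
 * the low 12 bits are reserved, so PAGE_ADDRESS_SHFT doubles as the
 * alignment requirement. 'pa' and 'node' are illustrative parameters;
 * the helper is hypothetical.
 */
static inline void uv_bau_set_desc_base(unsigned long pa, int node)
{
	union uvh_lb_bau_sb_descriptor_base_u base;

	base.v = 0;
	/* pa must be 4K aligned; the field keeps bits 12..42 */
	base.s.page_address =
		pa >> UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT;
	base.s.node_id = node;
	uv_write_local_mmr(UVH_LB_BAU_SB_DESCRIPTOR_BASE, base.v);
}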
/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
-#define UVH_NODE_ID 0x0UL
-
-#define UVH_NODE_ID_FORCE1_SHFT 0
-#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UVH_NODE_ID_MANUFACTURER_SHFT 1
-#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UVH_NODE_ID_PART_NUMBER_SHFT 12
-#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UVH_NODE_ID_REVISION_SHFT 28
-#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UVH_NODE_ID_NODE_ID_SHFT 32
-#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-
-#define UV1H_NODE_ID_FORCE1_SHFT 0
-#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV1H_NODE_ID_REVISION_SHFT 28
-#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV1H_NODE_ID_NODE_ID_SHFT 32
-#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
-#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
-#define UV1H_NODE_ID_NI_PORT_SHFT 56
-#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
-
-#define UV2H_NODE_ID_FORCE1_SHFT 0
-#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV2H_NODE_ID_REVISION_SHFT 28
-#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV2H_NODE_ID_NODE_ID_SHFT 32
-#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
-#define UV2H_NODE_ID_NI_PORT_SHFT 57
-#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+#define UVH_NODE_ID 0x0UL
+
+#define UVH_NODE_ID_FORCE1_SHFT 0
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVH_NODE_ID_REVISION_SHFT 28
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+
+#define UV1H_NODE_ID_FORCE1_SHFT 0
+#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV1H_NODE_ID_REVISION_SHFT 28
+#define UV1H_NODE_ID_NODE_ID_SHFT 32
+#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UV1H_NODE_ID_NI_PORT_SHFT 56
+#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+#define UV2H_NODE_ID_FORCE1_SHFT 0
+#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV2H_NODE_ID_REVISION_SHFT 28
+#define UV2H_NODE_ID_NODE_ID_SHFT 32
+#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UV2H_NODE_ID_NI_PORT_SHFT 57
+#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
union uvh_node_id_u {
- unsigned long v;
- struct uvh_node_id_s {
- unsigned long force1 : 1; /* RO */
- unsigned long manufacturer : 11; /* RO */
- unsigned long part_number : 16; /* RO */
- unsigned long revision : 4; /* RO */
- unsigned long node_id : 15; /* RW */
- unsigned long rsvd_47_63 : 17;
- } s;
- struct uv1h_node_id_s {
- unsigned long force1 : 1; /* RO */
- unsigned long manufacturer : 11; /* RO */
- unsigned long part_number : 16; /* RO */
- unsigned long revision : 4; /* RO */
- unsigned long node_id : 15; /* RW */
- unsigned long rsvd_47 : 1; /* */
- unsigned long nodes_per_bit : 7; /* RW */
- unsigned long rsvd_55 : 1; /* */
- unsigned long ni_port : 4; /* RO */
- unsigned long rsvd_60_63 : 4; /* */
- } s1;
- struct uv2h_node_id_s {
- unsigned long force1 : 1; /* RO */
- unsigned long manufacturer : 11; /* RO */
- unsigned long part_number : 16; /* RO */
- unsigned long revision : 4; /* RO */
- unsigned long node_id : 15; /* RW */
- unsigned long rsvd_47_49 : 3; /* */
- unsigned long nodes_per_bit : 7; /* RO */
- unsigned long ni_port : 5; /* RO */
- unsigned long rsvd_62_63 : 2; /* */
- } s2;
+ unsigned long v;
+ struct uvh_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47_63:17;
+ } s;
+ struct uv1h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47:1;
+ unsigned long nodes_per_bit:7; /* RW */
+ unsigned long rsvd_55:1;
+ unsigned long ni_port:4; /* RO */
+ unsigned long rsvd_60_63:4;
+ } s1;
+ struct uv2h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47_49:3;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } s2;
};
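+
+/*
+ * Usage sketch (editorial, not part of the generated header): the
+ * SHFT/MASK macros and the union bitfields are equivalent views of
+ * the same 64-bit MMR.  Assuming uv_read_local_mmr() and the
+ * UVH_NODE_ID register offset from the surrounding UV headers:
+ *
+ *	union uvh_node_id_u node_id;
+ *	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+ *	int id    = node_id.s.node_id;
+ *	int same  = (node_id.v & UVH_NODE_ID_NODE_ID_MASK)
+ *			>> UVH_NODE_ID_NODE_ID_SHFT;
+ */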
/* ========================================================================= */
/* UVH_NODE_PRESENT_TABLE */
/* ========================================================================= */
-#define UVH_NODE_PRESENT_TABLE 0x1400UL
-#define UVH_NODE_PRESENT_TABLE_DEPTH 16
+#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE_DEPTH 16
-#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
-#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
+#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
+#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
union uvh_node_present_table_u {
- unsigned long v;
- struct uvh_node_present_table_s {
- unsigned long nodes : 64; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
- unsigned long rsvd_0_23 : 24; /* */
- unsigned long dest_base : 22; /* RW */
- unsigned long rsvd_46_63: 18; /* */
- } s;
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
};
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
- unsigned long rsvd_0_23 : 24; /* */
- unsigned long dest_base : 22; /* RW */
- unsigned long rsvd_46_63: 18; /* */
- } s;
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
};
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
- unsigned long rsvd_0_23 : 24; /* */
- unsigned long dest_base : 22; /* RW */
- unsigned long rsvd_46_63: 18; /* */
- } s;
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
};
/* ========================================================================= */
/* UVH_RH_GAM_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
-#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
-#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
-#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
union uvh_rh_gam_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_config_mmr_s {
- unsigned long m_skt : 6; /* RW */
- unsigned long n_skt : 4; /* RW */
- unsigned long rsvd_10_63 : 54;
- } s;
- struct uv1h_rh_gam_config_mmr_s {
- unsigned long m_skt : 6; /* RW */
- unsigned long n_skt : 4; /* RW */
- unsigned long rsvd_10_11: 2; /* */
- unsigned long mmiol_cfg : 1; /* RW */
- unsigned long rsvd_13_63: 51; /* */
- } s1;
- struct uv2h_rh_gam_config_mmr_s {
- unsigned long m_skt : 6; /* RW */
- unsigned long n_skt : 4; /* RW */
- unsigned long rsvd_10_63: 54; /* */
- } s2;
+ unsigned long v;
+ struct uvh_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s;
+ struct uv1h_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_11:2;
+ unsigned long mmiol_cfg:1; /* RW */
+ unsigned long rsvd_13_63:51;
+ } s1;
+ struct uv2h_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s2;
};
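+
+/*
+ * Editorial note: m_skt and n_skt are read here as the per-socket
+ * memory-address width and node-number width of the global address
+ * map; init code would typically cache them once, roughly:
+ *
+ *	union uvh_rh_gam_config_mmr_u cfg;
+ *	cfg.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
+ *	int m_val = cfg.s.m_skt, n_val = cfg.s.n_skt;
+ */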
/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_gru_overlay_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_27: 28; /* */
- unsigned long base : 18; /* RW */
- unsigned long rsvd_46_62 : 17;
- unsigned long enable : 1; /* RW */
- } s;
- struct uv1h_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_27: 28; /* */
- unsigned long base : 18; /* RW */
- unsigned long rsvd_46_47: 2; /* */
- unsigned long gr4 : 1; /* RW */
- unsigned long rsvd_49_51: 3; /* */
- unsigned long n_gru : 4; /* RW */
- unsigned long rsvd_56_62: 7; /* */
- unsigned long enable : 1; /* RW */
- } s1;
- struct uv2h_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_27: 28; /* */
- unsigned long base : 18; /* RW */
- unsigned long rsvd_46_51: 6; /* */
- unsigned long n_gru : 4; /* RW */
- unsigned long rsvd_56_62: 7; /* */
- unsigned long enable : 1; /* RW */
- } s2;
+ unsigned long v;
+ struct uvh_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s;
+ struct uv1h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_47:2;
+ unsigned long gr4:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uv2h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s2;
};
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_mmioh_overlay_config_mmr_u {
- unsigned long v;
- struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
- unsigned long rsvd_0_29: 30; /* */
- unsigned long base : 16; /* RW */
- unsigned long m_io : 6; /* RW */
- unsigned long n_io : 4; /* RW */
- unsigned long rsvd_56_62: 7; /* */
- unsigned long enable : 1; /* RW */
- } s1;
- struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
- unsigned long rsvd_0_26: 27; /* */
- unsigned long base : 19; /* RW */
- unsigned long m_io : 6; /* RW */
- unsigned long n_io : 4; /* RW */
- unsigned long rsvd_56_62: 7; /* */
- unsigned long enable : 1; /* RW */
- } s2;
+ unsigned long v;
+ struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
+ unsigned long rsvd_0_29:30;
+ unsigned long base:16; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s2;
};
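+
+/*
+ * Editorial note: UV1 and UV2 place the MMIOH base at different bit
+ * positions (BASE_SHFT 30 vs 27 above), so the hub-specific s1/s2
+ * views must be selected at run time.  A sketch, assuming an
+ * is_uv1_hub() predicate as in <asm/uv/uv_hub.h>:
+ *
+ *	union uvh_rh_gam_mmioh_overlay_config_mmr_u m;
+ *	m.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
+ *	unsigned long base = is_uv1_hub() ? m.s1.base : m.s2.base;
+ */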
/* ========================================================================= */
/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
union uvh_rh_gam_mmr_overlay_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_mmr_overlay_config_mmr_s {
- unsigned long rsvd_0_25: 26; /* */
- unsigned long base : 20; /* RW */
- unsigned long rsvd_46_62 : 17;
- unsigned long enable : 1; /* RW */
- } s;
- struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
- unsigned long rsvd_0_25: 26; /* */
- unsigned long base : 20; /* RW */
- unsigned long dual_hub : 1; /* RW */
- unsigned long rsvd_47_62: 16; /* */
- unsigned long enable : 1; /* RW */
- } s1;
- struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
- unsigned long rsvd_0_25: 26; /* */
- unsigned long base : 20; /* RW */
- unsigned long rsvd_46_62: 17; /* */
- unsigned long enable : 1; /* RW */
- } s2;
+ unsigned long v;
+ struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s;
+ struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long dual_hub:1; /* RW */
+ unsigned long rsvd_47_62:16;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s2;
};
/* ========================================================================= */
/* UVH_RTC */
/* ========================================================================= */
-#define UVH_RTC 0x340000UL
+#define UVH_RTC 0x340000UL
-#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
-#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
+#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
+#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
union uvh_rtc_u {
- unsigned long v;
- struct uvh_rtc_s {
- unsigned long real_time_clock : 56; /* RW */
- unsigned long rsvd_56_63 : 8; /* */
- } s;
+ unsigned long v;
+ struct uvh_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
};
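+
+/*
+ * Editorial note: the RTC is a free-running 56-bit counter, so a
+ * clocksource read amounts to:
+ *
+ *	unsigned long cycles = uv_read_local_mmr(UVH_RTC);
+ */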
/* ========================================================================= */
/* UVH_RTC1_INT_CONFIG */
/* ========================================================================= */
-#define UVH_RTC1_INT_CONFIG 0x615c0UL
-
-#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_RTC1_INT_CONFIG_P_SHFT 13
-#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_RTC1_INT_CONFIG_T_SHFT 15
-#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_RTC1_INT_CONFIG_M_SHFT 16
-#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
-#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
+
+#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_rtc1_int_config_u {
- unsigned long v;
- struct uvh_rtc1_int_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
+ unsigned long v;
+ struct uvh_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
};
/* ========================================================================= */
/* UVH_SCRATCH5 */
/* ========================================================================= */
-#define UVH_SCRATCH5 0x2d0200UL
-#define UVH_SCRATCH5_32 0x778
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x778
-#define UVH_SCRATCH5_SCRATCH5_SHFT 0
-#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
+#define UVH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
union uvh_scratch5_u {
- unsigned long v;
- struct uvh_scratch5_s {
- unsigned long scratch5 : 64; /* RW, W1CS */
- } s;
+ unsigned long v;
+ struct uvh_scratch5_s {
+ unsigned long scratch5:64; /* RW, W1CS */
+ } s;
};
/* ========================================================================= */
/* UV2H_EVENT_OCCURRED2 */
/* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2 0x70100UL
-#define UV2H_EVENT_OCCURRED2_32 0xb68
-
-#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
-#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
-#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
-#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
-#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
-#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
-#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
-#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
-#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
-#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
-#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
-#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
-#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
-#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
-#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
-#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
-#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
-#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
-#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
-#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
-#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
-#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
-#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
-#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
-#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
-#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
-#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
-#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
-#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
-#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
-#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
-#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
-#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED2 0x70100UL
+#define UV2H_EVENT_OCCURRED2_32 0xb68
+
+#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
union uv2h_event_occurred2_u {
- unsigned long v;
- struct uv2h_event_occurred2_s {
- unsigned long rtc_0 : 1; /* RW */
- unsigned long rtc_1 : 1; /* RW */
- unsigned long rtc_2 : 1; /* RW */
- unsigned long rtc_3 : 1; /* RW */
- unsigned long rtc_4 : 1; /* RW */
- unsigned long rtc_5 : 1; /* RW */
- unsigned long rtc_6 : 1; /* RW */
- unsigned long rtc_7 : 1; /* RW */
- unsigned long rtc_8 : 1; /* RW */
- unsigned long rtc_9 : 1; /* RW */
- unsigned long rtc_10 : 1; /* RW */
- unsigned long rtc_11 : 1; /* RW */
- unsigned long rtc_12 : 1; /* RW */
- unsigned long rtc_13 : 1; /* RW */
- unsigned long rtc_14 : 1; /* RW */
- unsigned long rtc_15 : 1; /* RW */
- unsigned long rtc_16 : 1; /* RW */
- unsigned long rtc_17 : 1; /* RW */
- unsigned long rtc_18 : 1; /* RW */
- unsigned long rtc_19 : 1; /* RW */
- unsigned long rtc_20 : 1; /* RW */
- unsigned long rtc_21 : 1; /* RW */
- unsigned long rtc_22 : 1; /* RW */
- unsigned long rtc_23 : 1; /* RW */
- unsigned long rtc_24 : 1; /* RW */
- unsigned long rtc_25 : 1; /* RW */
- unsigned long rtc_26 : 1; /* RW */
- unsigned long rtc_27 : 1; /* RW */
- unsigned long rtc_28 : 1; /* RW */
- unsigned long rtc_29 : 1; /* RW */
- unsigned long rtc_30 : 1; /* RW */
- unsigned long rtc_31 : 1; /* RW */
- unsigned long rsvd_32_63: 32; /* */
- } s1;
+ unsigned long v;
+ struct uv2h_event_occurred2_s {
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_32_63:32;
+ } s1;
};
/* ========================================================================= */
/* UV2H_EVENT_OCCURRED2_ALIAS */
/* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
-#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
+#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
+#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
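+
+/*
+ * Editorial note: as with the other EVENT_OCCURRED registers, the
+ * _ALIAS address is assumed to be the write-to-clear view, so a
+ * pending RTC bit would be acknowledged with something like:
+ *
+ *	uv_write_local_mmr(UV2H_EVENT_OCCURRED2_ALIAS,
+ *			   UV2H_EVENT_OCCURRED2_RTC_0_MASK);
+ */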
/* ========================================================================= */
/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */
/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
union uv2h_lb_bau_sb_activation_status_2_u {
- unsigned long v;
- struct uv2h_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error : 64; /* RW */
- } s1;
+ unsigned long v;
+ struct uv2h_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } s1;
};
/* ========================================================================= */
/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */
/* ========================================================================= */
-#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
-#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
union uv1h_lb_target_physical_apic_id_mask_u {
- unsigned long v;
- struct uv1h_lb_target_physical_apic_id_mask_s {
- unsigned long bit_enables : 32; /* RW */
- unsigned long rsvd_32_63 : 32; /* */
- } s1;
+ unsigned long v;
+ struct uv1h_lb_target_physical_apic_id_mask_s {
+ unsigned long bit_enables:32; /* RW */
+ unsigned long rsvd_32_63:32;
+ } s1;
};
-#endif /* __ASM_UV_MMRS_X86_H__ */
+#endif /* _ASM_X86_UV_UV_MMRS_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 646b4c1ca695..815285bcaceb 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -11,10 +11,9 @@ struct vsyscall_gtod_data {
time_t wall_time_sec;
u32 wall_time_nsec;
- int sysctl_enabled;
struct timezone sys_tz;
struct { /* extract of a clocksource struct */
- cycle_t (*vread)(void);
+ int vclock_mode;
cycle_t cycle_last;
cycle_t mask;
u32 mult;
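+/*
+ * Editorial note: replacing the vread function pointer with an
+ * integer mode lets the userspace vDSO choose the clock-read method
+ * itself instead of calling into kernel text.  Consumer sketch, with
+ * hypothetical VCLOCK_* constants and per-clock read helpers:
+ *
+ *	switch (gtod->clock.vclock_mode) {
+ *	case VCLOCK_TSC:  cycles = vread_tsc();  break;
+ *	case VCLOCK_HPET: cycles = vread_hpet(); break;
+ *	}
+ */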
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 84471b810460..2caf290e9895 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -132,6 +132,8 @@ enum vmcs_field {
GUEST_IA32_PAT_HIGH = 0x00002805,
GUEST_IA32_EFER = 0x00002806,
GUEST_IA32_EFER_HIGH = 0x00002807,
+ GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
+ GUEST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002809,
GUEST_PDPTR0 = 0x0000280a,
GUEST_PDPTR0_HIGH = 0x0000280b,
GUEST_PDPTR1 = 0x0000280c,
@@ -144,6 +146,8 @@ enum vmcs_field {
HOST_IA32_PAT_HIGH = 0x00002c01,
HOST_IA32_EFER = 0x00002c02,
HOST_IA32_EFER_HIGH = 0x00002c03,
+ HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
+ HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
EXCEPTION_BITMAP = 0x00004004,
@@ -426,4 +430,43 @@ struct vmx_msr_entry {
u64 value;
} __aligned(16);
+/*
+ * Exit Qualifications for entry failure during or after loading guest state
+ */
+#define ENTRY_FAIL_DEFAULT 0
+#define ENTRY_FAIL_PDPTE 2
+#define ENTRY_FAIL_NMI 3
+#define ENTRY_FAIL_VMCS_LINK_PTR 4
+
+/*
+ * VM-instruction error numbers
+ */
+enum vm_instruction_error_number {
+ VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
+ VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
+ VMXERR_VMCLEAR_VMXON_POINTER = 3,
+ VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
+ VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
+ VMXERR_VMRESUME_AFTER_VMXOFF = 6,
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
+ VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
+ VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
+ VMXERR_VMPTRLD_VMXON_POINTER = 10,
+ VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
+ VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
+ VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
+ VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
+ VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
+ VMXERR_VMCALL_NONCLEAR_VMCS = 19,
+ VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
+ VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
+ VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
+ VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
+ VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
+};
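+
+/*
+ * Editorial sketch: when a VMX instruction fails with a valid current
+ * VMCS, the number above is reported in the VM_INSTRUCTION_ERROR VMCS
+ * field; assuming a vmcs_read32() accessor like KVM's:
+ *
+ *	u32 err = vmcs_read32(VM_INSTRUCTION_ERROR);
+ *	if (err == VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID)
+ *		pr_warn("VMPTRLD: wrong VMCS revision id\n");
+ */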
+
#endif
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d55597351f6a..60107072c28b 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -16,10 +16,6 @@ enum vsyscall_num {
#ifdef __KERNEL__
#include <linux/seqlock.h>
-/* Definitions for CONFIG_GENERIC_TIME definitions */
-#define __vsyscall_fn \
- __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
-
#define VGETCPU_RDTSCP 1
#define VGETCPU_LSL 2
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index 341b3559452b..de656ac2af41 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -10,15 +10,14 @@
* In normal kernel code, they are used like any other variable.
* In user code, they are accessed through the VVAR macro.
*
- * Each of these variables lives in the vsyscall page, and each
- * one needs a unique offset within the little piece of the page
- * reserved for vvars. Specify that offset in DECLARE_VVAR.
- * (There are 896 bytes available. If you mess up, the linker will
- * catch it.)
+ * These variables live in a page of kernel data that has an extra RO
+ * mapping for userspace. Each variable needs a unique offset within
+ * that page; specify that offset with the DECLARE_VVAR macro. (If
+ * you mess up, the linker will catch it.)
*/
-/* Offset of vars within vsyscall page */
-#define VSYSCALL_VARS_OFFSET (3072 + 128)
+/* Base address of vvars. This is not ABI. */
+#define VVAR_ADDRESS (-10*1024*1024 - 4096)
#if defined(__VVAR_KERNEL_LDS)
@@ -26,17 +25,17 @@
* right place.
*/
#define DECLARE_VVAR(offset, type, name) \
- EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset)
+ EMIT_VVAR(name, offset)
#else
#define DECLARE_VVAR(offset, type, name) \
static type const * const vvaraddr_ ## name = \
- (void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset));
+ (void *)(VVAR_ADDRESS + (offset));
#define DEFINE_VVAR(type, name) \
- type __vvar_ ## name \
- __attribute__((section(".vsyscall_var_" #name), aligned(16)))
+ type name \
+ __attribute__((section(".vvar_" #name), aligned(16)))
#define VVAR(name) (*vvaraddr_ ## name)
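+
+/*
+ * Editorial sketch: with the macros above, the kernel instantiates a
+ * vvar with DEFINE_VVAR and vDSO/user code reads it through VVAR(),
+ * which resolves to the read-only userspace mapping:
+ *
+ *	DEFINE_VVAR(int, vgetcpu_mode);		(kernel side)
+ *	int mode = VVAR(vgetcpu_mode);		(vDSO side)
+ */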
@@ -45,8 +44,7 @@
/* DECLARE_VVAR(offset, type, name) */
DECLARE_VVAR(0, volatile unsigned long, jiffies)
-DECLARE_VVAR(8, int, vgetcpu_mode)
+DECLARE_VVAR(16, int, vgetcpu_mode)
DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
#undef DECLARE_VVAR
-#undef VSYSCALL_VARS_OFFSET
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index d240ea950519..417777de5a40 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -39,6 +39,8 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <trace/events/xen.h>
+
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -459,6 +461,8 @@ MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
mcl->op = __HYPERVISOR_fpu_taskswitch;
mcl->args[0] = set;
+
+ trace_xen_mc_entry(mcl, 1);
}
static inline void
@@ -475,6 +479,8 @@ MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
mcl->args[2] = new_val.pte >> 32;
mcl->args[3] = flags;
}
+
+ trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}
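+
+/*
+ * Editorial note: on 32-bit, the 64-bit pte is split across args[1]
+ * and args[2], so the traced argument count above is 4 rather than 3;
+ * the sizeof(new_val) == sizeof(long) test (and its analogues in the
+ * other MULTI_* helpers) distinguishes the two layouts.
+ */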
static inline void
@@ -485,6 +491,8 @@ MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
mcl->args[0] = cmd;
mcl->args[1] = (unsigned long)uop;
mcl->args[2] = count;
+
+ trace_xen_mc_entry(mcl, 3);
}
static inline void
@@ -504,6 +512,8 @@ MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long v
mcl->args[3] = flags;
mcl->args[4] = domid;
}
+
+ trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}
static inline void
@@ -520,6 +530,8 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
mcl->args[2] = desc.a;
mcl->args[3] = desc.b;
}
+
+ trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}
static inline void
@@ -528,6 +540,8 @@ MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
mcl->op = __HYPERVISOR_memory_op;
mcl->args[0] = cmd;
mcl->args[1] = (unsigned long)arg;
+
+ trace_xen_mc_entry(mcl, 2);
}
static inline void
@@ -539,6 +553,8 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
+
+ trace_xen_mc_entry(mcl, 4);
}
static inline void
@@ -550,6 +566,8 @@ MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
+
+ trace_xen_mc_entry(mcl, 4);
}
static inline void
@@ -558,6 +576,8 @@ MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
mcl->op = __HYPERVISOR_set_gdt;
mcl->args[0] = (unsigned long)frames;
mcl->args[1] = entries;
+
+ trace_xen_mc_entry(mcl, 2);
}
static inline void
@@ -567,6 +587,8 @@ MULTI_stack_switch(struct multicall_entry *mcl,
mcl->op = __HYPERVISOR_stack_switch;
mcl->args[0] = ss;
mcl->args[1] = esp;
+
+ trace_xen_mc_entry(mcl, 2);
}
#endif /* _ASM_X86_XEN_HYPERCALL_H */
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 4fbda9a3f339..968d57dd54c9 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -14,13 +14,14 @@ static inline int pci_xen_hvm_init(void)
}
#endif
#if defined(CONFIG_XEN_DOM0)
-void __init xen_setup_pirqs(void);
+int __init pci_xen_initial_domain(void);
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
-static inline void __init xen_setup_pirqs(void)
+static inline int __init pci_xen_initial_domain(void)
{
+ return -1;
}
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
diff --git a/arch/x86/include/asm/xen/trace_types.h b/arch/x86/include/asm/xen/trace_types.h
new file mode 100644
index 000000000000..21e1874c0a0b
--- /dev/null
+++ b/arch/x86/include/asm/xen/trace_types.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_XEN_TRACE_TYPES_H
+#define _ASM_XEN_TRACE_TYPES_H
+
+enum xen_mc_flush_reason {
+ XEN_MC_FL_NONE, /* explicit flush */
+ XEN_MC_FL_BATCH, /* out of hypercall space */
+ XEN_MC_FL_ARGS, /* out of argument space */
+ XEN_MC_FL_CALLBACK, /* out of callback space */
+};
+
+enum xen_mc_extend_args {
+ XEN_MC_XE_OK,
+ XEN_MC_XE_BAD_OP,
+ XEN_MC_XE_NO_SPACE
+};
+typedef void (*xen_mc_callback_fn_t)(void *);
+
+#endif /* _ASM_XEN_TRACE_TYPES_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90b06d4daee2..04105574c8e9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -24,17 +24,12 @@ endif
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
CFLAGS_hpet.o := $(nostackp)
-CFLAGS_vread_tsc_64.o := $(nostackp)
CFLAGS_paravirt.o := $(nostackp)
GCOV_PROFILE_vsyscall_64.o := n
GCOV_PROFILE_hpet.o := n
GCOV_PROFILE_tsc.o := n
-GCOV_PROFILE_vread_tsc_64.o := n
GCOV_PROFILE_paravirt.o := n
-# vread_tsc_64 is hot and should be fully optimized:
-CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls
-
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
@@ -43,7 +38,8 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o vread_tsc_64.o
+obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
+obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
@@ -123,7 +119,6 @@ ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
- obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
obj-y += vsmp_64.o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a81f2d52f869..c63822816249 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -14,7 +14,6 @@
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
-#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
@@ -250,7 +249,6 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
-extern char __vsyscall_0;
void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
@@ -263,6 +261,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
struct alt_instr *a;
+ u8 *instr, *replacement;
u8 insnbuf[MAX_PATCH_LEN];
DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
@@ -276,25 +275,23 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
* order.
*/
for (a = start; a < end; a++) {
- u8 *instr = a->instr;
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
BUG_ON(a->replacementlen > a->instrlen);
BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->cpuid >= NCAPINTS*32);
if (!boot_cpu_has(a->cpuid))
continue;
-#ifdef CONFIG_X86_64
- /* vsyscall code is not mapped yet. resolve it manually. */
- if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
- instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
- DPRINTK("%s: vsyscall fixup: %p => %p\n",
- __func__, a->instr, instr);
- }
-#endif
- memcpy(insnbuf, a->replacement, a->replacementlen);
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+
+ /* 0xe8 is a relative jump; fix the offset. */
if (*insnbuf == 0xe8 && a->replacementlen == 5)
- *(s32 *)(insnbuf + 1) += a->replacement - a->instr;
+ *(s32 *)(insnbuf + 1) += replacement - instr;
+
add_nops(insnbuf + a->replacementlen,
a->instrlen - a->replacementlen);
+
text_poke_early(instr, insnbuf, a->instrlen);
}
}
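+
+/*
+ * Editorial sketch: .altinstructions entries now carry 32-bit
+ * self-relative offsets rather than absolute pointers, so a target is
+ * recovered by adding the stored offset to the address of the field
+ * that holds it:
+ *
+ *	instr = (u8 *)&a->instr_offset + a->instr_offset;
+ *
+ * For a 5-byte 0xe8 call, the copied rel32 was computed relative to
+ * the replacement site; adding (replacement - instr) rebases it to
+ * the patch site being poked.
+ */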
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
deleted file mode 100644
index 7c3a95e54ec5..000000000000
--- a/arch/x86/kernel/amd_iommu.c
+++ /dev/null
@@ -1,2764 +0,0 @@
-/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- * Leo Duran <leo.duran@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/pci.h>
-#include <linux/pci-ats.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/iommu-helper.h>
-#include <linux/iommu.h>
-#include <linux/delay.h>
-#include <asm/proto.h>
-#include <asm/iommu.h>
-#include <asm/gart.h>
-#include <asm/dma.h>
-#include <asm/amd_iommu_proto.h>
-#include <asm/amd_iommu_types.h>
-#include <asm/amd_iommu.h>
-
-#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
-
-#define LOOP_TIMEOUT 100000
-
-static DEFINE_RWLOCK(amd_iommu_devtable_lock);
-
-/* A list of preallocated protection domains */
-static LIST_HEAD(iommu_pd_list);
-static DEFINE_SPINLOCK(iommu_pd_list_lock);
-
-/*
- * Domain for untranslated devices - only allocated
- * if iommu=pt passed on kernel cmd line.
- */
-static struct protection_domain *pt_domain;
-
-static struct iommu_ops amd_iommu_ops;
-
-/*
- * general struct to manage commands sent to an IOMMU
- */
-struct iommu_cmd {
- u32 data[4];
-};
-
-static void update_domain(struct protection_domain *domain);
-
-/****************************************************************************
- *
- * Helper functions
- *
- ****************************************************************************/
-
-static inline u16 get_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return calc_devid(pdev->bus->number, pdev->devfn);
-}
-
-static struct iommu_dev_data *get_dev_data(struct device *dev)
-{
- return dev->archdata.iommu;
-}
-
-/*
- * In this function the list of preallocated protection domains is traversed to
- * find the domain for a specific device
- */
-static struct dma_ops_domain *find_protection_domain(u16 devid)
-{
- struct dma_ops_domain *entry, *ret = NULL;
- unsigned long flags;
- u16 alias = amd_iommu_alias_table[devid];
-
- if (list_empty(&iommu_pd_list))
- return NULL;
-
- spin_lock_irqsave(&iommu_pd_list_lock, flags);
-
- list_for_each_entry(entry, &iommu_pd_list, list) {
- if (entry->target_dev == devid ||
- entry->target_dev == alias) {
- ret = entry;
- break;
- }
- }
-
- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-
- return ret;
-}
-
-/*
- * This function checks if the driver got a valid device from the caller to
- * avoid dereferencing invalid pointers.
- */
-static bool check_device(struct device *dev)
-{
- u16 devid;
-
- if (!dev || !dev->dma_mask)
- return false;
-
- /* No device or no PCI device */
- if (dev->bus != &pci_bus_type)
- return false;
-
- devid = get_device_id(dev);
-
- /* Out of our scope? */
- if (devid > amd_iommu_last_bdf)
- return false;
-
- if (amd_iommu_rlookup_table[devid] == NULL)
- return false;
-
- return true;
-}
-
-static int iommu_init_device(struct device *dev)
-{
- struct iommu_dev_data *dev_data;
- struct pci_dev *pdev;
- u16 devid, alias;
-
- if (dev->archdata.iommu)
- return 0;
-
- dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
- if (!dev_data)
- return -ENOMEM;
-
- dev_data->dev = dev;
-
- devid = get_device_id(dev);
- alias = amd_iommu_alias_table[devid];
- pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
- if (pdev)
- dev_data->alias = &pdev->dev;
- else {
- kfree(dev_data);
- return -ENOTSUPP;
- }
-
- atomic_set(&dev_data->bind, 0);
-
- dev->archdata.iommu = dev_data;
-
-
- return 0;
-}
-
-static void iommu_ignore_device(struct device *dev)
-{
- u16 devid, alias;
-
- devid = get_device_id(dev);
- alias = amd_iommu_alias_table[devid];
-
- memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
- memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
-
- amd_iommu_rlookup_table[devid] = NULL;
- amd_iommu_rlookup_table[alias] = NULL;
-}
-
-static void iommu_uninit_device(struct device *dev)
-{
- kfree(dev->archdata.iommu);
-}
-
-void __init amd_iommu_uninit_devices(void)
-{
- struct pci_dev *pdev = NULL;
-
- for_each_pci_dev(pdev) {
-
- if (!check_device(&pdev->dev))
- continue;
-
- iommu_uninit_device(&pdev->dev);
- }
-}
-
-int __init amd_iommu_init_devices(void)
-{
- struct pci_dev *pdev = NULL;
- int ret = 0;
-
- for_each_pci_dev(pdev) {
-
- if (!check_device(&pdev->dev))
- continue;
-
- ret = iommu_init_device(&pdev->dev);
- if (ret == -ENOTSUPP)
- iommu_ignore_device(&pdev->dev);
- else if (ret)
- goto out_free;
- }
-
- return 0;
-
-out_free:
-
- amd_iommu_uninit_devices();
-
- return ret;
-}
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-/*
- * Initialization code for statistics collection
- */
-
-DECLARE_STATS_COUNTER(compl_wait);
-DECLARE_STATS_COUNTER(cnt_map_single);
-DECLARE_STATS_COUNTER(cnt_unmap_single);
-DECLARE_STATS_COUNTER(cnt_map_sg);
-DECLARE_STATS_COUNTER(cnt_unmap_sg);
-DECLARE_STATS_COUNTER(cnt_alloc_coherent);
-DECLARE_STATS_COUNTER(cnt_free_coherent);
-DECLARE_STATS_COUNTER(cross_page);
-DECLARE_STATS_COUNTER(domain_flush_single);
-DECLARE_STATS_COUNTER(domain_flush_all);
-DECLARE_STATS_COUNTER(alloced_io_mem);
-DECLARE_STATS_COUNTER(total_map_requests);
-
-static struct dentry *stats_dir;
-static struct dentry *de_fflush;
-
-static void amd_iommu_stats_add(struct __iommu_counter *cnt)
-{
- if (stats_dir == NULL)
- return;
-
- cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
- &cnt->value);
-}
-
-static void amd_iommu_stats_init(void)
-{
- stats_dir = debugfs_create_dir("amd-iommu", NULL);
- if (stats_dir == NULL)
- return;
-
- de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
- (u32 *)&amd_iommu_unmap_flush);
-
- amd_iommu_stats_add(&compl_wait);
- amd_iommu_stats_add(&cnt_map_single);
- amd_iommu_stats_add(&cnt_unmap_single);
- amd_iommu_stats_add(&cnt_map_sg);
- amd_iommu_stats_add(&cnt_unmap_sg);
- amd_iommu_stats_add(&cnt_alloc_coherent);
- amd_iommu_stats_add(&cnt_free_coherent);
- amd_iommu_stats_add(&cross_page);
- amd_iommu_stats_add(&domain_flush_single);
- amd_iommu_stats_add(&domain_flush_all);
- amd_iommu_stats_add(&alloced_io_mem);
- amd_iommu_stats_add(&total_map_requests);
-}
-
-#endif
-
-/****************************************************************************
- *
- * Interrupt handling functions
- *
- ****************************************************************************/
-
-static void dump_dte_entry(u16 devid)
-{
- int i;
-
- for (i = 0; i < 8; ++i)
- pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
- amd_iommu_dev_table[devid].data[i]);
-}
-
-static void dump_command(unsigned long phys_addr)
-{
- struct iommu_cmd *cmd = phys_to_virt(phys_addr);
- int i;
-
- for (i = 0; i < 4; ++i)
- pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
-}
-
-static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
-{
- u32 *event = __evt;
- int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
- int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
- int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
- u64 address = (u64)(((u64)event[3]) << 32) | event[2];
-
- printk(KERN_ERR "AMD-Vi: Event logged [");
-
- switch (type) {
- case EVENT_TYPE_ILL_DEV:
- printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
- dump_dte_entry(devid);
- break;
- case EVENT_TYPE_IO_FAULT:
- printk("IO_PAGE_FAULT device=%02x:%02x.%x "
- "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- domid, address, flags);
- break;
- case EVENT_TYPE_DEV_TAB_ERR:
- printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
- break;
- case EVENT_TYPE_PAGE_TAB_ERR:
- printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- domid, address, flags);
- break;
- case EVENT_TYPE_ILL_CMD:
- printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
- dump_command(address);
- break;
- case EVENT_TYPE_CMD_HARD_ERR:
- printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
- "flags=0x%04x]\n", address, flags);
- break;
- case EVENT_TYPE_IOTLB_INV_TO:
- printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
- "address=0x%016llx]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address);
- break;
- case EVENT_TYPE_INV_DEV_REQ:
- printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
- break;
- default:
- printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
- }
-}
-
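-/*
- * Drain the hardware event log: all entries between the head and tail
- * pointers are printed, then the head pointer is written back to
- * acknowledge them to the hardware.
- */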
-static void iommu_poll_events(struct amd_iommu *iommu)
-{
- u32 head, tail;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
- tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
-
- while (head != tail) {
- iommu_print_event(iommu, iommu->evt_buf + head);
- head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
- }
-
- writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-irqreturn_t amd_iommu_int_thread(int irq, void *data)
-{
- struct amd_iommu *iommu;
-
- for_each_iommu(iommu)
- iommu_poll_events(iommu);
-
- return IRQ_HANDLED;
-}
-
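-/*
- * The hard interrupt handler only kicks the threaded handler above;
- * the actual event processing happens in process context.
- */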
-irqreturn_t amd_iommu_int_handler(int irq, void *data)
-{
- return IRQ_WAKE_THREAD;
-}
-
-/****************************************************************************
- *
- * IOMMU command queuing functions
- *
- ****************************************************************************/
-
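-/*
- * A completion-wait command carries the physical address of a semaphore
- * (see build_completion_wait() below); the IOMMU stores a non-zero value
- * there once all previously queued commands have completed. wait_on_sem()
- * spins on that location for at most LOOP_TIMEOUT iterations.
- */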
-static int wait_on_sem(volatile u64 *sem)
-{
- int i = 0;
-
- while (*sem == 0 && i < LOOP_TIMEOUT) {
- udelay(1);
- i += 1;
- }
-
- if (i == LOOP_TIMEOUT) {
- pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void copy_cmd_to_buffer(struct amd_iommu *iommu,
- struct iommu_cmd *cmd,
- u32 tail)
-{
- u8 *target;
-
- target = iommu->cmd_buf + tail;
- tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
-
- /* Copy command to buffer */
- memcpy(target, cmd, sizeof(*cmd));
-
- /* Tell the IOMMU about it */
- writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-}
-
-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
-{
- WARN_ON(address & 0x7ULL);
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
- cmd->data[1] = upper_32_bits(__pa(address));
- cmd->data[2] = 1;
- CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
-}
-
-static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
-{
- memset(cmd, 0, sizeof(*cmd));
- cmd->data[0] = devid;
- CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
-}
-
-static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid, int pde)
-{
- u64 pages;
- int s;
-
- pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
-
- if (pages > 1) {
- /*
- * If we have to flush more than one page, flush all
- * TLB entries for this domain
- */
- address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
- }
-
- address &= PAGE_MASK;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->data[1] |= domid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
- CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
- if (s) /* size bit - we flush more than one 4kb page */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
- if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
-}
-
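-/*
- * Builds an IOTLB invalidation command for a remote (ATS-capable)
- * device. The invalidation queue depth of the device (qdep) is encoded
- * in the upper byte of data[0].
- */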
-static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
- u64 address, size_t size)
-{
- u64 pages;
- int s;
-
- pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
-
- if (pages > 1) {
- /*
- * If we have to flush more than one page, flush all
- * TLB entries for this domain
- */
- address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
- }
-
- address &= PAGE_MASK;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->data[0] = devid;
- cmd->data[0] |= (qdep & 0xff) << 24;
- cmd->data[1] = devid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
- CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
- if (s)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-}
-
-static void build_inv_all(struct iommu_cmd *cmd)
-{
- memset(cmd, 0, sizeof(*cmd));
- CMD_SET_TYPE(cmd, CMD_INV_ALL);
-}
-
-/*
- * Writes the command to the IOMMU's command buffer and informs the
- * hardware about the new command.
- */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
-{
- u32 left, tail, head, next_tail;
- unsigned long flags;
-
- WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
-
-again:
- spin_lock_irqsave(&iommu->lock, flags);
-
- head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
- tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
- next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
- left = (head - next_tail) % iommu->cmd_buf_size;
-
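- /*
- * left is the free space (in bytes) between the hardware head and
- * the new tail of the command ring. If the ring is almost full,
- * queue a completion-wait, drain the ring and retry.
- */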
- if (left <= 2) {
- struct iommu_cmd sync_cmd;
- volatile u64 sem = 0;
- int ret;
-
- build_completion_wait(&sync_cmd, (u64)&sem);
- copy_cmd_to_buffer(iommu, &sync_cmd, tail);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- if ((ret = wait_on_sem(&sem)) != 0)
- return ret;
-
- goto again;
- }
-
- copy_cmd_to_buffer(iommu, cmd, tail);
-
- /* We need to sync now to make sure all commands are processed */
- iommu->need_sync = true;
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- return 0;
-}
-
-/*
- * This function queues a completion wait command into the command
- * buffer of an IOMMU
- */
-static int iommu_completion_wait(struct amd_iommu *iommu)
-{
- struct iommu_cmd cmd;
- volatile u64 sem = 0;
- int ret;
-
- if (!iommu->need_sync)
- return 0;
-
- build_completion_wait(&cmd, (u64)&sem);
-
- ret = iommu_queue_command(iommu, &cmd);
- if (ret)
- return ret;
-
- return wait_on_sem(&sem);
-}
-
-static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
-{
- struct iommu_cmd cmd;
-
- build_inv_dte(&cmd, devid);
-
- return iommu_queue_command(iommu, &cmd);
-}
-
-static void iommu_flush_dte_all(struct amd_iommu *iommu)
-{
- u32 devid;
-
- for (devid = 0; devid <= 0xffff; ++devid)
- iommu_flush_dte(iommu, devid);
-
- iommu_completion_wait(iommu);
-}
-
-/*
- * This function uses heavy locking and may disable irqs for some time, but
- * that is not an issue because it is only called during resume.
- */
-static void iommu_flush_tlb_all(struct amd_iommu *iommu)
-{
- u32 dom_id;
-
- for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
- struct iommu_cmd cmd;
- build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
- iommu_queue_command(iommu, &cmd);
- }
-
- iommu_completion_wait(iommu);
-}
-
-static void iommu_flush_all(struct amd_iommu *iommu)
-{
- struct iommu_cmd cmd;
-
- build_inv_all(&cmd);
-
- iommu_queue_command(iommu, &cmd);
- iommu_completion_wait(iommu);
-}
-
-void iommu_flush_all_caches(struct amd_iommu *iommu)
-{
- if (iommu_feature(iommu, FEATURE_IA)) {
- iommu_flush_all(iommu);
- } else {
- iommu_flush_dte_all(iommu);
- iommu_flush_tlb_all(iommu);
- }
-}
-
-/*
- * Command send function for flushing on-device TLB
- */
-static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct amd_iommu *iommu;
- struct iommu_cmd cmd;
- u16 devid;
- int qdep;
-
- qdep = pci_ats_queue_depth(pdev);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
-
- build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
-
- return iommu_queue_command(iommu, &cmd);
-}
-
-/*
- * Command send function for invalidating a device table entry
- */
-static int device_flush_dte(struct device *dev)
-{
- struct amd_iommu *iommu;
- struct pci_dev *pdev;
- u16 devid;
- int ret;
-
- pdev = to_pci_dev(dev);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
-
- ret = iommu_flush_dte(iommu, devid);
- if (ret)
- return ret;
-
- if (pci_ats_enabled(pdev))
- ret = device_flush_iotlb(dev, 0, ~0UL);
-
- return ret;
-}
-
-/*
- * TLB invalidation function which is called from the mapping functions.
- * It invalidates a single PTE if the range to flush is within a single
- * page. Otherwise it flushes the whole TLB of the IOMMU.
- */
-static void __domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
-{
- struct iommu_dev_data *dev_data;
- struct iommu_cmd cmd;
- int ret = 0, i;
-
- build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
-
- for (i = 0; i < amd_iommus_present; ++i) {
- if (!domain->dev_iommu[i])
- continue;
-
- /*
- * Devices of this domain are behind this IOMMU,
- * so we need a TLB flush here.
- */
- ret |= iommu_queue_command(amd_iommus[i], &cmd);
- }
-
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct pci_dev *pdev = to_pci_dev(dev_data->dev);
-
- if (!pci_ats_enabled(pdev))
- continue;
-
- ret |= device_flush_iotlb(dev_data->dev, address, size);
- }
-
- WARN_ON(ret);
-}
-
-static void domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size)
-{
- __domain_flush_pages(domain, address, size, 0);
-}
-
-/* Flush the whole IO/TLB for a given protection domain */
-static void domain_flush_tlb(struct protection_domain *domain)
-{
- __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
-}
-
-/* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void domain_flush_tlb_pde(struct protection_domain *domain)
-{
- __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
-}
-
-static void domain_flush_complete(struct protection_domain *domain)
-{
- int i;
-
- for (i = 0; i < amd_iommus_present; ++i) {
- if (!domain->dev_iommu[i])
- continue;
-
- /*
- * Devices of this domain are behind this IOMMU,
- * so we need to wait for completion of all commands.
- */
- iommu_completion_wait(amd_iommus[i]);
- }
-}
-
-/*
- * This function flushes the DTEs for all devices in the domain
- */
-static void domain_flush_devices(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
- unsigned long flags;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- list_for_each_entry(dev_data, &domain->dev_list, list)
- device_flush_dte(dev_data->dev);
-
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-/****************************************************************************
- *
- * The functions below are used to create the page table mappings for
- * unity-mapped regions.
- *
- ****************************************************************************/
-
-/*
- * This function is used to add another level to an IO page table. Adding
- * another level increases the size of the address space by 9 bits, up to a
- * maximum of 64 bits.
- */
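-/*
- * Example: a page table is one zeroed page of 8-byte entries, i.e. 512
- * entries per level, so each level resolves 9 bits and PAGE_MODE_3_LEVEL
- * covers a 12 + 3*9 = 39 bit IO address space.
- */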
-static bool increase_address_space(struct protection_domain *domain,
- gfp_t gfp)
-{
- u64 *pte;
-
- if (domain->mode == PAGE_MODE_6_LEVEL)
- /* address space already 64 bit large */
- return false;
-
- pte = (void *)get_zeroed_page(gfp);
- if (!pte)
- return false;
-
- *pte = PM_LEVEL_PDE(domain->mode,
- virt_to_phys(domain->pt_root));
- domain->pt_root = pte;
- domain->mode += 1;
- domain->updated = true;
-
- return true;
-}
-
-static u64 *alloc_pte(struct protection_domain *domain,
- unsigned long address,
- unsigned long page_size,
- u64 **pte_page,
- gfp_t gfp)
-{
- int level, end_lvl;
- u64 *pte, *page;
-
- BUG_ON(!is_power_of_2(page_size));
-
- while (address > PM_LEVEL_SIZE(domain->mode))
- increase_address_space(domain, gfp);
-
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
- address = PAGE_SIZE_ALIGN(address, page_size);
- end_lvl = PAGE_SIZE_LEVEL(page_size);
-
- while (level > end_lvl) {
- if (!IOMMU_PTE_PRESENT(*pte)) {
- page = (u64 *)get_zeroed_page(gfp);
- if (!page)
- return NULL;
- *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
- }
-
- /* No level skipping support yet */
- if (PM_PTE_LEVEL(*pte) != level)
- return NULL;
-
- level -= 1;
-
- pte = IOMMU_PTE_PAGE(*pte);
-
- if (pte_page && level == end_lvl)
- *pte_page = pte;
-
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- }
-
- return pte;
-}
-
-/*
- * This function checks if there is a PTE for a given dma address. If
- * there is one, it returns the pointer to it.
- */
-static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
-{
- int level;
- u64 *pte;
-
- if (address > PM_LEVEL_SIZE(domain->mode))
- return NULL;
-
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
-
- while (level > 0) {
-
- /* Not Present */
- if (!IOMMU_PTE_PRESENT(*pte))
- return NULL;
-
- /* Large PTE */
- if (PM_PTE_LEVEL(*pte) == 0x07) {
- unsigned long pte_mask, __pte;
-
- /*
- * If we have a series of large PTEs, make
- * sure to return a pointer to the first one.
- */
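- /*
- * PAGE_SIZE_PTE_COUNT() yields the number of replicated 8-byte
- * PTEs backing this large mapping, so shifting the count by 3
- * turns it into the byte span used to align the pointer down to
- * the first PTE of the series.
- */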
- pte_mask = PTE_PAGE_SIZE(*pte);
- pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
- __pte = ((unsigned long)pte) & pte_mask;
-
- return (u64 *)__pte;
- }
-
- /* No level skipping support yet */
- if (PM_PTE_LEVEL(*pte) != level)
- return NULL;
-
- level -= 1;
-
- /* Walk to the next level */
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- }
-
- return pte;
-}
-
-/*
- * Generic mapping function. It maps a physical address into a DMA
- * address space. It allocates the page table pages if necessary.
- * In the future it can be extended to a generic mapping function
- * supporting all features of AMD IOMMU page tables like level skipping
- * and full 64 bit address spaces.
- */
-static int iommu_map_page(struct protection_domain *dom,
- unsigned long bus_addr,
- unsigned long phys_addr,
- int prot,
- unsigned long page_size)
-{
- u64 __pte, *pte;
- int i, count;
-
- if (!(prot & IOMMU_PROT_MASK))
- return -EINVAL;
-
- bus_addr = PAGE_ALIGN(bus_addr);
- phys_addr = PAGE_ALIGN(phys_addr);
- count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
- if (!pte)
- return -ENOMEM;
-
- for (i = 0; i < count; ++i)
- if (IOMMU_PTE_PRESENT(pte[i]))
- return -EBUSY;
-
- if (page_size > PAGE_SIZE) {
- __pte = PAGE_SIZE_PTE(phys_addr, page_size);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
- } else
- __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
-
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
-
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
-
- update_domain(dom);
-
- return 0;
-}
-
-static unsigned long iommu_unmap_page(struct protection_domain *dom,
- unsigned long bus_addr,
- unsigned long page_size)
-{
- unsigned long long unmap_size, unmapped;
- u64 *pte;
-
- BUG_ON(!is_power_of_2(page_size));
-
- unmapped = 0;
-
- while (unmapped < page_size) {
-
- pte = fetch_pte(dom, bus_addr);
-
- if (!pte) {
- /*
- * No PTE for this address
- * move forward in 4kb steps
- */
- unmap_size = PAGE_SIZE;
- } else if (PM_PTE_LEVEL(*pte) == 0) {
- /* 4kb PTE found for this address */
- unmap_size = PAGE_SIZE;
- *pte = 0ULL;
- } else {
- int count, i;
-
- /* Large PTE found which maps this address */
- unmap_size = PTE_PAGE_SIZE(*pte);
- count = PAGE_SIZE_PTE_COUNT(unmap_size);
- for (i = 0; i < count; i++)
- pte[i] = 0ULL;
- }
-
- bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
- unmapped += unmap_size;
- }
-
- BUG_ON(!is_power_of_2(unmapped));
-
- return unmapped;
-}
-
-/*
- * This function checks if a specific unity mapping entry is needed for
- * this specific IOMMU.
- */
-static int iommu_for_unity_map(struct amd_iommu *iommu,
- struct unity_map_entry *entry)
-{
- u16 bdf, i;
-
- for (i = entry->devid_start; i <= entry->devid_end; ++i) {
- bdf = amd_iommu_alias_table[i];
- if (amd_iommu_rlookup_table[bdf] == iommu)
- return 1;
- }
-
- return 0;
-}
-
-/*
- * This function actually applies the mapping to the page table of the
- * dma_ops domain.
- */
-static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
- struct unity_map_entry *e)
-{
- u64 addr;
- int ret;
-
- for (addr = e->address_start; addr < e->address_end;
- addr += PAGE_SIZE) {
- ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
- PAGE_SIZE);
- if (ret)
- return ret;
- /*
- * If the unity mapping is in the aperture range, mark
- * the page as allocated in the aperture.
- */
- if (addr < dma_dom->aperture_size)
- __set_bit(addr >> PAGE_SHIFT,
- dma_dom->aperture[0]->bitmap);
- }
-
- return 0;
-}
-
-/*
- * Init the unity mappings for a specific IOMMU in the system
- *
- * Basically iterates over all unity mapping entries and applies them to
- * the default DMA domain of that IOMMU if necessary.
- */
-static int iommu_init_unity_mappings(struct amd_iommu *iommu)
-{
- struct unity_map_entry *entry;
- int ret;
-
- list_for_each_entry(entry, &amd_iommu_unity_map, list) {
- if (!iommu_for_unity_map(iommu, entry))
- continue;
- ret = dma_ops_unity_map(iommu->default_dom, entry);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Inits the unity mappings required for a specific device
- */
-static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
- u16 devid)
-{
- struct unity_map_entry *e;
- int ret;
-
- list_for_each_entry(e, &amd_iommu_unity_map, list) {
- if (!(devid >= e->devid_start && devid <= e->devid_end))
- continue;
- ret = dma_ops_unity_map(dma_dom, e);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/****************************************************************************
- *
- * The next functions belong to the address allocator for the dma_ops
- * interface functions. They work like the allocators in the other IOMMU
- * drivers. It is basically a bitmap which marks the allocated pages in
- * the aperture. Maybe it could be enhanced in the future to a more
- * efficient allocator.
- *
- ****************************************************************************/
-
-/*
- * The address allocator core functions.
- *
- * called with domain->lock held
- */
-
-/*
- * Used to reserve address ranges in the aperture (e.g. for exclusion
- * ranges).
- */
-static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
- unsigned long start_page,
- unsigned int pages)
-{
- unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
-
- if (start_page + pages > last_page)
- pages = last_page - start_page;
-
- for (i = start_page; i < start_page + pages; ++i) {
- int index = i / APERTURE_RANGE_PAGES;
- int page = i % APERTURE_RANGE_PAGES;
- __set_bit(page, dom->aperture[index]->bitmap);
- }
-}
-
-/*
- * This function is used to add a new aperture range to an existing
- * aperture in case of dma_ops domain allocation or address allocation
- * failure.
- */
-static int alloc_new_range(struct dma_ops_domain *dma_dom,
- bool populate, gfp_t gfp)
-{
- int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
- struct amd_iommu *iommu;
- unsigned long i;
-
-#ifdef CONFIG_IOMMU_STRESS
- populate = false;
-#endif
-
- if (index >= APERTURE_MAX_RANGES)
- return -ENOMEM;
-
- dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
- if (!dma_dom->aperture[index])
- return -ENOMEM;
-
- dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
- if (!dma_dom->aperture[index]->bitmap)
- goto out_free;
-
- dma_dom->aperture[index]->offset = dma_dom->aperture_size;
-
- if (populate) {
- unsigned long address = dma_dom->aperture_size;
- int i, num_ptes = APERTURE_RANGE_PAGES / 512;
- u64 *pte, *pte_page;
-
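- /*
- * Each allocated PTE page holds 512 entries and thus covers 2MB
- * (APERTURE_RANGE_SIZE / 64) of the new 128MB aperture range.
- */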
- for (i = 0; i < num_ptes; ++i) {
- pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
- &pte_page, gfp);
- if (!pte)
- goto out_free;
-
- dma_dom->aperture[index]->pte_pages[i] = pte_page;
-
- address += APERTURE_RANGE_SIZE / 64;
- }
- }
-
- dma_dom->aperture_size += APERTURE_RANGE_SIZE;
-
- /* Initialize the exclusion range if necessary */
- for_each_iommu(iommu) {
- if (iommu->exclusion_start &&
- iommu->exclusion_start >= dma_dom->aperture[index]->offset
- && iommu->exclusion_start < dma_dom->aperture_size) {
- unsigned long startpage;
- int pages = iommu_num_pages(iommu->exclusion_start,
- iommu->exclusion_length,
- PAGE_SIZE);
- startpage = iommu->exclusion_start >> PAGE_SHIFT;
- dma_ops_reserve_addresses(dma_dom, startpage, pages);
- }
- }
-
- /*
- * Check for areas already mapped as present in the new aperture
- * range and mark those pages as reserved in the allocator. Such
- * mappings may already exist as a result of requested unity
- * mappings for devices.
- */
- for (i = dma_dom->aperture[index]->offset;
- i < dma_dom->aperture_size;
- i += PAGE_SIZE) {
- u64 *pte = fetch_pte(&dma_dom->domain, i);
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- continue;
-
- dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
- }
-
- update_domain(&dma_dom->domain);
-
- return 0;
-
-out_free:
- update_domain(&dma_dom->domain);
-
- free_page((unsigned long)dma_dom->aperture[index]->bitmap);
-
- kfree(dma_dom->aperture[index]);
- dma_dom->aperture[index] = NULL;
-
- return -ENOMEM;
-}
-
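-/*
- * Scan the aperture ranges for a free block of @pages pages, starting
- * in the range that contains @start. iommu_area_alloc() performs the
- * actual bitmap search; a hit is converted back into a DMA address by
- * adding the offset of the range it was found in.
- */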
-static unsigned long dma_ops_area_alloc(struct device *dev,
- struct dma_ops_domain *dom,
- unsigned int pages,
- unsigned long align_mask,
- u64 dma_mask,
- unsigned long start)
-{
- unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
- int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
- int i = start >> APERTURE_RANGE_SHIFT;
- unsigned long boundary_size;
- unsigned long address = -1;
- unsigned long limit;
-
- next_bit >>= PAGE_SHIFT;
-
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- PAGE_SIZE) >> PAGE_SHIFT;
-
- for (;i < max_index; ++i) {
- unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
-
- if (dom->aperture[i]->offset >= dma_mask)
- break;
-
- limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
- dma_mask >> PAGE_SHIFT);
-
- address = iommu_area_alloc(dom->aperture[i]->bitmap,
- limit, next_bit, pages, 0,
- boundary_size, align_mask);
- if (address != -1) {
- address = dom->aperture[i]->offset +
- (address << PAGE_SHIFT);
- dom->next_address = address + (pages << PAGE_SHIFT);
- break;
- }
-
- next_bit = 0;
- }
-
- return address;
-}
-
-static unsigned long dma_ops_alloc_addresses(struct device *dev,
- struct dma_ops_domain *dom,
- unsigned int pages,
- unsigned long align_mask,
- u64 dma_mask)
-{
- unsigned long address;
-
-#ifdef CONFIG_IOMMU_STRESS
- dom->next_address = 0;
- dom->need_flush = true;
-#endif
-
- address = dma_ops_area_alloc(dev, dom, pages, align_mask,
- dma_mask, dom->next_address);
-
- if (address == -1) {
- dom->next_address = 0;
- address = dma_ops_area_alloc(dev, dom, pages, align_mask,
- dma_mask, 0);
- dom->need_flush = true;
- }
-
- if (unlikely(address == -1))
- address = DMA_ERROR_CODE;
-
- WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
-
- return address;
-}
-
-/*
- * The address free function.
- *
- * called with domain->lock held
- */
-static void dma_ops_free_addresses(struct dma_ops_domain *dom,
- unsigned long address,
- unsigned int pages)
-{
- unsigned i = address >> APERTURE_RANGE_SHIFT;
- struct aperture_range *range = dom->aperture[i];
-
- BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
-
-#ifdef CONFIG_IOMMU_STRESS
- if (i < 4)
- return;
-#endif
-
- if (address >= dom->next_address)
- dom->need_flush = true;
-
- address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
-
- bitmap_clear(range->bitmap, address, pages);
-
-}
-
-/****************************************************************************
- *
- * The next functions belong to the domain allocation. A domain is
- * allocated for every IOMMU as the default domain. If device isolation
- * is enabled, every device gets its own domain. The most important thing
- * about domains is the page table mapping the DMA address space they
- * contain.
- *
- ****************************************************************************/
-
-/*
- * This function adds a protection domain to the global protection domain list
- */
-static void add_domain_to_list(struct protection_domain *domain)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&amd_iommu_pd_lock, flags);
- list_add(&domain->list, &amd_iommu_pd_list);
- spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
-/*
- * This function removes a protection domain from the global
- * protection domain list
- */
-static void del_domain_from_list(struct protection_domain *domain)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&amd_iommu_pd_lock, flags);
- list_del(&domain->list);
- spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
-static u16 domain_id_alloc(void)
-{
- unsigned long flags;
- int id;
-
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
- BUG_ON(id == 0);
- if (id > 0 && id < MAX_DOMAIN_ID)
- __set_bit(id, amd_iommu_pd_alloc_bitmap);
- else
- id = 0;
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-
- return id;
-}
-
-static void domain_id_free(int id)
-{
- unsigned long flags;
-
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- if (id > 0 && id < MAX_DOMAIN_ID)
- __clear_bit(id, amd_iommu_pd_alloc_bitmap);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-}
-
-static void free_pagetable(struct protection_domain *domain)
-{
- int i, j;
- u64 *p1, *p2, *p3;
-
- p1 = domain->pt_root;
-
- if (!p1)
- return;
-
- for (i = 0; i < 512; ++i) {
- if (!IOMMU_PTE_PRESENT(p1[i]))
- continue;
-
- p2 = IOMMU_PTE_PAGE(p1[i]);
- for (j = 0; j < 512; ++j) {
- if (!IOMMU_PTE_PRESENT(p2[j]))
- continue;
- p3 = IOMMU_PTE_PAGE(p2[j]);
- free_page((unsigned long)p3);
- }
-
- free_page((unsigned long)p2);
- }
-
- free_page((unsigned long)p1);
-
- domain->pt_root = NULL;
-}
-
-/*
- * Free a domain, only used if something went wrong in the
- * allocation path and we need to free an already allocated page table
- */
-static void dma_ops_domain_free(struct dma_ops_domain *dom)
-{
- int i;
-
- if (!dom)
- return;
-
- del_domain_from_list(&dom->domain);
-
- free_pagetable(&dom->domain);
-
- for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
- if (!dom->aperture[i])
- continue;
- free_page((unsigned long)dom->aperture[i]->bitmap);
- kfree(dom->aperture[i]);
- }
-
- kfree(dom);
-}
-
-/*
- * Allocates a new protection domain usable for the dma_ops functions.
- * It also initializes the page table and the address allocator data
- * structures required for the dma_ops interface
- */
-static struct dma_ops_domain *dma_ops_domain_alloc(void)
-{
- struct dma_ops_domain *dma_dom;
-
- dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
- if (!dma_dom)
- return NULL;
-
- spin_lock_init(&dma_dom->domain.lock);
-
- dma_dom->domain.id = domain_id_alloc();
- if (dma_dom->domain.id == 0)
- goto free_dma_dom;
- INIT_LIST_HEAD(&dma_dom->domain.dev_list);
- dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
- dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- dma_dom->domain.flags = PD_DMA_OPS_MASK;
- dma_dom->domain.priv = dma_dom;
- if (!dma_dom->domain.pt_root)
- goto free_dma_dom;
-
- dma_dom->need_flush = false;
- dma_dom->target_dev = 0xffff;
-
- add_domain_to_list(&dma_dom->domain);
-
- if (alloc_new_range(dma_dom, true, GFP_KERNEL))
- goto free_dma_dom;
-
- /*
- * Mark the first page as allocated so we never return 0 as
- * a valid dma-address and can use 0 as an error value.
- */
- dma_dom->aperture[0]->bitmap[0] = 1;
- dma_dom->next_address = 0;
-
- return dma_dom;
-
-free_dma_dom:
- dma_ops_domain_free(dma_dom);
-
- return NULL;
-}
-
-/*
- * little helper function to check whether a given protection domain is a
- * dma_ops domain
- */
-static bool dma_ops_domain(struct protection_domain *domain)
-{
- return domain->flags & PD_DMA_OPS_MASK;
-}
-
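-/*
- * Write a device table entry: data[0] and data[1] carry the page table
- * root pointer together with the paging mode and permission bits,
- * data[2] the domain id, and data[3] the IOTLB-enable flag when ATS is
- * in use for the device.
- */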
-static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
-{
- u64 pte_root = virt_to_phys(domain->pt_root);
- u32 flags = 0;
-
- pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
- << DEV_ENTRY_MODE_SHIFT;
- pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
-
- if (ats)
- flags |= DTE_FLAG_IOTLB;
-
- amd_iommu_dev_table[devid].data[3] |= flags;
- amd_iommu_dev_table[devid].data[2] = domain->id;
- amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
-}
-
-static void clear_dte_entry(u16 devid)
-{
- /* remove entry from the device table seen by the hardware */
- amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
- amd_iommu_dev_table[devid].data[1] = 0;
- amd_iommu_dev_table[devid].data[2] = 0;
-
- amd_iommu_apply_erratum_63(devid);
-}
-
-static void do_attach(struct device *dev, struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- struct pci_dev *pdev;
- bool ats = false;
- u16 devid;
-
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
- dev_data = get_dev_data(dev);
- pdev = to_pci_dev(dev);
-
- if (amd_iommu_iotlb_sup)
- ats = pci_ats_enabled(pdev);
-
- /* Update data structures */
- dev_data->domain = domain;
- list_add(&dev_data->list, &domain->dev_list);
- set_dte_entry(devid, domain, ats);
-
- /* Do reference counting */
- domain->dev_iommu[iommu->index] += 1;
- domain->dev_cnt += 1;
-
- /* Flush the DTE entry */
- device_flush_dte(dev);
-}
-
-static void do_detach(struct device *dev)
-{
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- u16 devid;
-
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
- dev_data = get_dev_data(dev);
-
- /* decrease reference counters */
- dev_data->domain->dev_iommu[iommu->index] -= 1;
- dev_data->domain->dev_cnt -= 1;
-
- /* Update data structures */
- dev_data->domain = NULL;
- list_del(&dev_data->list);
- clear_dte_entry(devid);
-
- /* Flush the DTE entry */
- device_flush_dte(dev);
-}
-
-/*
- * If a device is not yet associated with a domain, this function
- * assigns it to the given domain and makes it visible to the hardware
- */
-static int __attach_device(struct device *dev,
- struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data, *alias_data;
- int ret;
-
- dev_data = get_dev_data(dev);
- alias_data = get_dev_data(dev_data->alias);
-
- if (!alias_data)
- return -EINVAL;
-
- /* lock domain */
- spin_lock(&domain->lock);
-
- /* Some sanity checks */
- ret = -EBUSY;
- if (alias_data->domain != NULL &&
- alias_data->domain != domain)
- goto out_unlock;
-
- if (dev_data->domain != NULL &&
- dev_data->domain != domain)
- goto out_unlock;
-
- /* Do real assignment */
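- /*
- * The bind counter tracks how many attachments share the alias;
- * do_detach() for the alias only runs when the last reference is
- * dropped in __detach_device().
- */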
- if (dev_data->alias != dev) {
- alias_data = get_dev_data(dev_data->alias);
- if (alias_data->domain == NULL)
- do_attach(dev_data->alias, domain);
-
- atomic_inc(&alias_data->bind);
- }
-
- if (dev_data->domain == NULL)
- do_attach(dev, domain);
-
- atomic_inc(&dev_data->bind);
-
- ret = 0;
-
-out_unlock:
-
- /* ready */
- spin_unlock(&domain->lock);
-
- return ret;
-}
-
-/*
- * If a device is not yet associated with a domain, this function
- * assigns it to the given domain and makes it visible to the hardware
- */
-static int attach_device(struct device *dev,
- struct protection_domain *domain)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- unsigned long flags;
- int ret;
-
- if (amd_iommu_iotlb_sup)
- pci_enable_ats(pdev, PAGE_SHIFT);
-
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- ret = __attach_device(dev, domain);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-
- /*
- * We might boot into a crash-kernel here. The crashed kernel
- * may have left the IOMMU caches dirty, so we have to flush
- * them here to evict all the stale entries.
- */
- domain_flush_tlb_pde(domain);
-
- return ret;
-}
-
-/*
- * Removes a device from a protection domain (unlocked)
- */
-static void __detach_device(struct device *dev)
-{
- struct iommu_dev_data *dev_data = get_dev_data(dev);
- struct iommu_dev_data *alias_data;
- struct protection_domain *domain;
- unsigned long flags;
-
- BUG_ON(!dev_data->domain);
-
- domain = dev_data->domain;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- if (dev_data->alias != dev) {
- alias_data = get_dev_data(dev_data->alias);
- if (atomic_dec_and_test(&alias_data->bind))
- do_detach(dev_data->alias);
- }
-
- if (atomic_dec_and_test(&dev_data->bind))
- do_detach(dev);
-
- spin_unlock_irqrestore(&domain->lock, flags);
-
- /*
- * If we run in passthrough mode the device must be assigned to the
- * passthrough domain if it is detached from any other domain.
- * Make sure we can deassign from the pt_domain itself.
- */
- if (iommu_pass_through &&
- (dev_data->domain == NULL && domain != pt_domain))
- __attach_device(dev, pt_domain);
-}
-
-/*
- * Removes a device from a protection domain (with devtable_lock held)
- */
-static void detach_device(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- unsigned long flags;
-
- /* lock device table */
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- __detach_device(dev);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-
- if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
- pci_disable_ats(pdev);
-}
-
-/*
- * Find out the protection domain structure for a given PCI device. This
- * will give us the pointer to the page table root for example.
- */
-static struct protection_domain *domain_for_device(struct device *dev)
-{
- struct protection_domain *dom;
- struct iommu_dev_data *dev_data, *alias_data;
- unsigned long flags;
- u16 devid;
-
- devid = get_device_id(dev);
- dev_data = get_dev_data(dev);
- alias_data = get_dev_data(dev_data->alias);
- if (!alias_data)
- return NULL;
-
- read_lock_irqsave(&amd_iommu_devtable_lock, flags);
- dom = dev_data->domain;
- if (dom == NULL &&
- alias_data->domain != NULL) {
- __attach_device(dev, alias_data->domain);
- dom = alias_data->domain;
- }
-
- read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-
- return dom;
-}
-
-static int device_change_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- u16 devid;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_domain;
- struct amd_iommu *iommu;
- unsigned long flags;
-
- if (!check_device(dev))
- return 0;
-
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
-
- switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
-
- domain = domain_for_device(dev);
-
- if (!domain)
- goto out;
- if (iommu_pass_through)
- break;
- detach_device(dev);
- break;
- case BUS_NOTIFY_ADD_DEVICE:
-
- iommu_init_device(dev);
-
- domain = domain_for_device(dev);
-
- /* allocate a protection domain if a device is added */
- dma_domain = find_protection_domain(devid);
- if (dma_domain)
- goto out;
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain)
- goto out;
- dma_domain->target_dev = devid;
-
- spin_lock_irqsave(&iommu_pd_list_lock, flags);
- list_add_tail(&dma_domain->list, &iommu_pd_list);
- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-
- break;
- case BUS_NOTIFY_DEL_DEVICE:
-
- iommu_uninit_device(dev);
-
- default:
- goto out;
- }
-
- device_flush_dte(dev);
- iommu_completion_wait(iommu);
-
-out:
- return 0;
-}
-
-static struct notifier_block device_nb = {
- .notifier_call = device_change_notifier,
-};
-
-void amd_iommu_init_notifier(void)
-{
- bus_register_notifier(&pci_bus_type, &device_nb);
-}
-
-/*****************************************************************************
- *
- * The next functions belong to the dma_ops mapping/unmapping code.
- *
- *****************************************************************************/
-
-/*
- * In the dma_ops path we only have the struct device. This function
- * finds the corresponding IOMMU, the protection domain and the
- * requestor id for a given device.
- * If the device is not yet associated with a domain, this is also done
- * in this function.
- */
-static struct protection_domain *get_domain(struct device *dev)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- u16 devid = get_device_id(dev);
-
- if (!check_device(dev))
- return ERR_PTR(-EINVAL);
-
- domain = domain_for_device(dev);
- if (domain != NULL && !dma_ops_domain(domain))
- return ERR_PTR(-EBUSY);
-
- if (domain != NULL)
- return domain;
-
- /* Device not bound yet - bind it */
- dma_dom = find_protection_domain(devid);
- if (!dma_dom)
- dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
- attach_device(dev, &dma_dom->domain);
- DUMP_printk("Using protection domain %d for device %s\n",
- dma_dom->domain.id, dev_name(dev));
-
- return &dma_dom->domain;
-}
-
-static void update_device_table(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
-
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct pci_dev *pdev = to_pci_dev(dev_data->dev);
- u16 devid = get_device_id(dev_data->dev);
- set_dte_entry(devid, domain, pci_ats_enabled(pdev));
- }
-}
-
-static void update_domain(struct protection_domain *domain)
-{
- if (!domain->updated)
- return;
-
- update_device_table(domain);
-
- domain_flush_devices(domain);
- domain_flush_tlb_pde(domain);
-
- domain->updated = false;
-}
-
-/*
- * This function fetches the PTE for a given address in the aperture
- */
-static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
- unsigned long address)
-{
- struct aperture_range *aperture;
- u64 *pte, *pte_page;
-
- aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
- if (!aperture)
- return NULL;
-
- pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
- if (!pte) {
- pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
- GFP_ATOMIC);
- aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
- } else
- pte += PM_LEVEL_INDEX(0, address);
-
- update_domain(&dom->domain);
-
- return pte;
-}
-
-/*
- * This is the generic map function. It maps one 4kb page at paddr to
- * the given address in the DMA address space for the domain.
- */
-static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
- unsigned long address,
- phys_addr_t paddr,
- int direction)
-{
- u64 *pte, __pte;
-
- WARN_ON(address > dom->aperture_size);
-
- paddr &= PAGE_MASK;
-
- pte = dma_ops_get_pte(dom, address);
- if (!pte)
- return DMA_ERROR_CODE;
-
- __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
-
- if (direction == DMA_TO_DEVICE)
- __pte |= IOMMU_PTE_IR;
- else if (direction == DMA_FROM_DEVICE)
- __pte |= IOMMU_PTE_IW;
- else if (direction == DMA_BIDIRECTIONAL)
- __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
-
- WARN_ON(*pte);
-
- *pte = __pte;
-
- return (dma_addr_t)address;
-}
-
-/*
- * The generic unmapping function for one page in the DMA address space.
- */
-static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
- unsigned long address)
-{
- struct aperture_range *aperture;
- u64 *pte;
-
- if (address >= dom->aperture_size)
- return;
-
- aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
- if (!aperture)
- return;
-
- pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
- if (!pte)
- return;
-
- pte += PM_LEVEL_INDEX(0, address);
-
- WARN_ON(!*pte);
-
- *pte = 0ULL;
-}
-
-/*
- * This function contains common code for mapping a physically
- * contiguous memory region into DMA address space. It is used by all
- * mapping functions provided with this IOMMU driver.
- * Must be called with the domain lock held.
- */
-static dma_addr_t __map_single(struct device *dev,
- struct dma_ops_domain *dma_dom,
- phys_addr_t paddr,
- size_t size,
- int dir,
- bool align,
- u64 dma_mask)
-{
- dma_addr_t offset = paddr & ~PAGE_MASK;
- dma_addr_t address, start, ret;
- unsigned int pages;
- unsigned long align_mask = 0;
- int i;
-
- pages = iommu_num_pages(paddr, size, PAGE_SIZE);
- paddr &= PAGE_MASK;
-
- INC_STATS_COUNTER(total_map_requests);
-
- if (pages > 1)
- INC_STATS_COUNTER(cross_page);
-
- if (align)
- align_mask = (1UL << get_order(size)) - 1;
-
-retry:
- address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
- dma_mask);
- if (unlikely(address == DMA_ERROR_CODE)) {
- /*
- * Setting next_address here will let the address
- * allocator only scan the newly allocated range in the
- * first run. This is a small optimization.
- */
- dma_dom->next_address = dma_dom->aperture_size;
-
- if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
- goto out;
-
- /*
- * aperture was successfully enlarged by 128 MB, try
- * allocation again
- */
- goto retry;
- }
-
- start = address;
- for (i = 0; i < pages; ++i) {
- ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
- if (ret == DMA_ERROR_CODE)
- goto out_unmap;
-
- paddr += PAGE_SIZE;
- start += PAGE_SIZE;
- }
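- /* Re-add the offset of paddr within its first page */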
- address += offset;
-
- ADD_STATS_COUNTER(alloced_io_mem, size);
-
- if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
- domain_flush_tlb(&dma_dom->domain);
- dma_dom->need_flush = false;
- } else if (unlikely(amd_iommu_np_cache))
- domain_flush_pages(&dma_dom->domain, address, size);
-
-out:
- return address;
-
-out_unmap:
-
- for (--i; i >= 0; --i) {
- start -= PAGE_SIZE;
- dma_ops_domain_unmap(dma_dom, start);
- }
-
- dma_ops_free_addresses(dma_dom, address, pages);
-
- return DMA_ERROR_CODE;
-}
-
-/*
- * Does the reverse of the __map_single function. Must be called with
- * the domain lock held too
- */
-static void __unmap_single(struct dma_ops_domain *dma_dom,
- dma_addr_t dma_addr,
- size_t size,
- int dir)
-{
- dma_addr_t flush_addr;
- dma_addr_t i, start;
- unsigned int pages;
-
- if ((dma_addr == DMA_ERROR_CODE) ||
- (dma_addr + size > dma_dom->aperture_size))
- return;
-
- flush_addr = dma_addr;
- pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
- dma_addr &= PAGE_MASK;
- start = dma_addr;
-
- for (i = 0; i < pages; ++i) {
- dma_ops_domain_unmap(dma_dom, start);
- start += PAGE_SIZE;
- }
-
- SUB_STATS_COUNTER(alloced_io_mem, size);
-
- dma_ops_free_addresses(dma_dom, dma_addr, pages);
-
- if (amd_iommu_unmap_flush || dma_dom->need_flush) {
- domain_flush_pages(&dma_dom->domain, flush_addr, size);
- dma_dom->need_flush = false;
- }
-}
-
-/*
- * The exported map_single function for dma_ops.
- */
-static dma_addr_t map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- unsigned long flags;
- struct protection_domain *domain;
- dma_addr_t addr;
- u64 dma_mask;
- phys_addr_t paddr = page_to_phys(page) + offset;
-
- INC_STATS_COUNTER(cnt_map_single);
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return (dma_addr_t)paddr;
- else if (IS_ERR(domain))
- return DMA_ERROR_CODE;
-
- dma_mask = *dev->dma_mask;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- addr = __map_single(dev, domain->priv, paddr, size, dir, false,
- dma_mask);
- if (addr == DMA_ERROR_CODE)
- goto out;
-
- domain_flush_complete(domain);
-
-out:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return addr;
-}
-
-/*
- * The exported unmap_single function for dma_ops.
- */
-static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- unsigned long flags;
- struct protection_domain *domain;
-
- INC_STATS_COUNTER(cnt_unmap_single);
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- __unmap_single(domain->priv, dma_addr, size, dir);
-
- domain_flush_complete(domain);
-
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-/*
- * This is a special map_sg function which is used when we have to map
- * a device which is not handled by any AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sglist, s, nelems, i) {
- s->dma_address = (dma_addr_t)sg_phys(s);
- s->dma_length = s->length;
- }
-
- return nelems;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static int map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- unsigned long flags;
- struct protection_domain *domain;
- int i;
- struct scatterlist *s;
- phys_addr_t paddr;
- int mapped_elems = 0;
- u64 dma_mask;
-
- INC_STATS_COUNTER(cnt_map_sg);
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return map_sg_no_iommu(dev, sglist, nelems, dir);
- else if (IS_ERR(domain))
- return 0;
-
- dma_mask = *dev->dma_mask;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- for_each_sg(sglist, s, nelems, i) {
- paddr = sg_phys(s);
-
- s->dma_address = __map_single(dev, domain->priv,
- paddr, s->length, dir, false,
- dma_mask);
-
- if (s->dma_address) {
- s->dma_length = s->length;
- mapped_elems++;
- } else
- goto unmap;
- }
-
- domain_flush_complete(domain);
-
-out:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return mapped_elems;
-unmap:
- for_each_sg(sglist, s, mapped_elems, i) {
- if (s->dma_address)
- __unmap_single(domain->priv, s->dma_address,
- s->dma_length, dir);
- s->dma_address = s->dma_length = 0;
- }
-
- mapped_elems = 0;
-
- goto out;
-}
-
-/*
- * The exported unmap_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static void unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- unsigned long flags;
- struct protection_domain *domain;
- struct scatterlist *s;
- int i;
-
- INC_STATS_COUNTER(cnt_unmap_sg);
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- for_each_sg(sglist, s, nelems, i) {
- __unmap_single(domain->priv, s->dma_address,
- s->dma_length, dir);
- s->dma_address = s->dma_length = 0;
- }
-
- domain_flush_complete(domain);
-
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-/*
- * The exported alloc_coherent function for dma_ops.
- */
-static void *alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag)
-{
- unsigned long flags;
- void *virt_addr;
- struct protection_domain *domain;
- phys_addr_t paddr;
- u64 dma_mask = dev->coherent_dma_mask;
-
- INC_STATS_COUNTER(cnt_alloc_coherent);
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL) {
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- *dma_addr = __pa(virt_addr);
- return virt_addr;
- } else if (IS_ERR(domain))
- return NULL;
-
- dma_mask = dev->coherent_dma_mask;
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
-
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- if (!virt_addr)
- return NULL;
-
- paddr = virt_to_phys(virt_addr);
-
- if (!dma_mask)
- dma_mask = *dev->dma_mask;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- *dma_addr = __map_single(dev, domain->priv, paddr,
- size, DMA_BIDIRECTIONAL, true, dma_mask);
-
- if (*dma_addr == DMA_ERROR_CODE) {
- spin_unlock_irqrestore(&domain->lock, flags);
- goto out_free;
- }
-
- domain_flush_complete(domain);
-
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return virt_addr;
-
-out_free:
-
- free_pages((unsigned long)virt_addr, get_order(size));
-
- return NULL;
-}
-
-/*
- * The exported free_coherent function for dma_ops.
- */
-static void free_coherent(struct device *dev, size_t size,
- void *virt_addr, dma_addr_t dma_addr)
-{
- unsigned long flags;
- struct protection_domain *domain;
-
- INC_STATS_COUNTER(cnt_free_coherent);
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- goto free_mem;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
-
- domain_flush_complete(domain);
-
- spin_unlock_irqrestore(&domain->lock, flags);
-
-free_mem:
- free_pages((unsigned long)virt_addr, get_order(size));
-}
-
-/*
- * This function is called by the DMA layer to find out if we can handle a
- * particular device. It is part of the dma_ops.
- */
-static int amd_iommu_dma_supported(struct device *dev, u64 mask)
-{
- return check_device(dev);
-}
-
-/*
- * The function for pre-allocating protection domains.
- *
- * Once the driver core informs the DMA layer when a driver grabs a
- * device, we won't need to preallocate the protection domains anymore.
- * For now we have to.
- */
-static void prealloc_protection_domains(void)
-{
- struct pci_dev *dev = NULL;
- struct dma_ops_domain *dma_dom;
- u16 devid;
-
- for_each_pci_dev(dev) {
-
- /* Do we handle this device? */
- if (!check_device(&dev->dev))
- continue;
-
- /* Is there already any domain for it? */
- if (domain_for_device(&dev->dev))
- continue;
-
- devid = get_device_id(&dev->dev);
-
- dma_dom = dma_ops_domain_alloc();
- if (!dma_dom)
- continue;
- init_unity_mappings_for_device(dma_dom, devid);
- dma_dom->target_dev = devid;
-
- attach_device(&dev->dev, &dma_dom->domain);
-
- list_add_tail(&dma_dom->list, &iommu_pd_list);
- }
-}
-
-static struct dma_map_ops amd_iommu_dma_ops = {
- .alloc_coherent = alloc_coherent,
- .free_coherent = free_coherent,
- .map_page = map_page,
- .unmap_page = unmap_page,
- .map_sg = map_sg,
- .unmap_sg = unmap_sg,
- .dma_supported = amd_iommu_dma_supported,
-};
-
-static unsigned device_dma_ops_init(void)
-{
- struct pci_dev *pdev = NULL;
- unsigned unhandled = 0;
-
- for_each_pci_dev(pdev) {
- if (!check_device(&pdev->dev)) {
- unhandled += 1;
- continue;
- }
-
- pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
- }
-
- return unhandled;
-}
-
-/*
- * The function which glues the AMD IOMMU driver into dma_ops.
- */
-
-void __init amd_iommu_init_api(void)
-{
- register_iommu(&amd_iommu_ops);
-}
-
-int __init amd_iommu_init_dma_ops(void)
-{
- struct amd_iommu *iommu;
- int ret, unhandled;
-
- /*
- * First allocate a default protection domain for every IOMMU we
- * found in the system. Devices not assigned to any other
- * protection domain will be assigned to the default one.
- */
- for_each_iommu(iommu) {
- iommu->default_dom = dma_ops_domain_alloc();
- if (iommu->default_dom == NULL)
- return -ENOMEM;
- iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
- ret = iommu_init_unity_mappings(iommu);
- if (ret)
- goto free_domains;
- }
-
- /*
- * Pre-allocate the protection domains for each device.
- */
- prealloc_protection_domains();
-
- iommu_detected = 1;
- swiotlb = 0;
-
- /* Make the IOMMU dma_ops finally visible to the drivers */
- unhandled = device_dma_ops_init();
- if (unhandled && max_pfn > MAX_DMA32_PFN) {
- /* There are unhandled devices - initialize swiotlb for them */
- swiotlb = 1;
- }
-
- amd_iommu_stats_init();
-
- return 0;
-
-free_domains:
-
- for_each_iommu(iommu) {
- if (iommu->default_dom)
- dma_ops_domain_free(iommu->default_dom);
- }
-
- return ret;
-}
-
-/*****************************************************************************
- *
- * The following functions belong to the exported interface of AMD IOMMU
- *
- * This interface allows access to lower level functions of the IOMMU
- * like protection domain handling and assignment of devices to domains,
- * which is not possible with the dma_ops interface.
- *
- *****************************************************************************/
-
-static void cleanup_domain(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data, *next;
- unsigned long flags;
-
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-
- list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
- struct device *dev = dev_data->dev;
-
- __detach_device(dev);
- atomic_set(&dev_data->bind, 0);
- }
-
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-}
-
-static void protection_domain_free(struct protection_domain *domain)
-{
- if (!domain)
- return;
-
- del_domain_from_list(domain);
-
- if (domain->id)
- domain_id_free(domain->id);
-
- kfree(domain);
-}
-
-static struct protection_domain *protection_domain_alloc(void)
-{
- struct protection_domain *domain;
-
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (!domain)
- return NULL;
-
- spin_lock_init(&domain->lock);
- mutex_init(&domain->api_lock);
- domain->id = domain_id_alloc();
- if (!domain->id)
- goto out_err;
- INIT_LIST_HEAD(&domain->dev_list);
-
- add_domain_to_list(domain);
-
- return domain;
-
-out_err:
- kfree(domain);
-
- return NULL;
-}
-
-static int amd_iommu_domain_init(struct iommu_domain *dom)
-{
- struct protection_domain *domain;
-
- domain = protection_domain_alloc();
- if (!domain)
- goto out_free;
-
- domain->mode = PAGE_MODE_3_LEVEL;
- domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!domain->pt_root)
- goto out_free;
-
- dom->priv = domain;
-
- return 0;
-
-out_free:
- protection_domain_free(domain);
-
- return -ENOMEM;
-}
-
-static void amd_iommu_domain_destroy(struct iommu_domain *dom)
-{
- struct protection_domain *domain = dom->priv;
-
- if (!domain)
- return;
-
- if (domain->dev_cnt > 0)
- cleanup_domain(domain);
-
- BUG_ON(domain->dev_cnt != 0);
-
- free_pagetable(domain);
-
- protection_domain_free(domain);
-
- dom->priv = NULL;
-}
-
-static void amd_iommu_detach_device(struct iommu_domain *dom,
- struct device *dev)
-{
- struct iommu_dev_data *dev_data = dev->archdata.iommu;
- struct amd_iommu *iommu;
- u16 devid;
-
- if (!check_device(dev))
- return;
-
- devid = get_device_id(dev);
-
- if (dev_data->domain != NULL)
- detach_device(dev);
-
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- return;
-
- device_flush_dte(dev);
- iommu_completion_wait(iommu);
-}
-
-static int amd_iommu_attach_device(struct iommu_domain *dom,
- struct device *dev)
-{
- struct protection_domain *domain = dom->priv;
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- int ret;
- u16 devid;
-
- if (!check_device(dev))
- return -EINVAL;
-
- dev_data = dev->archdata.iommu;
-
- devid = get_device_id(dev);
-
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- return -EINVAL;
-
- if (dev_data->domain)
- detach_device(dev);
-
- ret = attach_device(dev, domain);
-
- iommu_completion_wait(iommu);
-
- return ret;
-}
-
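-/*
- * The IOMMU-API passes the mapping size as a page order, so the size
- * in bytes is 4kb << gfp_order.
- */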
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int iommu_prot)
-{
- unsigned long page_size = 0x1000UL << gfp_order;
- struct protection_domain *domain = dom->priv;
- int prot = 0;
- int ret;
-
- if (iommu_prot & IOMMU_READ)
- prot |= IOMMU_PROT_IR;
- if (iommu_prot & IOMMU_WRITE)
- prot |= IOMMU_PROT_IW;
-
- mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, prot, page_size);
- mutex_unlock(&domain->api_lock);
-
- return ret;
-}
-
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- int gfp_order)
-{
- struct protection_domain *domain = dom->priv;
- unsigned long page_size, unmap_size;
-
- page_size = 0x1000UL << gfp_order;
-
- mutex_lock(&domain->api_lock);
- unmap_size = iommu_unmap_page(domain, iova, page_size);
- mutex_unlock(&domain->api_lock);
-
- domain_flush_tlb_pde(domain);
-
- return get_order(unmap_size);
-}
-
-static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
- unsigned long iova)
-{
- struct protection_domain *domain = dom->priv;
- unsigned long offset_mask;
- phys_addr_t paddr;
- u64 *pte, __pte;
-
- pte = fetch_pte(domain, iova);
-
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- return 0;
-
- if (PM_PTE_LEVEL(*pte) == 0)
- offset_mask = PAGE_SIZE - 1;
- else
- offset_mask = PTE_PAGE_SIZE(*pte) - 1;
-
- __pte = *pte & PM_ADDR_MASK;
- paddr = (__pte & ~offset_mask) | (iova & offset_mask);
-
- return paddr;
-}
-
-static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
-{
- switch (cap) {
- case IOMMU_CAP_CACHE_COHERENCY:
- return 1;
- }
-
- return 0;
-}
-
-static struct iommu_ops amd_iommu_ops = {
- .domain_init = amd_iommu_domain_init,
- .domain_destroy = amd_iommu_domain_destroy,
- .attach_dev = amd_iommu_attach_device,
- .detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map,
- .unmap = amd_iommu_unmap,
- .iova_to_phys = amd_iommu_iova_to_phys,
- .domain_has_cap = amd_iommu_domain_has_cap,
-};
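For orientation, the struct above is what the generic IOMMU layer dispatches into. A minimal consumer-side sketch, assuming the iommu_domain_alloc()/iommu_map() signatures of this kernel generation and a hypothetical struct device *dev; error handling is abbreviated:

	#include <linux/iommu.h>

	/* Hedged sketch: allocate a domain, attach a device, map one page. */
	static int example_map_one_page(struct device *dev, phys_addr_t paddr)
	{
		struct iommu_domain *dom = iommu_domain_alloc(); /* .domain_init */

		if (!dom)
			return -ENOMEM;

		if (iommu_attach_device(dom, dev)) {		/* .attach_dev */
			iommu_domain_free(dom);			/* .domain_destroy */
			return -ENODEV;
		}

		/* gfp_order 0 means a single PAGE_SIZE mapping (.map above) */
		return iommu_map(dom, 0x100000, paddr, 0,
				 IOMMU_READ | IOMMU_WRITE);
	}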
-
-/*****************************************************************************
- *
- * The next functions perform a basic initialization of the IOMMU for
- * passthrough mode.
- *
- * In passthrough mode the IOMMU is initialized and enabled but not used for
- * DMA-API translation.
- *
- *****************************************************************************/
-
-int __init amd_iommu_init_passthrough(void)
-{
- struct amd_iommu *iommu;
- struct pci_dev *dev = NULL;
- u16 devid;
-
- /* allocate passthrough domain */
- pt_domain = protection_domain_alloc();
- if (!pt_domain)
- return -ENOMEM;
-
- pt_domain->mode |= PAGE_MODE_NONE;
-
- for_each_pci_dev(dev) {
- if (!check_device(&dev->dev))
- continue;
-
- devid = get_device_id(&dev->dev);
-
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- continue;
-
- attach_device(&dev->dev, pt_domain);
- }
-
- pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
- return 0;
-}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
deleted file mode 100644
index bfc8453bd98d..000000000000
--- a/arch/x86/kernel/amd_iommu_init.c
+++ /dev/null
@@ -1,1572 +0,0 @@
-/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- * Leo Duran <leo.duran@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/syscore_ops.h>
-#include <linux/interrupt.h>
-#include <linux/msi.h>
-#include <asm/pci-direct.h>
-#include <asm/amd_iommu_proto.h>
-#include <asm/amd_iommu_types.h>
-#include <asm/amd_iommu.h>
-#include <asm/iommu.h>
-#include <asm/gart.h>
-#include <asm/x86_init.h>
-#include <asm/iommu_table.h>
-/*
- * definitions for the ACPI scanning code
- */
-#define IVRS_HEADER_LENGTH 48
-
-#define ACPI_IVHD_TYPE 0x10
-#define ACPI_IVMD_TYPE_ALL 0x20
-#define ACPI_IVMD_TYPE 0x21
-#define ACPI_IVMD_TYPE_RANGE 0x22
-
-#define IVHD_DEV_ALL 0x01
-#define IVHD_DEV_SELECT 0x02
-#define IVHD_DEV_SELECT_RANGE_START 0x03
-#define IVHD_DEV_RANGE_END 0x04
-#define IVHD_DEV_ALIAS 0x42
-#define IVHD_DEV_ALIAS_RANGE 0x43
-#define IVHD_DEV_EXT_SELECT 0x46
-#define IVHD_DEV_EXT_SELECT_RANGE 0x47
-
-#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
-#define IVHD_FLAG_PASSPW_EN_MASK 0x02
-#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
-#define IVHD_FLAG_ISOC_EN_MASK 0x08
-
-#define IVMD_FLAG_EXCL_RANGE 0x08
-#define IVMD_FLAG_UNITY_MAP 0x01
-
-#define ACPI_DEVFLAG_INITPASS 0x01
-#define ACPI_DEVFLAG_EXTINT 0x02
-#define ACPI_DEVFLAG_NMI 0x04
-#define ACPI_DEVFLAG_SYSMGT1 0x10
-#define ACPI_DEVFLAG_SYSMGT2 0x20
-#define ACPI_DEVFLAG_LINT0 0x40
-#define ACPI_DEVFLAG_LINT1 0x80
-#define ACPI_DEVFLAG_ATSDIS 0x10000000
-
-/*
- * ACPI table definitions
- *
- * These data structures are laid over the table to parse the important values
- * out of it.
- */
-
-/*
- * structure describing one IOMMU in the ACPI table. Typically followed by one
- * or more ivhd_entry structures.
- */
-struct ivhd_header {
- u8 type;
- u8 flags;
- u16 length;
- u16 devid;
- u16 cap_ptr;
- u64 mmio_phys;
- u16 pci_seg;
- u16 info;
- u32 reserved;
-} __attribute__((packed));
-
-/*
- * A device entry describing which devices a specific IOMMU translates and
- * which requestor ids they use.
- */
-struct ivhd_entry {
- u8 type;
- u16 devid;
- u8 flags;
- u32 ext;
-} __attribute__((packed));
-
-/*
- * An AMD IOMMU memory definition structure. It defines things like exclusion
- * ranges for devices and regions that should be unity mapped.
- */
-struct ivmd_header {
- u8 type;
- u8 flags;
- u16 length;
- u16 devid;
- u16 aux;
- u64 resv;
- u64 range_start;
- u64 range_length;
-} __attribute__((packed));
-
-bool amd_iommu_dump;
-
-static int __initdata amd_iommu_detected;
-static bool __initdata amd_iommu_disabled;
-
-u16 amd_iommu_last_bdf; /* largest PCI device id we have
- to handle */
-LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
- we find in ACPI */
-bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
-
-LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
- system */
-
-/* Array to assign indices to IOMMUs */
-struct amd_iommu *amd_iommus[MAX_IOMMUS];
-int amd_iommus_present;
-
-/* IOMMUs have a non-present cache? */
-bool amd_iommu_np_cache __read_mostly;
-bool amd_iommu_iotlb_sup __read_mostly = true;
-
-/*
- * The ACPI table parsing functions set this variable on an error
- */
-static int __initdata amd_iommu_init_err;
-
-/*
- * List of protection domains - used during resume
- */
-LIST_HEAD(amd_iommu_pd_list);
-spinlock_t amd_iommu_pd_lock;
-
-/*
- * Pointer to the device table which is shared by all AMD IOMMUs
- * it is indexed by the PCI device id or the HT unit id and contains
- * information about the domain the device belongs to as well as the
- * page table root pointer.
- */
-struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * The alias table is a driver specific data structure which contains the
- * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
- * More than one device can share the same requestor id.
- */
-u16 *amd_iommu_alias_table;
-
-/*
- * The rlookup table is used to find the IOMMU which is responsible
- * for a specific device. It is also indexed by the PCI device id.
- */
-struct amd_iommu **amd_iommu_rlookup_table;
-
-/*
- * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
- * to know which ones are already in use.
- */
-unsigned long *amd_iommu_pd_alloc_bitmap;
-
-static u32 dev_table_size; /* size of the device table */
-static u32 alias_table_size; /* size of the alias table */
-static u32 rlookup_table_size; /* size of the rlookup table */
-
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-extern void iommu_flush_all_caches(struct amd_iommu *iommu);
-
-static inline void update_last_devid(u16 devid)
-{
- if (devid > amd_iommu_last_bdf)
- amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
-{
- unsigned shift = PAGE_SHIFT +
- get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
-
- return 1UL << shift;
-}
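A worked example of tbl_size(), assuming 4 KiB pages and the 32-byte device table entries this driver uses:

	/*
	 * With amd_iommu_last_bdf == 0xffff: (0xffff + 1) * 32 == 2 MiB,
	 * get_order(2 MiB) == 9, so shift == 12 + 9 == 21 and tbl_size()
	 * returns 1UL << 21 == 2 MiB - the allocation is rounded up to a
	 * power-of-two number of pages.
	 */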
-
-/* Access to l1 and l2 indexed register spaces */
-
-static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
-{
- u32 val;
-
- pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
- pci_read_config_dword(iommu->dev, 0xfc, &val);
- return val;
-}
-
-static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
-{
- pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
- pci_write_config_dword(iommu->dev, 0xfc, val);
- pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
-}
-
-static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
-{
- u32 val;
-
- pci_write_config_dword(iommu->dev, 0xf0, address);
- pci_read_config_dword(iommu->dev, 0xf4, &val);
- return val;
-}
-
-static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
-{
- pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
- pci_write_config_dword(iommu->dev, 0xf4, val);
-}
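These four helpers implement a classic index/data indirection in PCI config space: one dword selects the target register (with a write-arm bit, 1 << 31 for L1 and 1 << 8 for L2), and the neighbouring dword carries the data. A small hedged usage sketch for a read-modify-write:

	static void example_l1_set_bits(struct amd_iommu *iommu, u16 l1,
					u8 reg, u32 mask)
	{
		u32 val = iommu_read_l1(iommu, l1, reg);

		iommu_write_l1(iommu, l1, reg, val | mask);
	}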
-
-/****************************************************************************
- *
- * AMD IOMMU MMIO register space handling functions
- *
- * These functions are used to program the IOMMU device registers in
- * MMIO space required for that driver.
- *
- ****************************************************************************/
-
-/*
- * This function sets the exclusion range in the IOMMU. DMA accesses to the
- * exclusion range are passed through untranslated
- */
-static void iommu_set_exclusion_range(struct amd_iommu *iommu)
-{
- u64 start = iommu->exclusion_start & PAGE_MASK;
- u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
- u64 entry;
-
- if (!iommu->exclusion_start)
- return;
-
- entry = start | MMIO_EXCL_ENABLE_MASK;
- memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
- &entry, sizeof(entry));
-
- entry = limit;
- memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
- &entry, sizeof(entry));
-}
-
-/* Programs the physical address of the device table into the IOMMU hardware */
-static void __init iommu_set_device_table(struct amd_iommu *iommu)
-{
- u64 entry;
-
- BUG_ON(iommu->mmio_base == NULL);
-
- entry = virt_to_phys(amd_iommu_dev_table);
- entry |= (dev_table_size >> 12) - 1;
- memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
- &entry, sizeof(entry));
-}
-
-/* Generic functions to enable/disable certain features of the IOMMU. */
-static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
-{
- u32 ctrl;
-
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl |= (1 << bit);
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
-}
-
-static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
-{
- u32 ctrl;
-
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl &= ~(1 << bit);
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
-}
-
-/* Function to enable the hardware */
-static void iommu_enable(struct amd_iommu *iommu)
-{
- static const char * const feat_str[] = {
- "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
- "IA", "GA", "HE", "PC", NULL
- };
- int i;
-
- printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
- dev_name(&iommu->dev->dev), iommu->cap_ptr);
-
- if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- printk(KERN_CONT " extended features: ");
- for (i = 0; feat_str[i]; ++i)
- if (iommu_feature(iommu, (1ULL << i)))
- printk(KERN_CONT " %s", feat_str[i]);
- }
- printk(KERN_CONT "\n");
-
- iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
-}
-
-static void iommu_disable(struct amd_iommu *iommu)
-{
- /* Disable command buffer */
- iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
-
- /* Disable event logging and event interrupts */
- iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
- iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
-
- /* Disable IOMMU hardware itself */
- iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
-}
-
-/*
- * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
- * the system has one.
- */
-static u8 * __init iommu_map_mmio_space(u64 address)
-{
- u8 *ret;
-
- if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
- pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
- address);
- pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
- return NULL;
- }
-
- ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
- if (ret != NULL)
- return ret;
-
- release_mem_region(address, MMIO_REGION_LENGTH);
-
- return NULL;
-}
-
-static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
-{
- if (iommu->mmio_base)
- iounmap(iommu->mmio_base);
- release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
-}
-
-/****************************************************************************
- *
- * The functions below belong to the first pass of AMD IOMMU ACPI table
- * parsing. In this pass we try to find out the highest device id this
- * code has to handle. Based on this information, the size of the shared
- * data structures is determined later.
- *
- ****************************************************************************/
-
-/*
- * This function calculates the length of a given IVHD entry
- */
-static inline int ivhd_entry_length(u8 *ivhd)
-{
- return 0x04 << (*ivhd >> 6);
-}
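The two top bits of an entry's type byte encode its size as 4 << n bytes, which is all ivhd_entry_length() decodes:

	/*
	 * Examples: IVHD_DEV_SELECT (0x02, top bits 00b) is a 4-byte entry,
	 * IVHD_DEV_ALIAS (0x42, top bits 01b) is an 8-byte entry; types with
	 * top bits 10b/11b would be 16 and 32 bytes respectively.
	 */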
-
-/*
- * This function reads the last device id the IOMMU has to handle from the PCI
- * capability header for this IOMMU
- */
-static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
-{
- u32 cap;
-
- cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
- update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
-
- return 0;
-}
-
-/*
- * After reading the highest device id from the IOMMU PCI capability header
- * this function checks whether a higher device id is defined in the ACPI table.
- */
-static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
-{
- u8 *p = (void *)h, *end = (void *)h;
- struct ivhd_entry *dev;
-
- p += sizeof(*h);
- end += h->length;
-
- find_last_devid_on_pci(PCI_BUS(h->devid),
- PCI_SLOT(h->devid),
- PCI_FUNC(h->devid),
- h->cap_ptr);
-
- while (p < end) {
- dev = (struct ivhd_entry *)p;
- switch (dev->type) {
- case IVHD_DEV_SELECT:
- case IVHD_DEV_RANGE_END:
- case IVHD_DEV_ALIAS:
- case IVHD_DEV_EXT_SELECT:
- /* all the above subfield types refer to device ids */
- update_last_devid(dev->devid);
- break;
- default:
- break;
- }
- p += ivhd_entry_length(p);
- }
-
- WARN_ON(p != end);
-
- return 0;
-}
-
-/*
- * Iterate over all IVHD entries in the ACPI table and find the highest device
- * id which we need to handle. This is the first of three functions which parse
- * the ACPI table. So we check the checksum here.
- */
-static int __init find_last_devid_acpi(struct acpi_table_header *table)
-{
- int i;
- u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
- struct ivhd_header *h;
-
- /*
- * Validate checksum here so we don't need to do it when
- * we actually parse the table
- */
- for (i = 0; i < table->length; ++i)
- checksum += p[i];
- if (checksum != 0) {
- /* ACPI table corrupt */
- amd_iommu_init_err = -ENODEV;
- return 0;
- }
-
- p += IVRS_HEADER_LENGTH;
-
- end += table->length;
- while (p < end) {
- h = (struct ivhd_header *)p;
- switch (h->type) {
- case ACPI_IVHD_TYPE:
- find_last_devid_from_ivhd(h);
- break;
- default:
- break;
- }
- p += h->length;
- }
- WARN_ON(p != end);
-
- return 0;
-}
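The checksum test above is the standard ACPI rule: the byte-wise sum of the entire table, including the checksum field itself, must be zero modulo 256. A self-contained restatement of that rule:

	static bool ivrs_checksum_ok(const u8 *table, u32 length)
	{
		u8 sum = 0;

		while (length--)
			sum += *table++;

		return sum == 0;	/* u8 arithmetic is implicitly mod 256 */
	}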
-
-/****************************************************************************
- *
- * The following functions belong to the code path which parses the ACPI table
- * the second time. In this ACPI parsing iteration we allocate IOMMU specific
- * data structures, initialize the device/alias/rlookup table and also
- * basically initialize the hardware.
- *
- ****************************************************************************/
-
-/*
- * Allocates the command buffer. This buffer is per AMD IOMMU. We can
- * write commands to that buffer later and the IOMMU will execute them
- * asynchronously
- */
-static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
-{
- u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(CMD_BUFFER_SIZE));
-
- if (cmd_buf == NULL)
- return NULL;
-
- iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
-
- return cmd_buf;
-}
-
-/*
- * This function resets the command buffer if the IOMMU stopped fetching
- * commands from it.
- */
-void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
-{
- iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
-
- writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
- iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
-}
-
-/*
- * This function writes the command buffer address to the hardware and
- * enables it.
- */
-static void iommu_enable_command_buffer(struct amd_iommu *iommu)
-{
- u64 entry;
-
- BUG_ON(iommu->cmd_buf == NULL);
-
- entry = (u64)virt_to_phys(iommu->cmd_buf);
- entry |= MMIO_CMD_SIZE_512;
-
- memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
- &entry, sizeof(entry));
-
- amd_iommu_reset_cmd_buffer(iommu);
- iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
-}
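The command buffer programmed here is a producer/consumer ring: the driver writes commands at its software tail and publishes the new tail through MMIO, while the hardware head pointer chases it. A hedged sketch of the enqueue step (the ring-full check and locking of the real command path are elided):

	static void example_queue_command(struct amd_iommu *iommu,
					  struct iommu_cmd *cmd, u32 tail)
	{
		memcpy(iommu->cmd_buf + tail, cmd, sizeof(*cmd));
		tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
		writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	}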
-
-static void __init free_command_buffer(struct amd_iommu *iommu)
-{
- free_pages((unsigned long)iommu->cmd_buf,
- get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
-}
-
-/* allocates the memory the IOMMU will log its events to */
-static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
-{
- iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(EVT_BUFFER_SIZE));
-
- if (iommu->evt_buf == NULL)
- return NULL;
-
- iommu->evt_buf_size = EVT_BUFFER_SIZE;
-
- return iommu->evt_buf;
-}
-
-static void iommu_enable_event_buffer(struct amd_iommu *iommu)
-{
- u64 entry;
-
- BUG_ON(iommu->evt_buf == NULL);
-
- entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
-
- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
- &entry, sizeof(entry));
-
- /* set head and tail to zero manually */
- writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
-
- iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
-}
-
-static void __init free_event_buffer(struct amd_iommu *iommu)
-{
- free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
-}
-
-/* sets a specific bit in the device table entry. */
-static void set_dev_entry_bit(u16 devid, u8 bit)
-{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
-
- amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
-}
-
-static int get_dev_entry_bit(u16 devid, u8 bit)
-{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
-
- return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
-}
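A device table entry is 256 bits wide and stored as an array of 32-bit words, so the helpers above split a flat bit number into a word index and a bit position:

	/*
	 * Example: bit 96 lands in word 96 >> 5 == 3 at position
	 * 96 & 0x1f == 0; bit 1 (DEV_ENTRY_TRANSLATION) stays in word 0.
	 */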
-
-
-void amd_iommu_apply_erratum_63(u16 devid)
-{
- int sysmgt;
-
- sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
-
- if (sysmgt == 0x01)
- set_dev_entry_bit(devid, DEV_ENTRY_IW);
-}
-
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
-{
- amd_iommu_rlookup_table[devid] = iommu;
-}
-
-/*
- * This function takes the device specific flags read from the ACPI
- * table and sets up the device table entry with that information
- */
-static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
- u16 devid, u32 flags, u32 ext_flags)
-{
- if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
- if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
- if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
- if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
- if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
- if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
- if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
-
- amd_iommu_apply_erratum_63(devid);
-
- set_iommu_for_device(iommu, devid);
-}
-
-/*
- * Reads the device exclusion range from ACPI and initializes the IOMMU with
- * it
- */
-static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
-{
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
- if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
- return;
-
- if (iommu) {
- /*
- * We can only configure exclusion ranges per IOMMU, not
- * per device. But we can enable the exclusion range per
- * device. This is done here
- */
- set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
- iommu->exclusion_start = m->range_start;
- iommu->exclusion_length = m->range_length;
- }
-}
-
-/*
- * This function reads some important data from the IOMMU PCI space and
- * initializes the driver data structure with it. It reads the hardware
- * capabilities and the first/last device entries
- */
-static void __init init_iommu_from_pci(struct amd_iommu *iommu)
-{
- int cap_ptr = iommu->cap_ptr;
- u32 range, misc, low, high;
- int i, j;
-
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
- &iommu->cap);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
- &range);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
- &misc);
-
- iommu->first_device = calc_devid(MMIO_GET_BUS(range),
- MMIO_GET_FD(range));
- iommu->last_device = calc_devid(MMIO_GET_BUS(range),
- MMIO_GET_LD(range));
- iommu->evt_msi_num = MMIO_MSI_NUM(misc);
-
- if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
- amd_iommu_iotlb_sup = false;
-
- /* read extended feature bits */
- low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
- high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
- iommu->features = ((u64)high << 32) | low;
-
- if (!is_rd890_iommu(iommu->dev))
- return;
-
- /*
- * Some rd890 systems may not be fully reconfigured by the BIOS, so
- * it's necessary for us to store this information so it can be
- * reprogrammed on resume
- */
-
- pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
- &iommu->stored_addr_lo);
- pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
- &iommu->stored_addr_hi);
-
- /* Low bit locks writes to configuration space */
- iommu->stored_addr_lo &= ~1;
-
- for (i = 0; i < 6; i++)
- for (j = 0; j < 0x12; j++)
- iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
-
- for (i = 0; i < 0x83; i++)
- iommu->stored_l2[i] = iommu_read_l2(iommu, i);
-}
-
-/*
- * Takes a pointer to an AMD IOMMU entry in the ACPI table and
- * initializes the hardware and our data structures with it.
- */
-static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
- struct ivhd_header *h)
-{
- u8 *p = (u8 *)h;
- u8 *end = p, flags = 0;
- u16 devid = 0, devid_start = 0, devid_to = 0;
- u32 dev_i, ext_flags = 0;
- bool alias = false;
- struct ivhd_entry *e;
-
- /*
- * First save the recommended feature enable bits from ACPI
- */
- iommu->acpi_flags = h->flags;
-
- /*
- * Done. Now parse the device entries
- */
- p += sizeof(struct ivhd_header);
- end += h->length;
-
-
- while (p < end) {
- e = (struct ivhd_entry *)p;
- switch (e->type) {
- case IVHD_DEV_ALL:
-
- DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
- " last device %02x:%02x.%x flags: %02x\n",
- PCI_BUS(iommu->first_device),
- PCI_SLOT(iommu->first_device),
- PCI_FUNC(iommu->first_device),
- PCI_BUS(iommu->last_device),
- PCI_SLOT(iommu->last_device),
- PCI_FUNC(iommu->last_device),
- e->flags);
-
- for (dev_i = iommu->first_device;
- dev_i <= iommu->last_device; ++dev_i)
- set_dev_entry_from_acpi(iommu, dev_i,
- e->flags, 0);
- break;
- case IVHD_DEV_SELECT:
-
- DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
- "flags: %02x\n",
- PCI_BUS(e->devid),
- PCI_SLOT(e->devid),
- PCI_FUNC(e->devid),
- e->flags);
-
- devid = e->devid;
- set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
- break;
- case IVHD_DEV_SELECT_RANGE_START:
-
- DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %02x:%02x.%x flags: %02x\n",
- PCI_BUS(e->devid),
- PCI_SLOT(e->devid),
- PCI_FUNC(e->devid),
- e->flags);
-
- devid_start = e->devid;
- flags = e->flags;
- ext_flags = 0;
- alias = false;
- break;
- case IVHD_DEV_ALIAS:
-
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
- "flags: %02x devid_to: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
- PCI_SLOT(e->devid),
- PCI_FUNC(e->devid),
- e->flags,
- PCI_BUS(e->ext >> 8),
- PCI_SLOT(e->ext >> 8),
- PCI_FUNC(e->ext >> 8));
-
- devid = e->devid;
- devid_to = e->ext >> 8;
- set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
- set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
- amd_iommu_alias_table[devid] = devid_to;
- break;
- case IVHD_DEV_ALIAS_RANGE:
-
- DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %02x:%02x.%x flags: %02x "
- "devid_to: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
- PCI_SLOT(e->devid),
- PCI_FUNC(e->devid),
- e->flags,
- PCI_BUS(e->ext >> 8),
- PCI_SLOT(e->ext >> 8),
- PCI_FUNC(e->ext >> 8));
-
- devid_start = e->devid;
- flags = e->flags;
- devid_to = e->ext >> 8;
- ext_flags = 0;
- alias = true;
- break;
- case IVHD_DEV_EXT_SELECT:
-
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
- "flags: %02x ext: %08x\n",
- PCI_BUS(e->devid),
- PCI_SLOT(e->devid),
- PCI_FUNC(e->devid),
- e->flags, e->ext);
-
- devid = e->devid;
- set_dev_entry_from_acpi(iommu, devid, e->flags,
- e->ext);
- break;
- case IVHD_DEV_EXT_SELECT_RANGE:
-
- DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%02x:%02x.%x flags: %02x ext: %08x\n",
- PCI_BUS(e->devid),
- PCI_SLOT(e->devid),
- PCI_FUNC(e->devid),
- e->flags, e->ext);
-
- devid_start = e->devid;
- flags = e->flags;
- ext_flags = e->ext;
- alias = false;
- break;
- case IVHD_DEV_RANGE_END:
-
- DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
- PCI_SLOT(e->devid),
- PCI_FUNC(e->devid));
-
- devid = e->devid;
- for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias) {
- amd_iommu_alias_table[dev_i] = devid_to;
- set_dev_entry_from_acpi(iommu,
- devid_to, flags, ext_flags);
- }
- set_dev_entry_from_acpi(iommu, dev_i,
- flags, ext_flags);
- }
- break;
- default:
- break;
- }
-
- p += ivhd_entry_length(p);
- }
-}
-
-/* Initializes the device->iommu mapping for the driver */
-static int __init init_iommu_devices(struct amd_iommu *iommu)
-{
- u32 i;
-
- for (i = iommu->first_device; i <= iommu->last_device; ++i)
- set_iommu_for_device(iommu, i);
-
- return 0;
-}
-
-static void __init free_iommu_one(struct amd_iommu *iommu)
-{
- free_command_buffer(iommu);
- free_event_buffer(iommu);
- iommu_unmap_mmio_space(iommu);
-}
-
-static void __init free_iommu_all(void)
-{
- struct amd_iommu *iommu, *next;
-
- for_each_iommu_safe(iommu, next) {
- list_del(&iommu->list);
- free_iommu_one(iommu);
- kfree(iommu);
- }
-}
-
-/*
- * This function glues the initialization of one IOMMU
- * together and also allocates the command buffer and programs the
- * hardware. It does NOT enable the IOMMU. This is done afterwards.
- */
-static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
-{
- spin_lock_init(&iommu->lock);
-
- /* Add IOMMU to internal data structures */
- list_add_tail(&iommu->list, &amd_iommu_list);
- iommu->index = amd_iommus_present++;
-
- if (unlikely(iommu->index >= MAX_IOMMUS)) {
- WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
- return -ENOSYS;
- }
-
- /* Index is fine - add IOMMU to the array */
- amd_iommus[iommu->index] = iommu;
-
- /*
- * Copy data from ACPI table entry to the iommu struct
- */
- iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
- if (!iommu->dev)
- return 1;
-
- iommu->cap_ptr = h->cap_ptr;
- iommu->pci_seg = h->pci_seg;
- iommu->mmio_phys = h->mmio_phys;
- iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
- if (!iommu->mmio_base)
- return -ENOMEM;
-
- iommu->cmd_buf = alloc_command_buffer(iommu);
- if (!iommu->cmd_buf)
- return -ENOMEM;
-
- iommu->evt_buf = alloc_event_buffer(iommu);
- if (!iommu->evt_buf)
- return -ENOMEM;
-
- iommu->int_enabled = false;
-
- init_iommu_from_pci(iommu);
- init_iommu_from_acpi(iommu, h);
- init_iommu_devices(iommu);
-
- if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
- amd_iommu_np_cache = true;
-
- return pci_enable_device(iommu->dev);
-}
-
-/*
- * Iterates over all IOMMU entries in the ACPI table, allocates the
- * IOMMU structure and initializes it with init_iommu_one()
- */
-static int __init init_iommu_all(struct acpi_table_header *table)
-{
- u8 *p = (u8 *)table, *end = (u8 *)table;
- struct ivhd_header *h;
- struct amd_iommu *iommu;
- int ret;
-
- end += table->length;
- p += IVRS_HEADER_LENGTH;
-
- while (p < end) {
- h = (struct ivhd_header *)p;
- switch (*p) {
- case ACPI_IVHD_TYPE:
-
- DUMP_printk("device: %02x:%02x.%01x cap: %04x "
- "seg: %d flags: %01x info %04x\n",
- PCI_BUS(h->devid), PCI_SLOT(h->devid),
- PCI_FUNC(h->devid), h->cap_ptr,
- h->pci_seg, h->flags, h->info);
- DUMP_printk(" mmio-addr: %016llx\n",
- h->mmio_phys);
-
- iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
- if (iommu == NULL) {
- amd_iommu_init_err = -ENOMEM;
- return 0;
- }
-
- ret = init_iommu_one(iommu, h);
- if (ret) {
- amd_iommu_init_err = ret;
- return 0;
- }
- break;
- default:
- break;
- }
- p += h->length;
-
- }
- WARN_ON(p != end);
-
- return 0;
-}
-
-/****************************************************************************
- *
- * The following functions initialize the MSI interrupts for all IOMMUs
- * in the system. It's a bit challenging because there could be multiple
- * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
- * pci_dev.
- *
- ****************************************************************************/
-
-static int iommu_setup_msi(struct amd_iommu *iommu)
-{
- int r;
-
- if (pci_enable_msi(iommu->dev))
- return 1;
-
- r = request_threaded_irq(iommu->dev->irq,
- amd_iommu_int_handler,
- amd_iommu_int_thread,
- 0, "AMD-Vi",
- iommu->dev);
-
- if (r) {
- pci_disable_msi(iommu->dev);
- return 1;
- }
-
- iommu->int_enabled = true;
- iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
-
- return 0;
-}
-
-static int iommu_init_msi(struct amd_iommu *iommu)
-{
- if (iommu->int_enabled)
- return 0;
-
- if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
- return iommu_setup_msi(iommu);
-
- return 1;
-}
-
-/****************************************************************************
- *
- * The next functions belong to the third pass of parsing the ACPI
- * table. In this last pass the memory mapping requirements are
- * gathered (like exclusion and unity mapping ranges).
- *
- ****************************************************************************/
-
-static void __init free_unity_maps(void)
-{
- struct unity_map_entry *entry, *next;
-
- list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-}
-
-/* called when we find an exclusion range definition in ACPI */
-static int __init init_exclusion_range(struct ivmd_header *m)
-{
- int i;
-
- switch (m->type) {
- case ACPI_IVMD_TYPE:
- set_device_exclusion_range(m->devid, m);
- break;
- case ACPI_IVMD_TYPE_ALL:
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- set_device_exclusion_range(i, m);
- break;
- case ACPI_IVMD_TYPE_RANGE:
- for (i = m->devid; i <= m->aux; ++i)
- set_device_exclusion_range(i, m);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-/* called for unity map ACPI definition */
-static int __init init_unity_map_range(struct ivmd_header *m)
-{
-	struct unity_map_entry *e;
- char *s;
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
-
- switch (m->type) {
- default:
- kfree(e);
- return 0;
- case ACPI_IVMD_TYPE:
-		s = "IVMD_TYPE\t\t\t";
- e->devid_start = e->devid_end = m->devid;
- break;
- case ACPI_IVMD_TYPE_ALL:
- s = "IVMD_TYPE_ALL\t\t";
- e->devid_start = 0;
- e->devid_end = amd_iommu_last_bdf;
- break;
- case ACPI_IVMD_TYPE_RANGE:
- s = "IVMD_TYPE_RANGE\t\t";
- e->devid_start = m->devid;
- e->devid_end = m->aux;
- break;
- }
- e->address_start = PAGE_ALIGN(m->range_start);
- e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
- e->prot = m->flags >> 1;
-
- DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
- " range_start: %016llx range_end: %016llx flags: %x\n", s,
- PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
- PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
- PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
- e->address_start, e->address_end, m->flags);
-
- list_add_tail(&e->list, &amd_iommu_unity_map);
-
- return 0;
-}
-
-/* iterates over all memory definitions we find in the ACPI table */
-static int __init init_memory_definitions(struct acpi_table_header *table)
-{
- u8 *p = (u8 *)table, *end = (u8 *)table;
- struct ivmd_header *m;
-
- end += table->length;
- p += IVRS_HEADER_LENGTH;
-
- while (p < end) {
- m = (struct ivmd_header *)p;
- if (m->flags & IVMD_FLAG_EXCL_RANGE)
- init_exclusion_range(m);
- else if (m->flags & IVMD_FLAG_UNITY_MAP)
- init_unity_map_range(m);
-
- p += m->length;
- }
-
- return 0;
-}
-
-/*
- * Init the device table to not allow DMA access for devices and
- * suppress all page faults
- */
-static void init_device_table(void)
-{
- u32 devid;
-
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- set_dev_entry_bit(devid, DEV_ENTRY_VALID);
- set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
- }
-}
-
-static void iommu_init_flags(struct amd_iommu *iommu)
-{
- iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
- iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
- iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
- iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
- iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
- iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
- /*
- * make IOMMU memory accesses cache coherent
- */
- iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
-}
-
-static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
-{
- int i, j;
- u32 ioc_feature_control;
- struct pci_dev *pdev = NULL;
-
- /* RD890 BIOSes may not have completely reconfigured the iommu */
- if (!is_rd890_iommu(iommu->dev))
- return;
-
- /*
- * First, we need to ensure that the iommu is enabled. This is
- * controlled by a register in the northbridge
- */
- pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
- if (!pdev)
- return;
-
- /* Select Northbridge indirect register 0x75 and enable writing */
- pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
- pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
-
- /* Enable the iommu */
- if (!(ioc_feature_control & 0x1))
- pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
-
- pci_dev_put(pdev);
-
- /* Restore the iommu BAR */
- pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
- iommu->stored_addr_lo);
- pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
- iommu->stored_addr_hi);
-
- /* Restore the l1 indirect regs for each of the 6 l1s */
- for (i = 0; i < 6; i++)
- for (j = 0; j < 0x12; j++)
- iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
-
- /* Restore the l2 indirect regs */
- for (i = 0; i < 0x83; i++)
- iommu_write_l2(iommu, i, iommu->stored_l2[i]);
-
- /* Lock PCI setup registers */
- pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
- iommu->stored_addr_lo | 1);
-}
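The resume quirk relies on another index/data indirection, this time in the northbridge: config offset 0x60 selects an indirect register (bit 7 arms writes) and 0x64 carries the data; indirect register 0x75 holds the IOC feature control bit tested above. A hedged read helper for that pattern:

	static u32 example_nb_read_indirect(struct pci_dev *nb, u8 index)
	{
		u32 val;

		pci_write_config_dword(nb, 0x60, index);
		pci_read_config_dword(nb, 0x64, &val);

		return val;
	}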
-
-/*
- * This function finally enables all IOMMUs found in the system after
- * they have been initialized
- */
-static void enable_iommus(void)
-{
- struct amd_iommu *iommu;
-
- for_each_iommu(iommu) {
- iommu_disable(iommu);
- iommu_init_flags(iommu);
- iommu_set_device_table(iommu);
- iommu_enable_command_buffer(iommu);
- iommu_enable_event_buffer(iommu);
- iommu_set_exclusion_range(iommu);
- iommu_init_msi(iommu);
- iommu_enable(iommu);
- iommu_flush_all_caches(iommu);
- }
-}
-
-static void disable_iommus(void)
-{
- struct amd_iommu *iommu;
-
- for_each_iommu(iommu)
- iommu_disable(iommu);
-}
-
-/*
- * Suspend/Resume support
- * disable suspend until a real resume is implemented
- */
-
-static void amd_iommu_resume(void)
-{
- struct amd_iommu *iommu;
-
- for_each_iommu(iommu)
- iommu_apply_resume_quirks(iommu);
-
- /* re-load the hardware */
- enable_iommus();
-
- /*
- * we have to flush after the IOMMUs are enabled because a
- * disabled IOMMU will never execute the commands we send
- */
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
-}
-
-static int amd_iommu_suspend(void)
-{
- /* disable IOMMUs to go out of the way for BIOS */
- disable_iommus();
-
- return 0;
-}
-
-static struct syscore_ops amd_iommu_syscore_ops = {
- .suspend = amd_iommu_suspend,
- .resume = amd_iommu_resume,
-};
-
-/*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
- *
- * This function basically parses the ACPI table for AMD IOMMU (IVRS)
- * three times:
- *
- * 1 pass) Find the highest PCI device id the driver has to handle.
- * Based on this information, the size of the data structures
- * that need to be allocated is determined.
- *
- * 2 pass) Initialize the data structures just allocated with the
- * information in the ACPI table about available AMD IOMMUs
- * in the system. It also maps the PCI devices in the
- * system to specific IOMMUs
- *
- * 3 pass) After the basic data structures are allocated and
- * initialized we update them with information about memory
- * remapping requirements parsed out of the ACPI table in
- * this last pass.
- *
- * After that the hardware is initialized and ready to go. In the last
- * step we do some Linux specific things like registering the driver in
- * the dma_ops interface and initializing the suspend/resume support
- * functions. Finally it prints some information about AMD IOMMUs and
- * the driver state and enables the hardware.
- */
-static int __init amd_iommu_init(void)
-{
- int i, ret = 0;
-
- /*
- * First parse ACPI tables to find the largest Bus/Dev/Func
- * we need to handle. Based on this information, the shared data
- * structures for the IOMMUs in the system will be allocated
- */
- if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
- return -ENODEV;
-
- ret = amd_iommu_init_err;
- if (ret)
- goto out;
-
- dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
- alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
- rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
-
- ret = -ENOMEM;
-
- /* Device table - directly used by all IOMMUs */
- amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(dev_table_size));
- if (amd_iommu_dev_table == NULL)
- goto out;
-
- /*
- * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
- * IOMMU sees for that device
- */
- amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
- get_order(alias_table_size));
- if (amd_iommu_alias_table == NULL)
- goto free;
-
- /* IOMMU rlookup table - find the IOMMU for a specific device */
- amd_iommu_rlookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- if (amd_iommu_rlookup_table == NULL)
- goto free;
-
- amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(MAX_DOMAIN_ID/8));
- if (amd_iommu_pd_alloc_bitmap == NULL)
- goto free;
-
- /* init the device table */
- init_device_table();
-
- /*
- * let all alias entries point to themselves
- */
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- amd_iommu_alias_table[i] = i;
-
- /*
- * never allocate domain 0 because it's used as the non-allocated and
- * error value placeholder
- */
- amd_iommu_pd_alloc_bitmap[0] = 1;
-
- spin_lock_init(&amd_iommu_pd_lock);
-
- /*
- * now the data structures are allocated and basically initialized,
- * so start the real ACPI table scan
- */
- ret = -ENODEV;
- if (acpi_table_parse("IVRS", init_iommu_all) != 0)
- goto free;
-
- if (amd_iommu_init_err) {
- ret = amd_iommu_init_err;
- goto free;
- }
-
- if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
- goto free;
-
- if (amd_iommu_init_err) {
- ret = amd_iommu_init_err;
- goto free;
- }
-
- ret = amd_iommu_init_devices();
- if (ret)
- goto free;
-
- enable_iommus();
-
- if (iommu_pass_through)
- ret = amd_iommu_init_passthrough();
- else
- ret = amd_iommu_init_dma_ops();
-
- if (ret)
- goto free_disable;
-
- amd_iommu_init_api();
-
- amd_iommu_init_notifier();
-
- register_syscore_ops(&amd_iommu_syscore_ops);
-
- if (iommu_pass_through)
- goto out;
-
- if (amd_iommu_unmap_flush)
- printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
- else
- printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
-
- x86_platform.iommu_shutdown = disable_iommus;
-out:
- return ret;
-
-free_disable:
- disable_iommus();
-
-free:
- amd_iommu_uninit_devices();
-
- free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
- get_order(MAX_DOMAIN_ID/8));
-
- free_pages((unsigned long)amd_iommu_rlookup_table,
- get_order(rlookup_table_size));
-
- free_pages((unsigned long)amd_iommu_alias_table,
- get_order(alias_table_size));
-
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
-
- free_iommu_all();
-
- free_unity_maps();
-
-#ifdef CONFIG_GART_IOMMU
- /*
- * We failed to initialize the AMD IOMMU - try to fall back to GART
- * if possible.
- */
- gart_iommu_init();
-
-#endif
-
- goto out;
-}
-
-/****************************************************************************
- *
- * Early detect code. This code runs at IOMMU detection time in the DMA
- * layer. It simply checks whether an IVRS ACPI table is present to detect
- * AMD IOMMUs.
- *
- ****************************************************************************/
-static int __init early_amd_iommu_detect(struct acpi_table_header *table)
-{
- return 0;
-}
-
-int __init amd_iommu_detect(void)
-{
- if (no_iommu || (iommu_detected && !gart_iommu_aperture))
- return -ENODEV;
-
- if (amd_iommu_disabled)
- return -ENODEV;
-
- if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
- iommu_detected = 1;
- amd_iommu_detected = 1;
- x86_init.iommu.iommu_init = amd_iommu_init;
-
- /* Make sure ACS will be enabled */
- pci_request_acs();
- return 1;
- }
- return -ENODEV;
-}
-
-/****************************************************************************
- *
- * Parsing functions for the AMD IOMMU specific kernel command line
- * options.
- *
- ****************************************************************************/
-
-static int __init parse_amd_iommu_dump(char *str)
-{
- amd_iommu_dump = true;
-
- return 1;
-}
-
-static int __init parse_amd_iommu_options(char *str)
-{
- for (; *str; ++str) {
- if (strncmp(str, "fullflush", 9) == 0)
- amd_iommu_unmap_flush = true;
- if (strncmp(str, "off", 3) == 0)
- amd_iommu_disabled = true;
- }
-
- return 1;
-}
-
-__setup("amd_iommu_dump", parse_amd_iommu_dump);
-__setup("amd_iommu=", parse_amd_iommu_options);
-
-IOMMU_INIT_FINISH(amd_iommu_detect,
- gart_iommu_hole_init,
- 0,
- 0);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 289e92862fd9..afdc3f756dea 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -27,15 +27,12 @@
* timer, but by default APB timer has higher rating than local APIC timers.
*/
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
#include <linux/delay.h>
+#include <linux/dw_apb_timer.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/pm.h>
-#include <linux/pci.h>
#include <linux/sfi.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
@@ -44,76 +41,48 @@
#include <asm/fixmap.h>
#include <asm/apb_timer.h>
#include <asm/mrst.h>
+#include <asm/time.h>
-#define APBT_MASK CLOCKSOURCE_MASK(32)
-#define APBT_SHIFT 22
#define APBT_CLOCKEVENT_RATING 110
#define APBT_CLOCKSOURCE_RATING 250
-#define APBT_MIN_DELTA_USEC 200
-#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
#define APBT_CLOCKEVENT0_NUM (0)
-#define APBT_CLOCKEVENT1_NUM (1)
#define APBT_CLOCKSOURCE_NUM (2)
-static unsigned long apbt_address;
+static phys_addr_t apbt_address;
static int apb_timer_block_enabled;
static void __iomem *apbt_virt_address;
-static int phy_cs_timer_id;
/*
* Common DW APB timer info
*/
-static uint64_t apbt_freq;
-
-static void apbt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt);
-static int apbt_next_event(unsigned long delta,
- struct clock_event_device *evt);
-static cycle_t apbt_read_clocksource(struct clocksource *cs);
-static void apbt_restart_clocksource(struct clocksource *cs);
+static unsigned long apbt_freq;
struct apbt_dev {
- struct clock_event_device evt;
- unsigned int num;
- int cpu;
- unsigned int irq;
- unsigned int tick;
- unsigned int count;
- unsigned int flags;
- char name[10];
+ struct dw_apb_clock_event_device *timer;
+ unsigned int num;
+ int cpu;
+ unsigned int irq;
+ char name[10];
};
-static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
+static struct dw_apb_clocksource *clocksource_apbt;
-#ifdef CONFIG_SMP
-static unsigned int apbt_num_timers_used;
-static struct apbt_dev *apbt_devs;
-#endif
-
-static inline unsigned long apbt_readl_reg(unsigned long a)
+static inline void __iomem *adev_virt_addr(struct apbt_dev *adev)
{
- return readl(apbt_virt_address + a);
+ return apbt_virt_address + adev->num * APBTMRS_REG_SIZE;
}
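Each timer in a DW APB block owns a fixed-size register window, so the per-device base is a simple offset from the mapped block, as computed above. Assuming the usual APBTMRS_REG_SIZE of 0x14:

	/*
	 * Example: timer 0 sits at apbt_virt_address, timer 1 at +0x14 and
	 * timer 2 (the clocksource) at +0x28.
	 */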
-static inline void apbt_writel_reg(unsigned long d, unsigned long a)
-{
- writel(d, apbt_virt_address + a);
-}
-
-static inline unsigned long apbt_readl(int n, unsigned long a)
-{
- return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
-}
+static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
-static inline void apbt_writel(int n, unsigned long d, unsigned long a)
-{
- writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
-}
+#ifdef CONFIG_SMP
+static unsigned int apbt_num_timers_used;
+#endif
static inline void apbt_set_mapping(void)
{
struct sfi_timer_table_entry *mtmr;
+ int phy_cs_timer_id = 0;
if (apbt_virt_address) {
pr_debug("APBT base already mapped\n");
@@ -125,21 +94,18 @@ static inline void apbt_set_mapping(void)
APBT_CLOCKEVENT0_NUM);
return;
}
- apbt_address = (unsigned long)mtmr->phys_addr;
+ apbt_address = (phys_addr_t)mtmr->phys_addr;
if (!apbt_address) {
printk(KERN_WARNING "No timer base from SFI, use default\n");
apbt_address = APBT_DEFAULT_BASE;
}
apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
- if (apbt_virt_address) {
- pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\
- (void *)apbt_address, (void *)apbt_virt_address);
- } else {
- pr_debug("Failed mapping APBT phy address at %p\n",\
- (void *)apbt_address);
+ if (!apbt_virt_address) {
+ pr_debug("Failed mapping APBT phy address at %lu\n",\
+ (unsigned long)apbt_address);
goto panic_noapbt;
}
- apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
+ apbt_freq = mtmr->freq_hz;
sfi_free_mtmr(mtmr);
/* Now figure out the physical timer id for clocksource device */
@@ -148,9 +114,14 @@ static inline void apbt_set_mapping(void)
goto panic_noapbt;
/* Now figure out the physical timer id */
- phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
- / APBTMRS_REG_SIZE;
- pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
+ pr_debug("Use timer %d for clocksource\n",
+ (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE);
+ phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) /
+ APBTMRS_REG_SIZE;
+
+ clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING,
+ "apbt0", apbt_virt_address + phy_cs_timer_id *
+ APBTMRS_REG_SIZE, apbt_freq);
return;
panic_noapbt:
@@ -172,82 +143,6 @@ static inline int is_apbt_capable(void)
return apbt_virt_address ? 1 : 0;
}
-static struct clocksource clocksource_apbt = {
- .name = "apbt",
- .rating = APBT_CLOCKSOURCE_RATING,
- .read = apbt_read_clocksource,
- .mask = APBT_MASK,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .resume = apbt_restart_clocksource,
-};
-
-/* boot APB clock event device */
-static struct clock_event_device apbt_clockevent = {
- .name = "apbt0",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = apbt_set_mode,
- .set_next_event = apbt_next_event,
- .shift = APBT_SHIFT,
- .irq = 0,
- .rating = APBT_CLOCKEVENT_RATING,
-};
-
-/*
- * start count down from 0xffff_ffff. this is done by toggling the enable bit
- * then load initial load count to ~0.
- */
-static void apbt_start_counter(int n)
-{
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
-
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
- apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
- /* enable, mask interrupt */
- ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
- ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
- /* read it once to get cached counter value initialized */
- apbt_read_clocksource(&clocksource_apbt);
-}
-
-static irqreturn_t apbt_interrupt_handler(int irq, void *data)
-{
- struct apbt_dev *dev = (struct apbt_dev *)data;
- struct clock_event_device *aevt = &dev->evt;
-
- if (!aevt->event_handler) {
- printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
- dev->num);
- return IRQ_NONE;
- }
- aevt->event_handler(aevt);
- return IRQ_HANDLED;
-}
-
-static void apbt_restart_clocksource(struct clocksource *cs)
-{
- apbt_start_counter(phy_cs_timer_id);
-}
-
-static void apbt_enable_int(int n)
-{
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
- /* clear pending intr */
- apbt_readl(n, APBTMR_N_EOI);
- ctrl &= ~APBTMR_CONTROL_INT;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
-}
-
-static void apbt_disable_int(int n)
-{
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
-
- ctrl |= APBTMR_CONTROL_INT;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
-}
-
-
static int __init apbt_clockevent_register(void)
{
struct sfi_timer_table_entry *mtmr;
@@ -260,45 +155,21 @@ static int __init apbt_clockevent_register(void)
return -ENODEV;
}
- /*
- * We need to calculate the scaled math multiplication factor for
- * nanosecond to apbt tick conversion.
- * mult = (nsec/cycle)*2^APBT_SHIFT
- */
- apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
- , NSEC_PER_SEC, APBT_SHIFT);
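The factor being deleted here follows the standard clockevents scaled-math scheme, cycles = (ns * mult) >> shift with mult = (freq_hz << shift) / NSEC_PER_SEC; the dw_apb_timer library now derives it internally. A worked example with a hypothetical 25 MHz timer:

	/*
	 * mult = (25,000,000 << 22) / 1,000,000,000 ~= 104857, so a 1 ms
	 * request converts to (1,000,000 * 104857) >> 22 ~= 25,000 ticks.
	 */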
-
- /* Calculate the min / max delta */
- apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
- &apbt_clockevent);
- apbt_clockevent.min_delta_ns = clockevent_delta2ns(
- APBT_MIN_DELTA_USEC*apbt_freq,
- &apbt_clockevent);
- /*
- * Start apbt with the boot cpu mask and make it
- * global if not used for per cpu timer.
- */
- apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
adev->num = smp_processor_id();
- memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
+ adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
+ mrst_timer_options == MRST_TIMER_LAPIC_APBT ?
+ APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
+ adev_virt_addr(adev), 0, apbt_freq);
+ /* Firmware does EOI handling for us. */
+ adev->timer->eoi = NULL;
if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
- adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
- global_clock_event = &adev->evt;
+ global_clock_event = &adev->timer->ced;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
}
- if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
- IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
- apbt_clockevent.name, adev)) {
- printk(KERN_ERR "Failed request IRQ for APBT%d\n",
- apbt_clockevent.irq);
- }
-
- clockevents_register_device(&adev->evt);
- /* Start APBT 0 interrupts */
- apbt_enable_int(APBT_CLOCKEVENT0_NUM);
+ dw_apb_clockevent_register(adev->timer);
sfi_free_mtmr(mtmr);
return 0;
@@ -316,52 +187,34 @@ static void apbt_setup_irq(struct apbt_dev *adev)
irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
/* APB timer irqs are set up as mp_irqs, timer is edge type */
__irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
-
- if (system_state == SYSTEM_BOOTING) {
- if (request_irq(adev->irq, apbt_interrupt_handler,
- IRQF_TIMER | IRQF_DISABLED |
- IRQF_NOBALANCING,
- adev->name, adev)) {
- printk(KERN_ERR "Failed request IRQ for APBT%d\n",
- adev->num);
- }
- } else
- enable_irq(adev->irq);
}
/* Should be called with per cpu */
void apbt_setup_secondary_clock(void)
{
struct apbt_dev *adev;
- struct clock_event_device *aevt;
int cpu;
/* Don't register boot CPU clockevent */
cpu = smp_processor_id();
if (!cpu)
return;
- /*
- * We need to calculate the scaled math multiplication factor for
- * nanosecond to apbt tick conversion.
- * mult = (nsec/cycle)*2^APBT_SHIFT
- */
- printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
- adev = &per_cpu(cpu_apbt_dev, cpu);
- aevt = &adev->evt;
- memcpy(aevt, &apbt_clockevent, sizeof(*aevt));
- aevt->cpumask = cpumask_of(cpu);
- aevt->name = adev->name;
- aevt->mode = CLOCK_EVT_MODE_UNUSED;
+ adev = &__get_cpu_var(cpu_apbt_dev);
+ if (!adev->timer) {
+ adev->timer = dw_apb_clockevent_init(cpu, adev->name,
+ APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
+ adev->irq, apbt_freq);
+ adev->timer->eoi = NULL;
+ } else {
+ dw_apb_clockevent_resume(adev->timer);
+ }
- printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n",
- cpu, aevt->name, *(u32 *)aevt->cpumask);
+ printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
+ cpu, adev->name, adev->cpu);
apbt_setup_irq(adev);
-
- clockevents_register_device(aevt);
-
- apbt_enable_int(cpu);
+ dw_apb_clockevent_register(adev->timer);
return;
}
@@ -384,13 +237,12 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
switch (action & 0xf) {
case CPU_DEAD:
- disable_irq(adev->irq);
- apbt_disable_int(cpu);
+ dw_apb_clockevent_pause(adev->timer);
if (system_state == SYSTEM_RUNNING) {
pr_debug("skipping APBT CPU %lu offline\n", cpu);
} else if (adev) {
pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
- free_irq(adev->irq, adev);
+ dw_apb_clockevent_stop(adev->timer);
}
break;
default:
@@ -415,116 +267,16 @@ void apbt_setup_secondary_clock(void) {}
#endif /* CONFIG_SMP */
-static void apbt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- unsigned long ctrl;
- uint64_t delta;
- int timer_num;
- struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
-
- BUG_ON(!apbt_virt_address);
-
- timer_num = adev->num;
- pr_debug("%s CPU %d timer %d mode=%d\n",
- __func__, first_cpu(*evt->cpumask), timer_num, mode);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
- delta >>= apbt_clockevent.shift;
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- /*
- * DW APB p. 46, have to disable timer before load counter,
- * may cause sync problem.
- */
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- udelay(1);
- pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
- apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- break;
- /* APB timer does not have one-shot mode, use free running mode */
- case CLOCK_EVT_MODE_ONESHOT:
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- /*
- * set free running mode, this mode will let timer reload max
- * timeout which will give time (3min on 25MHz clock) to rearm
- * the next event, therefore emulate the one-shot mode.
- */
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
-
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- /* write again to set free running mode */
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
-
- /*
- * DW APB p. 46, load counter with all 1s before starting free
- * running mode.
- */
- apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
- ctrl &= ~APBTMR_CONTROL_INT;
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- apbt_disable_int(timer_num);
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- apbt_enable_int(timer_num);
- break;
- }
-}
-
-static int apbt_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- unsigned long ctrl;
- int timer_num;
-
- struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
-
- timer_num = adev->num;
- /* Disable timer */
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- /* write new count */
- apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- return 0;
-}
-
-static cycle_t apbt_read_clocksource(struct clocksource *cs)
-{
- unsigned long current_count;
-
- current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
- return (cycle_t)~current_count;
-}
-
static int apbt_clocksource_register(void)
{
u64 start, now;
cycle_t t1;
/* Start the counter, use timer 2 as source, timer 0/1 for event */
- apbt_start_counter(phy_cs_timer_id);
+ dw_apb_clocksource_start(clocksource_apbt);
/* Verify whether apbt counter works */
- t1 = apbt_read_clocksource(&clocksource_apbt);
+ t1 = dw_apb_clocksource_read(clocksource_apbt);
rdtscll(start);
/*
@@ -539,10 +291,10 @@ static int apbt_clocksource_register(void)
} while ((now - start) < 200000UL);
/* APBT is the only always on clocksource, it has to work! */
- if (t1 == apbt_read_clocksource(&clocksource_apbt))
+ if (t1 == dw_apb_clocksource_read(clocksource_apbt))
panic("APBT counter not counting. APBT disabled\n");
- clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000);
+ dw_apb_clocksource_register(clocksource_apbt);
return 0;
}
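The verification above follows a simple pattern: sample the counter, spin for a bounded number of TSC cycles, then sample again; identical readings mean the timer is not ticking and cannot serve as the always-on clocksource. At a 2 GHz TSC the 200000-cycle spin is roughly 100 microseconds, i.e. about 2500 ticks of a 25 MHz APB timer, so a healthy counter is guaranteed to have moved between the two reads.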
@@ -566,10 +318,7 @@ void __init apbt_time_init(void)
if (apb_timer_block_enabled)
return;
apbt_set_mapping();
- if (apbt_virt_address) {
- pr_debug("Found APBT version 0x%lx\n",\
- apbt_readl_reg(APBTMRS_COMP_VERSION));
- } else
+ if (!apbt_virt_address)
goto out_noapbt;
/*
* Read the frequency and check for a sane value, for ESL model
@@ -577,7 +326,7 @@ void __init apbt_time_init(void)
*/
if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
- pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
+ pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq);
goto out_noapbt;
}
if (apbt_clocksource_register()) {
@@ -603,30 +352,20 @@ void __init apbt_time_init(void)
} else {
percpu_timer = 0;
apbt_num_timers_used = 1;
- adev = &per_cpu(cpu_apbt_dev, 0);
- adev->flags &= ~APBT_DEV_USED;
}
pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
/* here we set up per CPU timer data structure */
- apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
- GFP_KERNEL);
- if (!apbt_devs) {
- printk(KERN_ERR "Failed to allocate APB timer devices\n");
- return;
- }
for (i = 0; i < apbt_num_timers_used; i++) {
adev = &per_cpu(cpu_apbt_dev, i);
adev->num = i;
adev->cpu = i;
p_mtmr = sfi_get_mtmr(i);
- if (p_mtmr) {
- adev->tick = p_mtmr->freq_hz;
+ if (p_mtmr)
adev->irq = p_mtmr->irq;
- } else
+ else
printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
- adev->count = 0;
- sprintf(adev->name, "apbt%d", i);
+ snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
}
#endif
@@ -638,17 +377,8 @@ out_noapbt:
panic("failed to enable APB timer\n");
}
-static inline void apbt_disable(int n)
-{
- if (is_apbt_capable()) {
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
- }
-}
-
/* called before apb_timer_enable, use early map */
-unsigned long apbt_quick_calibrate()
+unsigned long apbt_quick_calibrate(void)
{
int i, scale;
u64 old, new;
@@ -657,31 +387,31 @@ unsigned long apbt_quick_calibrate()
u32 loop, shift;
apbt_set_mapping();
- apbt_start_counter(phy_cs_timer_id);
+ dw_apb_clocksource_start(clocksource_apbt);
/* check if the timer can count down, otherwise return */
- old = apbt_read_clocksource(&clocksource_apbt);
+ old = dw_apb_clocksource_read(clocksource_apbt);
i = 10000;
while (--i) {
- if (old != apbt_read_clocksource(&clocksource_apbt))
+ if (old != dw_apb_clocksource_read(clocksource_apbt))
break;
}
if (!i)
goto failed;
/* count 16 ms */
- loop = (apbt_freq * 1000) << 4;
+ loop = (apbt_freq / 1000) << 4;
/* restart the timer to ensure it won't get to 0 in the calibration */
- apbt_start_counter(phy_cs_timer_id);
+ dw_apb_clocksource_start(clocksource_apbt);
- old = apbt_read_clocksource(&clocksource_apbt);
+ old = dw_apb_clocksource_read(clocksource_apbt);
old += loop;
t1 = __native_read_tsc();
do {
- new = apbt_read_clocksource(&clocksource_apbt);
+ new = dw_apb_clocksource_read(clocksource_apbt);
} while (new < old);
t2 = __native_read_tsc();
@@ -693,7 +423,7 @@ unsigned long apbt_quick_calibrate()
return 0;
}
scale = (int)div_u64((t2 - t1), loop >> shift);
- khz = (scale * apbt_freq * 1000) >> shift;
+ khz = (scale * (apbt_freq / 1000)) >> shift;
printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
return khz;
failed:
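A worked example of the calibration arithmetic, with hypothetical but representative numbers (apbt_freq is now in Hz, so apbt_freq / 1000 is timer ticks per millisecond; the TSC delta is made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* hypothetical: 25 MHz APB timer, 2 GHz TSC */
		uint64_t apbt_freq = 25000000;			/* Hz */
		uint32_t loop = (apbt_freq / 1000) << 4;	/* 400000 ticks == 16 ms */
		uint64_t tsc_delta = 32000000;			/* TSC cycles over those 16 ms */
		int shift = 8;			/* any shift keeping loop >> shift nonzero */

		uint64_t scale = tsc_delta / (loop >> shift);
		uint64_t khz = (scale * (apbt_freq / 1000)) >> shift;

		printf("TSC ~ %llu kHz\n", (unsigned long long)khz);	/* ~2000000 */
		return 0;
	}

Algebraically the shift cancels out, so khz is effectively tsc_delta / 16: TSC cycles per millisecond, which is kHz by definition.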
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b9338b8cf420..b24be38c8cf8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -27,6 +27,7 @@
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
+#include <linux/i8253.h>
#include <linux/dmar.h>
#include <linux/init.h>
#include <linux/cpu.h>
@@ -39,7 +40,6 @@
#include <asm/pgalloc.h>
#include <asm/atomic.h>
#include <asm/mpspec.h>
-#include <asm/i8253.h>
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
@@ -48,6 +48,7 @@
#include <asm/hpet.h>
#include <asm/idle.h>
#include <asm/mtrr.h>
+#include <asm/time.h>
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/tsc.h>
@@ -1429,7 +1430,7 @@ void enable_x2apic(void)
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (!(msr & X2APIC_ENABLE)) {
printk_once(KERN_INFO "Enabling x2apic\n");
- wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
+ wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, msr2);
}
}
#endif /* CONFIG_X86_X2APIC */
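The one-word wrmsr() change above is a real fix, not a cleanup: rdmsr() splits the 64-bit MSR into two 32-bit halves, and the old call wrote a literal 0 as the high half, clobbering bits 63:32 of IA32_APICBASE on any machine where they are non-zero. Writing msr2 back preserves them:

	u32 lo, hi;

	rdmsr(MSR_IA32_APICBASE, lo, hi);	/* lo = bits 31:0, hi = bits 63:32 */
	if (!(lo & X2APIC_ENABLE))
		wrmsr(MSR_IA32_APICBASE, lo | X2APIC_ENABLE, hi);	/* keep the high half */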
@@ -1943,10 +1944,28 @@ void disconnect_bsp_APIC(int virt_wire_setup)
void __cpuinit generic_processor_info(int apicid, int version)
{
- int cpu;
+ int cpu, max = nr_cpu_ids;
+ bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
+ phys_cpu_present_map);
+
+ /*
+ * If boot cpu has not been detected yet, then only allow up to
+ * nr_cpu_ids - 1 processors and keep one slot free for boot cpu
+ */
+ if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
+ apicid != boot_cpu_physical_apicid) {
+ int thiscpu = max + disabled_cpus - 1;
+
+ pr_warning(
+ "ACPI: NR_CPUS/possible_cpus limit of %i almost"
+ " reached. Keeping one slot for boot cpu."
+ " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+
+ disabled_cpus++;
+ return;
+ }
if (num_processors >= nr_cpu_ids) {
- int max = nr_cpu_ids;
int thiscpu = max + disabled_cpus;
pr_warning(
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e5293394b548..8eb863e27ea6 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1295,6 +1295,16 @@ static int setup_ioapic_entry(int apic_id, int irq,
* irq handler will do the explicit EOI to the io-apic.
*/
ir_entry->vector = pin;
+
+ apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
+ "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
+ "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
+ "Avail:%X Vector:%02X Dest:%08X "
+ "SID:%04X SQ:%X SVT:%X)\n",
+ apic_id, irte.present, irte.fpd, irte.dst_mode,
+ irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
+ irte.avail, irte.vector, irte.dest_id,
+ irte.sid, irte.sq, irte.svt);
} else {
entry->delivery_mode = apic->irq_delivery_mode;
entry->dest_mode = apic->irq_dest_mode;
@@ -1337,9 +1347,9 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
apic_printk(APIC_VERBOSE,KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
- "IRQ %d Mode:%i Active:%i)\n",
+ "IRQ %d Mode:%i Active:%i Dest:%d)\n",
apic_id, mpc_ioapic_id(apic_id), pin, cfg->vector,
- irq, trigger, polarity);
+ irq, trigger, polarity, dest);
if (setup_ioapic_entry(mpc_ioapic_id(apic_id), irq, &entry,
@@ -1522,10 +1532,12 @@ __apicdebuginit(void) print_IO_APIC(void)
printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
- printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
+ printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
+ reg_01.bits.entries);
printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
- printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
+ printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
+ reg_01.bits.version);
/*
* Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
@@ -1550,31 +1562,60 @@ __apicdebuginit(void) print_IO_APIC(void)
printk(KERN_DEBUG ".... IRQ redirection table:\n");
- printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
- " Stat Dmod Deli Vect:\n");
+ if (intr_remapping_enabled) {
+ printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
+ " Pol Stat Indx2 Zero Vect:\n");
+ } else {
+ printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
+ " Stat Dmod Deli Vect:\n");
+ }
for (i = 0; i <= reg_01.bits.entries; i++) {
- struct IO_APIC_route_entry entry;
-
- entry = ioapic_read_entry(apic, i);
-
- printk(KERN_DEBUG " %02x %03X ",
- i,
- entry.dest
- );
+ if (intr_remapping_enabled) {
+ struct IO_APIC_route_entry entry;
+ struct IR_IO_APIC_route_entry *ir_entry;
+
+ entry = ioapic_read_entry(apic, i);
+ ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
+ printk(KERN_DEBUG " %02x %04X ",
+ i,
+ ir_entry->index
+ );
+ printk("%1d %1d %1d %1d %1d "
+ "%1d %1d %X %02X\n",
+ ir_entry->format,
+ ir_entry->mask,
+ ir_entry->trigger,
+ ir_entry->irr,
+ ir_entry->polarity,
+ ir_entry->delivery_status,
+ ir_entry->index2,
+ ir_entry->zero,
+ ir_entry->vector
+ );
+ } else {
+ struct IO_APIC_route_entry entry;
- printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
- entry.mask,
- entry.trigger,
- entry.irr,
- entry.polarity,
- entry.delivery_status,
- entry.dest_mode,
- entry.delivery_mode,
- entry.vector
- );
+ entry = ioapic_read_entry(apic, i);
+ printk(KERN_DEBUG " %02x %02X ",
+ i,
+ entry.dest
+ );
+ printk("%1d %1d %1d %1d %1d "
+ "%1d %1d %02X\n",
+ entry.mask,
+ entry.trigger,
+ entry.irr,
+ entry.polarity,
+ entry.delivery_status,
+ entry.dest_mode,
+ entry.delivery_mode,
+ entry.vector
+ );
+ }
}
}
+
printk(KERN_DEBUG "IRQ to pin mappings:\n");
for_each_active_irq(irq) {
struct irq_pin_list *entry;
@@ -1792,7 +1833,7 @@ __apicdebuginit(int) print_ICs(void)
return 0;
}
-fs_initcall(print_ICs);
+late_initcall(print_ICs);
/* Where if anywhere is the i8259 connect in external int mode */
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 965a7666c283..0371c484bb8a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -229,11 +229,11 @@
#include <linux/jiffies.h>
#include <linux/acpi.h>
#include <linux/syscore_ops.h>
+#include <linux/i8253.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
-#include <asm/i8253.h>
#include <asm/olpc.h>
#include <asm/paravirt.h>
#include <asm/reboot.h>
@@ -1220,11 +1220,11 @@ static void reinit_timer(void)
raw_spin_lock_irqsave(&i8253_lock, flags);
/* set the clock to HZ */
- outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
udelay(10);
- outb_pit(LATCH & 0xff, PIT_CH0); /* LSB */
+ outb_p(LATCH & 0xff, PIT_CH0); /* LSB */
udelay(10);
- outb_pit(LATCH >> 8, PIT_CH0); /* MSB */
+ outb_p(LATCH >> 8, PIT_CH0); /* MSB */
udelay(10);
raw_spin_unlock_irqrestore(&i8253_lock, flags);
#endif
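For reference, the command byte 0x34 written above decodes as follows on the 8253/8254 PIT (the switch from outb_pit() to outb_p() only changes which helper issues the port I/O, not the protocol):

	bits 7:6 = 00   select counter 0
	bits 5:4 = 11   access mode: LSB then MSB
	bits 3:1 = 010  mode 2, rate generator (periodic tick)
	bit    0 = 0    binary (not BCD) counting

The 16-bit reload value (LATCH, the PIT input clock divided by HZ) is then written low byte first, high byte second, to the counter 0 data port, with the udelay(10) settling pauses shown above.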
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index c29d631af6fc..395a10e68067 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -63,7 +63,6 @@ void foo(void)
BLANK();
OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
- OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
BLANK();
OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 525514cf33c3..46674fbb62ba 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -62,6 +62,8 @@ static void __init check_fpu(void)
return;
}
+ kernel_fpu_begin();
+
/*
* trap_init() enabled FXSR and company _before_ testing for FP
* problems here.
@@ -80,6 +82,8 @@ static void __init check_fpu(void)
: "=m" (*&fdiv_bug)
: "m" (*&x), "m" (*&y));
+ kernel_fpu_end();
+
boot_cpu_data.fdiv_bug = fdiv_bug;
if (boot_cpu_data.fdiv_bug)
printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 8095f8611f8a..755f64fb0743 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -32,11 +32,11 @@
*/
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
- &x86_hyper_vmware,
- &x86_hyper_ms_hyperv,
#ifdef CONFIG_XEN_PVHVM
&x86_hyper_xen_hvm,
#endif
+ &x86_hyper_vmware,
+ &x86_hyper_ms_hyperv,
};
const struct hypervisor_x86 *x86_hyper;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1edf5ba4fb2b..ed6086eedf1d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -456,6 +456,24 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_VMX))
detect_vmx_virtcap(c);
+
+ /*
+ * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
+ * x86_energy_perf_policy(8) is available to change it at run-time
+ */
+ if (cpu_has(c, X86_FEATURE_EPB)) {
+ u64 epb;
+
+ rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
+ printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
+ " Set to 'normal', was 'performance'\n"
+ "ENERGY_PERF_BIAS: View and update with"
+ " x86_energy_perf_policy(8)\n");
+ epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
+ wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ }
+ }
}
#ifdef CONFIG_X86_32
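The low nibble of MSR_IA32_ENERGY_PERF_BIAS (0x1b0) is a 0..15 hint scaling from 0 (maximum performance, also the power-on value this hunk detects as "BIOS did not initialize") to 15 (maximum power saving); ENERGY_PERF_BIAS_NORMAL is the midpoint 6. A hypothetical userspace read through the msr driver, for illustration only; x86_energy_perf_policy(8) is the supported interface:

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t epb;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);	/* needs the msr driver */

		if (fd < 0 || pread(fd, &epb, sizeof(epb), 0x1b0) != sizeof(epb)) {
			perror("msr");
			return 1;
		}
		printf("EPB hint: %llu (0=performance, 6=normal, 15=powersave)\n",
		       (unsigned long long)(epb & 0xF));
		close(fd);
		return 0;
	}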
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 1e8d66c1336a..7395d5f4272d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -43,61 +43,105 @@ static struct severity {
unsigned char covered;
char *msg;
} severities[] = {
-#define KERNEL .context = IN_KERNEL
-#define USER .context = IN_USER
-#define SER .ser = SER_REQUIRED
-#define NOSER .ser = NO_SER
-#define SEV(s) .sev = MCE_ ## s ## _SEVERITY
-#define BITCLR(x, s, m, r...) { .mask = x, .result = 0, SEV(s), .msg = m, ## r }
-#define BITSET(x, s, m, r...) { .mask = x, .result = x, SEV(s), .msg = m, ## r }
-#define MCGMASK(x, res, s, m, r...) \
- { .mcgmask = x, .mcgres = res, SEV(s), .msg = m, ## r }
-#define MASK(x, y, s, m, r...) \
- { .mask = x, .result = y, SEV(s), .msg = m, ## r }
+#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
+#define KERNEL .context = IN_KERNEL
+#define USER .context = IN_USER
+#define SER .ser = SER_REQUIRED
+#define NOSER .ser = NO_SER
+#define BITCLR(x) .mask = x, .result = 0
+#define BITSET(x) .mask = x, .result = x
+#define MCGMASK(x, y) .mcgmask = x, .mcgres = y
+#define MASK(x, y) .mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCACOD 0xffff
- BITCLR(MCI_STATUS_VAL, NO, "Invalid"),
- BITCLR(MCI_STATUS_EN, NO, "Not enabled"),
- BITSET(MCI_STATUS_PCC, PANIC, "Processor context corrupt"),
+ MCESEV(
+ NO, "Invalid",
+ BITCLR(MCI_STATUS_VAL)
+ ),
+ MCESEV(
+ NO, "Not enabled",
+ BITCLR(MCI_STATUS_EN)
+ ),
+ MCESEV(
+ PANIC, "Processor context corrupt",
+ BITSET(MCI_STATUS_PCC)
+ ),
/* When MCIP is not set something is very confused */
- MCGMASK(MCG_STATUS_MCIP, 0, PANIC, "MCIP not set in MCA handler"),
+ MCESEV(
+ PANIC, "MCIP not set in MCA handler",
+ MCGMASK(MCG_STATUS_MCIP, 0)
+ ),
/* Neither return nor error IP -- no chance to recover -> PANIC */

- MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0, PANIC,
- "Neither restart nor error IP"),
- MCGMASK(MCG_STATUS_RIPV, 0, PANIC, "In kernel and no restart IP",
- KERNEL),
- BITCLR(MCI_STATUS_UC, KEEP, "Corrected error", NOSER),
- MASK(MCI_STATUS_OVER|MCI_STATUS_UC|MCI_STATUS_EN, MCI_STATUS_UC, SOME,
- "Spurious not enabled", SER),
+ MCESEV(
+ PANIC, "Neither restart nor error IP",
+ MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
+ ),
+ MCESEV(
+ PANIC, "In kernel and no restart IP",
+ KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
+ ),
+ MCESEV(
+ KEEP, "Corrected error",
+ NOSER, BITCLR(MCI_STATUS_UC)
+ ),
/* ignore OVER for UCNA */
- MASK(MCI_UC_SAR, MCI_STATUS_UC, KEEP,
- "Uncorrected no action required", SER),
- MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR, PANIC,
- "Illegal combination (UCNA with AR=1)", SER),
- MASK(MCI_STATUS_S, 0, KEEP, "Non signalled machine check", SER),
+ MCESEV(
+ KEEP, "Uncorrected no action required",
+ SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
+ ),
+ MCESEV(
+ PANIC, "Illegal combination (UCNA with AR=1)",
+ SER,
+ MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
+ ),
+ MCESEV(
+ KEEP, "Non signalled machine check",
+ SER, BITCLR(MCI_STATUS_S)
+ ),
/* AR add known MCACODs here */
- MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_SAR, PANIC,
- "Action required with lost events", SER),
- MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_SAR, PANIC,
- "Action required; unknown MCACOD", SER),
+ MCESEV(
+ PANIC, "Action required with lost events",
+ SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
+ ),
+ MCESEV(
+ PANIC, "Action required: unknown MCACOD",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
+ ),
/* known AO MCACODs: */
- MASK(MCI_UC_SAR|MCI_STATUS_OVER|0xfff0, MCI_UC_S|0xc0, AO,
- "Action optional: memory scrubbing error", SER),
- MASK(MCI_UC_SAR|MCI_STATUS_OVER|MCACOD, MCI_UC_S|0x17a, AO,
- "Action optional: last level cache writeback error", SER),
-
- MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S, SOME,
- "Action optional unknown MCACOD", SER),
- MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S|MCI_STATUS_OVER, SOME,
- "Action optional with lost events", SER),
- BITSET(MCI_STATUS_UC|MCI_STATUS_OVER, PANIC, "Overflowed uncorrected"),
- BITSET(MCI_STATUS_UC, UC, "Uncorrected"),
- BITSET(0, SOME, "No match") /* always matches. keep at end */
+ MCESEV(
+ AO, "Action optional: memory scrubbing error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|0xfff0, MCI_UC_S|0x00c0)
+ ),
+ MCESEV(
+ AO, "Action optional: last level cache writeback error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|0x017a)
+ ),
+ MCESEV(
+ SOME, "Action optional: unknown MCACOD",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
+ ),
+ MCESEV(
+ SOME, "Action optional with lost events",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
+ ),
+
+ MCESEV(
+ PANIC, "Overflowed uncorrected",
+ BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
+ ),
+ MCESEV(
+ UC, "Uncorrected",
+ BITSET(MCI_STATUS_UC)
+ ),
+ MCESEV(
+ SOME, "No match",
+ BITSET(0)
+ ) /* always matches. keep at end */
};
/*
@@ -112,15 +156,15 @@ static int error_context(struct mce *m)
return IN_KERNEL;
}
-int mce_severity(struct mce *a, int tolerant, char **msg)
+int mce_severity(struct mce *m, int tolerant, char **msg)
{
- enum context ctx = error_context(a);
+ enum context ctx = error_context(m);
struct severity *s;
for (s = severities;; s++) {
- if ((a->status & s->mask) != s->result)
+ if ((m->status & s->mask) != s->result)
continue;
- if ((a->mcgstatus & s->mcgmask) != s->mcgres)
+ if ((m->mcgstatus & s->mcgmask) != s->mcgres)
continue;
if (s->ser == SER_REQUIRED && !mce_ser)
continue;
@@ -197,15 +241,15 @@ static const struct file_operations severities_coverage_fops = {
static int __init severities_debugfs_init(void)
{
- struct dentry *dmce = NULL, *fseverities_coverage = NULL;
+ struct dentry *dmce, *fsev;
dmce = mce_get_debugfs_dir();
- if (dmce == NULL)
+ if (!dmce)
goto err_out;
- fseverities_coverage = debugfs_create_file("severities-coverage",
- 0444, dmce, NULL,
- &severities_coverage_fops);
- if (fseverities_coverage == NULL)
+
+ fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
+ &severities_coverage_fops);
+ if (!fsev)
goto err_out;
return 0;
@@ -214,4 +258,4 @@ err_out:
return -ENOMEM;
}
late_initcall(severities_debugfs_init);
-#endif
+#endif /* CONFIG_DEBUG_FS */
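The rewritten table keeps the same matching semantics: mce_severity() walks the entries in order and the first one whose masked status matches wins, which is why the BITSET(0) catch-all must stay last. A toy model of that walk (MCI_STATUS_VAL and MCI_STATUS_UC really are bits 63 and 61; everything else here is illustrative):

	#include <stdio.h>
	#include <stdint.h>

	struct rule { uint64_t mask, result; const char *msg; };

	static const struct rule rules[] = {
		{ 1ULL << 63, 0,          "Invalid (VAL clear)" },	/* BITCLR */
		{ 1ULL << 61, 1ULL << 61, "Uncorrected" },		/* BITSET */
		{ 0,          0,          "No match" },		/* always matches, keep last */
	};

	int main(void)
	{
		uint64_t status = (1ULL << 63) | (1ULL << 61);	/* VAL and UC set */
		const struct rule *r;

		for (r = rules; ; r++) {
			if ((status & r->mask) == r->result) {
				printf("%s\n", r->msg);	/* prints "Uncorrected" */
				break;
			}
		}
		return 0;
	}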
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ff1ae9b6464d..08363b042122 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -10,7 +10,6 @@
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
-#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
@@ -38,23 +37,20 @@
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/edac_mce.h>
+#include <linux/irq_work.h>
#include <asm/processor.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/idle.h>
-#include <asm/ipi.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include "mce-internal.h"
-static DEFINE_MUTEX(mce_read_mutex);
+static DEFINE_MUTEX(mce_chrdev_read_mutex);
#define rcu_dereference_check_mce(p) \
rcu_dereference_index_check((p), \
rcu_read_lock_sched_held() || \
- lockdep_is_held(&mce_read_mutex))
+ lockdep_is_held(&mce_chrdev_read_mutex))
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
@@ -94,7 +90,8 @@ static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };
-static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
+static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
+
static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;
@@ -373,6 +370,31 @@ static void mce_wrmsrl(u32 msr, u64 v)
}
/*
+ * Collect all global (w.r.t. this processor) status about this machine
+ * check into our "mce" struct so that we can use it later to assess
+ * the severity of the problem as we read per-bank specific details.
+ */
+static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
+{
+ mce_setup(m);
+
+ m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+ if (regs) {
+ /*
+ * Get the address of the instruction at the time of
+ * the machine check error.
+ */
+ if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
+ m->ip = regs->ip;
+ m->cs = regs->cs;
+ }
+ /* Use accurate RIP reporting if available. */
+ if (rip_msr)
+ m->ip = mce_rdmsrl(rip_msr);
+ }
+}
+
+/*
* Simple lockless ring to communicate PFNs from the exception handler with the
* process context work function. This is vastly simplified because there's
* only a single reader and a single writer.
@@ -443,40 +465,13 @@ static void mce_schedule_work(void)
}
}
-/*
- * Get the address of the instruction at the time of the machine check
- * error.
- */
-static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
-{
-
- if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) {
- m->ip = regs->ip;
- m->cs = regs->cs;
- } else {
- m->ip = 0;
- m->cs = 0;
- }
- if (rip_msr)
- m->ip = mce_rdmsrl(rip_msr);
-}
+DEFINE_PER_CPU(struct irq_work, mce_irq_work);
-#ifdef CONFIG_X86_LOCAL_APIC
-/*
- * Called after interrupts have been reenabled again
- * when a MCE happened during an interrupts off region
- * in the kernel.
- */
-asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
+static void mce_irq_work_cb(struct irq_work *entry)
{
- ack_APIC_irq();
- exit_idle();
- irq_enter();
mce_notify_irq();
mce_schedule_work();
- irq_exit();
}
-#endif
static void mce_report_event(struct pt_regs *regs)
{
@@ -492,29 +487,7 @@ static void mce_report_event(struct pt_regs *regs)
return;
}
-#ifdef CONFIG_X86_LOCAL_APIC
- /*
- * Without APIC do not notify. The event will be picked
- * up eventually.
- */
- if (!cpu_has_apic)
- return;
-
- /*
- * When interrupts are disabled we cannot use
- * kernel services safely. Trigger an self interrupt
- * through the APIC to instead do the notification
- * after interrupts are reenabled again.
- */
- apic->send_IPI_self(MCE_SELF_VECTOR);
-
- /*
- * Wait for idle afterwards again so that we don't leave the
- * APIC in a non idle state because the normal APIC writes
- * cannot exclude us.
- */
- apic_wait_icr_idle();
-#endif
+ irq_work_queue(&__get_cpu_var(mce_irq_work));
}
DEFINE_PER_CPU(unsigned, mce_poll_count);
@@ -541,9 +514,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
percpu_inc(mce_poll_count);
- mce_setup(&m);
+ mce_gather_info(&m, NULL);
- m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
for (i = 0; i < banks; i++) {
if (!mce_banks[i].ctl || !test_bit(i, *b))
continue;
@@ -879,9 +851,9 @@ static int mce_usable_address(struct mce *m)
{
if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
return 0;
- if ((m->misc & 0x3f) > PAGE_SHIFT)
+ if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
return 0;
- if (((m->misc >> 6) & 7) != MCM_ADDR_PHYS)
+ if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
return 0;
return 1;
}
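The named macros above encode the same MCi_MISC layout the open-coded version tested: bits 5:0 hold the least significant valid bit of the recorded address, bits 8:6 the address mode, where mode 2 means a physical address. Reconstructed from the replaced expressions:

	#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)		/* bits 5:0 */
	#define MCI_MISC_ADDR_MODE(m)	(((m) >> 6) & 7)	/* bits 8:6 */
	#define MCI_MISC_ADDR_PHYS	2			/* physical address mode */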
@@ -942,9 +914,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (!banks)
goto out;
- mce_setup(&m);
+ mce_gather_info(&m, regs);
- m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
final = &__get_cpu_var(mces_seen);
*final = m;
@@ -1028,7 +999,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
mce_ring_add(m.addr >> PAGE_SHIFT);
- mce_get_rip(&m, regs);
mce_log(&m);
if (severity > worst) {
@@ -1190,7 +1160,8 @@ int mce_notify_irq(void)
clear_thread_flag(TIF_MCE_NOTIFY);
if (test_and_clear_bit(0, &mce_need_notify)) {
- wake_up_interruptible(&mce_wait);
+ /* wake processes polling /dev/mcelog */
+ wake_up_interruptible(&mce_chrdev_wait);
/*
* There is no risk of missing notifications because
@@ -1363,18 +1334,23 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
return 0;
}
-static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
+static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
if (c->x86 != 5)
- return;
+ return 0;
+
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
intel_p5_mcheck_init(c);
+ return 1;
break;
case X86_VENDOR_CENTAUR:
winchip_mcheck_init(c);
+ return 1;
break;
}
+
+ return 0;
}
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
@@ -1428,7 +1404,8 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
if (mce_disabled)
return;
- __mcheck_cpu_ancient_init(c);
+ if (__mcheck_cpu_ancient_init(c))
+ return;
if (!mce_available(c))
return;
@@ -1444,44 +1421,45 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
__mcheck_cpu_init_vendor(c);
__mcheck_cpu_init_timer();
INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-
+ init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}
/*
- * Character device to read and clear the MCE log.
+ * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
*/
-static DEFINE_SPINLOCK(mce_state_lock);
-static int open_count; /* #times opened */
-static int open_exclu; /* already open exclusive? */
+static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+static int mce_chrdev_open_count; /* #times opened */
+static int mce_chrdev_open_exclu; /* already open exclusive? */
-static int mce_open(struct inode *inode, struct file *file)
+static int mce_chrdev_open(struct inode *inode, struct file *file)
{
- spin_lock(&mce_state_lock);
+ spin_lock(&mce_chrdev_state_lock);
- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
- spin_unlock(&mce_state_lock);
+ if (mce_chrdev_open_exclu ||
+ (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_chrdev_state_lock);
return -EBUSY;
}
if (file->f_flags & O_EXCL)
- open_exclu = 1;
- open_count++;
+ mce_chrdev_open_exclu = 1;
+ mce_chrdev_open_count++;
- spin_unlock(&mce_state_lock);
+ spin_unlock(&mce_chrdev_state_lock);
return nonseekable_open(inode, file);
}
-static int mce_release(struct inode *inode, struct file *file)
+static int mce_chrdev_release(struct inode *inode, struct file *file)
{
- spin_lock(&mce_state_lock);
+ spin_lock(&mce_chrdev_state_lock);
- open_count--;
- open_exclu = 0;
+ mce_chrdev_open_count--;
+ mce_chrdev_open_exclu = 0;
- spin_unlock(&mce_state_lock);
+ spin_unlock(&mce_chrdev_state_lock);
return 0;
}
@@ -1530,8 +1508,8 @@ static int __mce_read_apei(char __user **ubuf, size_t usize)
return 0;
}
-static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
- loff_t *off)
+static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
+ size_t usize, loff_t *off)
{
char __user *buf = ubuf;
unsigned long *cpu_tsc;
@@ -1542,7 +1520,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
if (!cpu_tsc)
return -ENOMEM;
- mutex_lock(&mce_read_mutex);
+ mutex_lock(&mce_chrdev_read_mutex);
if (!mce_apei_read_done) {
err = __mce_read_apei(&buf, usize);
@@ -1562,19 +1540,18 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
do {
for (i = prev; i < next; i++) {
unsigned long start = jiffies;
+ struct mce *m = &mcelog.entry[i];
- while (!mcelog.entry[i].finished) {
+ while (!m->finished) {
if (time_after_eq(jiffies, start + 2)) {
- memset(mcelog.entry + i, 0,
- sizeof(struct mce));
+ memset(m, 0, sizeof(*m));
goto timeout;
}
cpu_relax();
}
smp_rmb();
- err |= copy_to_user(buf, mcelog.entry + i,
- sizeof(struct mce));
- buf += sizeof(struct mce);
+ err |= copy_to_user(buf, m, sizeof(*m));
+ buf += sizeof(*m);
timeout:
;
}
@@ -1594,13 +1571,13 @@ timeout:
on_each_cpu(collect_tscs, cpu_tsc, 1);
for (i = next; i < MCE_LOG_LEN; i++) {
- if (mcelog.entry[i].finished &&
- mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
- err |= copy_to_user(buf, mcelog.entry+i,
- sizeof(struct mce));
+ struct mce *m = &mcelog.entry[i];
+
+ if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
+ err |= copy_to_user(buf, m, sizeof(*m));
smp_rmb();
- buf += sizeof(struct mce);
- memset(&mcelog.entry[i], 0, sizeof(struct mce));
+ buf += sizeof(*m);
+ memset(m, 0, sizeof(*m));
}
}
@@ -1608,15 +1585,15 @@ timeout:
err = -EFAULT;
out:
- mutex_unlock(&mce_read_mutex);
+ mutex_unlock(&mce_chrdev_read_mutex);
kfree(cpu_tsc);
return err ? err : buf - ubuf;
}
-static unsigned int mce_poll(struct file *file, poll_table *wait)
+static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
- poll_wait(file, &mce_wait, wait);
+ poll_wait(file, &mce_chrdev_wait, wait);
if (rcu_access_index(mcelog.next))
return POLLIN | POLLRDNORM;
if (!mce_apei_read_done && apei_check_mce())
@@ -1624,7 +1601,8 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
return 0;
}
-static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
+ unsigned long arg)
{
int __user *p = (int __user *)arg;
@@ -1652,16 +1630,16 @@ static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
/* Modified in mce-inject.c, so not static or const */
struct file_operations mce_chrdev_ops = {
- .open = mce_open,
- .release = mce_release,
- .read = mce_read,
- .poll = mce_poll,
- .unlocked_ioctl = mce_ioctl,
- .llseek = no_llseek,
+ .open = mce_chrdev_open,
+ .release = mce_chrdev_release,
+ .read = mce_chrdev_read,
+ .poll = mce_chrdev_poll,
+ .unlocked_ioctl = mce_chrdev_ioctl,
+ .llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(mce_chrdev_ops);
-static struct miscdevice mce_log_device = {
+static struct miscdevice mce_chrdev_device = {
MISC_MCELOG_MINOR,
"mcelog",
&mce_chrdev_ops,
@@ -1719,7 +1697,7 @@ int __init mcheck_init(void)
}
/*
- * Sysfs support
+ * mce_syscore: PM support
*/
/*
@@ -1739,12 +1717,12 @@ static int mce_disable_error_reporting(void)
return 0;
}
-static int mce_suspend(void)
+static int mce_syscore_suspend(void)
{
return mce_disable_error_reporting();
}
-static void mce_shutdown(void)
+static void mce_syscore_shutdown(void)
{
mce_disable_error_reporting();
}
@@ -1754,18 +1732,22 @@ static void mce_shutdown(void)
* Only one CPU is active at this time, the others get re-added later using
* CPU hotplug:
*/
-static void mce_resume(void)
+static void mce_syscore_resume(void)
{
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
}
static struct syscore_ops mce_syscore_ops = {
- .suspend = mce_suspend,
- .shutdown = mce_shutdown,
- .resume = mce_resume,
+ .suspend = mce_syscore_suspend,
+ .shutdown = mce_syscore_shutdown,
+ .resume = mce_syscore_resume,
};
+/*
+ * mce_sysdev: Sysfs support
+ */
+
static void mce_cpu_restart(void *data)
{
del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1801,11 +1783,11 @@ static void mce_enable_ce(void *all)
__mcheck_cpu_init_timer();
}
-static struct sysdev_class mce_sysclass = {
+static struct sysdev_class mce_sysdev_class = {
.name = "machinecheck",
};
-DEFINE_PER_CPU(struct sys_device, mce_dev);
+DEFINE_PER_CPU(struct sys_device, mce_sysdev);
__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
@@ -1934,7 +1916,7 @@ static struct sysdev_ext_attribute attr_cmci_disabled = {
&mce_cmci_disabled
};
-static struct sysdev_attribute *mce_attrs[] = {
+static struct sysdev_attribute *mce_sysdev_attrs[] = {
&attr_tolerant.attr,
&attr_check_interval.attr,
&attr_trigger,
@@ -1945,66 +1927,67 @@ static struct sysdev_attribute *mce_attrs[] = {
NULL
};
-static cpumask_var_t mce_dev_initialized;
+static cpumask_var_t mce_sysdev_initialized;
/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_create_device(unsigned int cpu)
+static __cpuinit int mce_sysdev_create(unsigned int cpu)
{
+ struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
int err;
int i, j;
if (!mce_available(&boot_cpu_data))
return -EIO;
- memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
- per_cpu(mce_dev, cpu).id = cpu;
- per_cpu(mce_dev, cpu).cls = &mce_sysclass;
+ memset(&sysdev->kobj, 0, sizeof(struct kobject));
+ sysdev->id = cpu;
+ sysdev->cls = &mce_sysdev_class;
- err = sysdev_register(&per_cpu(mce_dev, cpu));
+ err = sysdev_register(sysdev);
if (err)
return err;
- for (i = 0; mce_attrs[i]; i++) {
- err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
+ for (i = 0; mce_sysdev_attrs[i]; i++) {
+ err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]);
if (err)
goto error;
}
for (j = 0; j < banks; j++) {
- err = sysdev_create_file(&per_cpu(mce_dev, cpu),
- &mce_banks[j].attr);
+ err = sysdev_create_file(sysdev, &mce_banks[j].attr);
if (err)
goto error2;
}
- cpumask_set_cpu(cpu, mce_dev_initialized);
+ cpumask_set_cpu(cpu, mce_sysdev_initialized);
return 0;
error2:
while (--j >= 0)
- sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr);
+ sysdev_remove_file(sysdev, &mce_banks[j].attr);
error:
while (--i >= 0)
- sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
+ sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
- sysdev_unregister(&per_cpu(mce_dev, cpu));
+ sysdev_unregister(sysdev);
return err;
}
-static __cpuinit void mce_remove_device(unsigned int cpu)
+static __cpuinit void mce_sysdev_remove(unsigned int cpu)
{
+ struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
int i;
- if (!cpumask_test_cpu(cpu, mce_dev_initialized))
+ if (!cpumask_test_cpu(cpu, mce_sysdev_initialized))
return;
- for (i = 0; mce_attrs[i]; i++)
- sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
+ for (i = 0; mce_sysdev_attrs[i]; i++)
+ sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
for (i = 0; i < banks; i++)
- sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr);
+ sysdev_remove_file(sysdev, &mce_banks[i].attr);
- sysdev_unregister(&per_cpu(mce_dev, cpu));
- cpumask_clear_cpu(cpu, mce_dev_initialized);
+ sysdev_unregister(sysdev);
+ cpumask_clear_cpu(cpu, mce_sysdev_initialized);
}
/* Make sure there are no machine checks on offlined CPUs. */
@@ -2054,7 +2037,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- mce_create_device(cpu);
+ mce_sysdev_create(cpu);
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
break;
@@ -2062,7 +2045,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DEAD_FROZEN:
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
- mce_remove_device(cpu);
+ mce_sysdev_remove(cpu);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
@@ -2116,27 +2099,28 @@ static __init int mcheck_init_device(void)
if (!mce_available(&boot_cpu_data))
return -EIO;
- zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);
+ zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL);
mce_init_banks();
- err = sysdev_class_register(&mce_sysclass);
+ err = sysdev_class_register(&mce_sysdev_class);
if (err)
return err;
for_each_online_cpu(i) {
- err = mce_create_device(i);
+ err = mce_sysdev_create(i);
if (err)
return err;
}
register_syscore_ops(&mce_syscore_ops);
register_hotcpu_notifier(&mce_cpu_notifier);
- misc_register(&mce_log_device);
+
+ /* register character device /dev/mcelog */
+ misc_register(&mce_chrdev_device);
return err;
}
-
device_initcall(mcheck_init_device);
/*
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index bb0adad35143..f5474218cffe 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -548,7 +548,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (!b)
goto out;
- err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj,
+ err = sysfs_create_link(&per_cpu(mce_sysdev, cpu).kobj,
b->kobj, name);
if (err)
goto out;
@@ -571,7 +571,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
}
- b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj);
+ b->kobj = kobject_create_and_add(name, &per_cpu(mce_sysdev, cpu).kobj);
if (!b->kobj)
goto out_free;
@@ -591,7 +591,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (i == cpu)
continue;
- err = sysfs_create_link(&per_cpu(mce_dev, i).kobj,
+ err = sysfs_create_link(&per_cpu(mce_sysdev, i).kobj,
b->kobj, name);
if (err)
goto out;
@@ -669,7 +669,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
#ifdef CONFIG_SMP
/* sibling symlink */
if (shared_bank[bank] && b->blocks->cpu != cpu) {
- sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
+ sysfs_remove_link(&per_cpu(mce_sysdev, cpu).kobj, name);
per_cpu(threshold_banks, cpu)[bank] = NULL;
return;
@@ -681,7 +681,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
if (i == cpu)
continue;
- sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
+ sysfs_remove_link(&per_cpu(mce_sysdev, i).kobj, name);
per_cpu(threshold_banks, i)[bank] = NULL;
}
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 929739a653d1..08119a37e53c 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -79,7 +79,6 @@ void set_mtrr_ops(const struct mtrr_ops *ops)
static int have_wrcomb(void)
{
struct pci_dev *dev;
- u8 rev;
dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
if (dev != NULL) {
@@ -89,13 +88,11 @@ static int have_wrcomb(void)
* chipsets to be tagged
*/
if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
- dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
- if (rev <= 5) {
- pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
- pci_dev_put(dev);
- return 0;
- }
+ dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
+ dev->revision <= 5) {
+ pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
+ pci_dev_put(dev);
+ return 0;
}
/*
* Intel 450NX errata # 23. Non ascending cacheline evictions to
@@ -137,55 +134,43 @@ static void __init init_table(void)
}
struct set_mtrr_data {
- atomic_t count;
- atomic_t gate;
unsigned long smp_base;
unsigned long smp_size;
unsigned int smp_reg;
mtrr_type smp_type;
};
-static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
-
/**
- * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
+ * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
+ * by all the CPUs.
* @info: pointer to mtrr configuration data
*
* Returns nothing.
*/
-static int mtrr_work_handler(void *info)
+static int mtrr_rendezvous_handler(void *info)
{
#ifdef CONFIG_SMP
struct set_mtrr_data *data = info;
- unsigned long flags;
-
- atomic_dec(&data->count);
- while (!atomic_read(&data->gate))
- cpu_relax();
-
- local_irq_save(flags);
-
- atomic_dec(&data->count);
- while (atomic_read(&data->gate))
- cpu_relax();
- /* The master has cleared me to execute */
+ /*
+ * We use this same function to initialize the mtrrs during boot,
+ * resume, runtime cpu online and on an explicit request to set a
+ * specific MTRR.
+ *
+ * During boot or suspend, the state of the boot cpu's mtrrs has been
+ * saved, and we want to replicate that across all the cpus that come
+ * online (either at the end of boot or resume or during a runtime cpu
+ * online). If we're doing that, @reg is set to something special and on
+ * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
+ * started the boot/resume sequence, this might be a duplicate
+ * set_all()).
+ */
if (data->smp_reg != ~0U) {
mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
- } else if (mtrr_aps_delayed_init) {
- /*
- * Initialize the MTRRs inaddition to the synchronisation.
- */
+ } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
mtrr_if->set_all();
}
-
- atomic_dec(&data->count);
- while (!atomic_read(&data->gate))
- cpu_relax();
-
- atomic_dec(&data->count);
- local_irq_restore(flags);
#endif
return 0;
}
@@ -223,20 +208,11 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
* 14. Wait for buddies to catch up
* 15. Enable interrupts.
*
- * What does that mean for us? Well, first we set data.count to the number
- * of CPUs. As each CPU announces that it started the rendezvous handler by
- * decrementing the count, We reset data.count and set the data.gate flag
- * allowing all the cpu's to proceed with the work. As each cpu disables
- * interrupts, it'll decrement data.count once. We wait until it hits 0 and
- * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
- * are waiting for that flag to be cleared. Once it's cleared, each
- * CPU goes through the transition of updating MTRRs.
- * The CPU vendors may each do it differently,
- * so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate
- * to be set.
- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
- * Everyone then enables interrupts and we all continue on.
+ * What does that mean for us? Well, stop_machine() will ensure that
+ * the rendezvous handler is started on each CPU. And in lockstep they
+ * do the state transition of disabling interrupts, updating MTRRs
+ * (the CPU vendors may each do it differently, so we call mtrr_if->set()
+ * callback and let them take care of it.) and enabling interrupts.
*
* Note that the mechanism is the same for UP systems, too; all the SMP stuff
* becomes nops.
@@ -244,92 +220,26 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
- struct set_mtrr_data data;
- unsigned long flags;
- int cpu;
-
- preempt_disable();
-
- data.smp_reg = reg;
- data.smp_base = base;
- data.smp_size = size;
- data.smp_type = type;
- atomic_set(&data.count, num_booting_cpus() - 1);
-
- /* Make sure data.count is visible before unleashing other CPUs */
- smp_wmb();
- atomic_set(&data.gate, 0);
-
- /* Start the ball rolling on other CPUs */
- for_each_online_cpu(cpu) {
- struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
-
- if (cpu == smp_processor_id())
- continue;
-
- stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
- }
-
-
- while (atomic_read(&data.count))
- cpu_relax();
-
- /* Ok, reset count and toggle gate */
- atomic_set(&data.count, num_booting_cpus() - 1);
- smp_wmb();
- atomic_set(&data.gate, 1);
-
- local_irq_save(flags);
-
- while (atomic_read(&data.count))
- cpu_relax();
-
- /* Ok, reset count and toggle gate */
- atomic_set(&data.count, num_booting_cpus() - 1);
- smp_wmb();
- atomic_set(&data.gate, 0);
-
- /* Do our MTRR business */
-
- /*
- * HACK!
- *
- * We use this same function to initialize the mtrrs during boot,
- * resume, runtime cpu online and on an explicit request to set a
- * specific MTRR.
- *
- * During boot or suspend, the state of the boot cpu's mtrrs has been
- * saved, and we want to replicate that across all the cpus that come
- * online (either at the end of boot or resume or during a runtime cpu
- * online). If we're doing that, @reg is set to something special and on
- * this cpu we still do mtrr_if->set_all(). During boot/resume, this
- * is unnecessary if at this point we are still on the cpu that started
- * the boot/resume sequence. But there is no guarantee that we are still
- * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be
- * sure that we are in sync with everyone else.
- */
- if (reg != ~0U)
- mtrr_if->set(reg, base, size, type);
- else
- mtrr_if->set_all();
+ struct set_mtrr_data data = { .smp_reg = reg,
+ .smp_base = base,
+ .smp_size = size,
+ .smp_type = type
+ };
- /* Wait for the others */
- while (atomic_read(&data.count))
- cpu_relax();
-
- atomic_set(&data.count, num_booting_cpus() - 1);
- smp_wmb();
- atomic_set(&data.gate, 1);
-
- /*
- * Wait here for everyone to have seen the gate change
- * So we're the last ones to touch 'data'
- */
- while (atomic_read(&data.count))
- cpu_relax();
+ stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
+}
- local_irq_restore(flags);
- preempt_enable();
+static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type)
+{
+ struct set_mtrr_data data = { .smp_reg = reg,
+ .smp_base = base,
+ .smp_size = size,
+ .smp_type = type
+ };
+
+ stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
+ cpu_callout_mask);
}
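The net effect of this rewrite: the hand-rolled count/gate rendezvous is gone because stop_machine() already runs the callback on every CPU in the given mask with interrupts disabled and all CPUs held in lockstep. The calling pattern, using only names from this hunk:

	struct set_mtrr_data data = {
		.smp_reg  = reg,
		.smp_base = base,
		.smp_size = size,
		.smp_type = type,
	};

	/* each CPU in the mask runs mtrr_rendezvous_handler(&data) with IRQs
	 * off; stop_machine() returns once every CPU has finished */
	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);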
/**
@@ -783,7 +693,7 @@ void mtrr_ap_init(void)
* 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
* lock to prevent mtrr entry changes
*/
- set_mtrr(~0U, 0, 0, 0);
+ set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
}
/**
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3a0338b4b179..4ee3abf20ed6 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -22,7 +22,6 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
@@ -45,38 +44,27 @@ do { \
#endif
/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ * | NHM/WSM | SNB |
+ * register -------------------------------
+ * | HT | no HT | HT | no HT |
+ *-----------------------------------------
+ * offcore | core | core | cpu | core |
+ * lbr_sel | core | core | cpu | core |
+ * ld_lat | cpu | core | cpu | core |
+ *-----------------------------------------
+ *
+ * Given that there is a small number of shared regs,
+ * we can pre-allocate their slot in the per-cpu
+ * per-core reg tables.
*/
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
- unsigned long offset, addr = (unsigned long)from;
- unsigned long size, len = 0;
- struct page *page;
- void *map;
- int ret;
-
- do {
- ret = __get_user_pages_fast(addr, 1, 0, &page);
- if (!ret)
- break;
-
- offset = addr & (PAGE_SIZE - 1);
- size = min(PAGE_SIZE - offset, n - len);
-
- map = kmap_atomic(page);
- memcpy(to, map+offset, size);
- kunmap_atomic(map);
- put_page(page);
+enum extra_reg_type {
+ EXTRA_REG_NONE = -1, /* not used */
- len += size;
- to += size;
- addr += size;
+ EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
+ EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
- } while (len < n);
-
- return len;
-}
+ EXTRA_REG_MAX /* number of entries needed */
+};
struct event_constraint {
union {
@@ -132,11 +120,10 @@ struct cpu_hw_events {
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
/*
- * Intel percore register state.
- * Coordinate shared resources between HT threads.
+ * manage shared (per-core, per-cpu) registers
+ * used on Intel NHM/WSM/SNB
*/
- int percore_used; /* Used by this CPU? */
- struct intel_percore *per_core;
+ struct intel_shared_regs *shared_regs;
/*
* AMD specific bits
@@ -187,26 +174,45 @@ struct cpu_hw_events {
for ((e) = (c); (e)->weight; (e)++)
/*
+ * Per register state.
+ */
+struct er_account {
+ raw_spinlock_t lock; /* per-core: protect structure */
+ u64 config; /* extra MSR config */
+ u64 reg; /* extra MSR number */
+ atomic_t ref; /* reference count */
+};
+
+/*
* Extra registers for specific events.
+ *
* Some events need large masks and require external MSRs.
- * Define a mapping to these extra registers.
+ * Those extra MSRs end up being shared for all events on
+ * a PMU and sometimes between PMU of sibling HT threads.
+ * In either case, the kernel needs to handle conflicting
+ * accesses to those extra, shared, regs. The data structure
+ * to manage those registers is stored in cpu_hw_event.
*/
struct extra_reg {
unsigned int event;
unsigned int msr;
u64 config_mask;
u64 valid_mask;
+ int idx; /* per_xxx->regs[] reg index */
};
-#define EVENT_EXTRA_REG(e, ms, m, vm) { \
+#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
+ .idx = EXTRA_REG_##i \
}
-#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \
- EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
-#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
+
+#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
+ EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
+
+#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
union perf_capabilities {
struct {
@@ -252,7 +258,6 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
- struct event_constraint *percore_constraints;
void (*quirks)(void);
int perfctr_second_write;
@@ -286,8 +291,12 @@ struct x86_pmu {
* Extra registers for events
*/
struct extra_reg *extra_regs;
+ unsigned int er_flags;
};
+#define ERF_NO_HT_SHARING 1
+#define ERF_HAS_RSP_1 2
+
static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -393,10 +402,10 @@ static inline unsigned int x86_pmu_event_addr(int index)
*/
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
+ struct hw_perf_event_extra *reg;
struct extra_reg *er;
- event->hw.extra_reg = 0;
- event->hw.extra_config = 0;
+ reg = &event->hw.extra_reg;
if (!x86_pmu.extra_regs)
return 0;
@@ -406,8 +415,10 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
- event->hw.extra_reg = er->msr;
- event->hw.extra_config = event->attr.config1;
+
+ reg->idx = er->idx;
+ reg->config = event->attr.config1;
+ reg->reg = er->msr;
break;
}
return 0;
@@ -706,6 +717,9 @@ static int __x86_pmu_event_init(struct perf_event *event)
event->hw.last_cpu = -1;
event->hw.last_tag = ~0ULL;
+ /* mark unused */
+ event->hw.extra_reg.idx = EXTRA_REG_NONE;
+
return x86_pmu.hw_config(event);
}
@@ -747,8 +761,8 @@ static void x86_pmu_disable(struct pmu *pmu)
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{
- if (hwc->extra_reg)
- wrmsrl(hwc->extra_reg, hwc->extra_config);
+ if (hwc->extra_reg.reg)
+ wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
wrmsrl(hwc->config_base, hwc->config | enable_mask);
}
@@ -1332,7 +1346,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
if (!x86_perf_event_set_period(event))
continue;
- if (perf_event_overflow(event, 1, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
@@ -1637,6 +1651,40 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
perf_pmu_enable(pmu);
return 0;
}
+/*
+ * a fake_cpuc is used to validate event groups. Due to
+ * the extra reg logic, we need to also allocate a fake
+ * per_core and per_cpu structure. Otherwise, group events
+ * using extra reg may conflict without the kernel being
+ * able to catch this when the last event gets added to
+ * the group.
+ */
+static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+{
+ kfree(cpuc->shared_regs);
+ kfree(cpuc);
+}
+
+static struct cpu_hw_events *allocate_fake_cpuc(void)
+{
+ struct cpu_hw_events *cpuc;
+ int cpu = raw_smp_processor_id();
+
+ cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+ if (!cpuc)
+ return ERR_PTR(-ENOMEM);
+
+ /* only needed, if we have extra_regs */
+ if (x86_pmu.extra_regs) {
+ cpuc->shared_regs = allocate_shared_regs(cpu);
+ if (!cpuc->shared_regs)
+ goto error;
+ }
+ return cpuc;
+error:
+ free_fake_cpuc(cpuc);
+ return ERR_PTR(-ENOMEM);
+}
/*
* validate that we can schedule this event
@@ -1647,9 +1695,9 @@ static int validate_event(struct perf_event *event)
struct event_constraint *c;
int ret = 0;
- fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
- if (!fake_cpuc)
- return -ENOMEM;
+ fake_cpuc = allocate_fake_cpuc();
+ if (IS_ERR(fake_cpuc))
+ return PTR_ERR(fake_cpuc);
c = x86_pmu.get_event_constraints(fake_cpuc, event);
@@ -1659,7 +1707,7 @@ static int validate_event(struct perf_event *event)
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(fake_cpuc, event);
- kfree(fake_cpuc);
+ free_fake_cpuc(fake_cpuc);
return ret;
}
@@ -1679,36 +1727,32 @@ static int validate_group(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct cpu_hw_events *fake_cpuc;
- int ret, n;
-
- ret = -ENOMEM;
- fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
- if (!fake_cpuc)
- goto out;
+ int ret = -ENOSPC, n;
+ fake_cpuc = allocate_fake_cpuc();
+ if (IS_ERR(fake_cpuc))
+ return PTR_ERR(fake_cpuc);
/*
* the event is not yet connected with its
* siblings therefore we must first collect
* existing siblings, then add the new event
* before we can simulate the scheduling
*/
- ret = -ENOSPC;
n = collect_events(fake_cpuc, leader, true);
if (n < 0)
- goto out_free;
+ goto out;
fake_cpuc->n_events = n;
n = collect_events(fake_cpuc, event, false);
if (n < 0)
- goto out_free;
+ goto out;
fake_cpuc->n_events = n;
ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
-out_free:
- kfree(fake_cpuc);
out:
+ free_fake_cpuc(fake_cpuc);
return ret;
}
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index fe29c1d2219e..941caa2e449b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -89,6 +89,20 @@ static __initconst const u64 amd_hw_cache_event_ids
[ C(RESULT_MISS) ] = -1,
},
},
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
+ [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
};
/*
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 41178c826c48..45fbb8f7f549 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,25 +1,15 @@
#ifdef CONFIG_CPU_SUP_INTEL
-#define MAX_EXTRA_REGS 2
-
-/*
- * Per register state.
- */
-struct er_account {
- int ref; /* reference count */
- unsigned int extra_reg; /* extra MSR number */
- u64 extra_config; /* extra MSR config */
-};
-
/*
- * Per core state
- * This used to coordinate shared registers for HT threads.
+ * Per core/cpu state
+ *
+ * Used to coordinate shared registers between HT threads or
+ * among events on a single PMU.
*/
-struct intel_percore {
- raw_spinlock_t lock; /* protect structure */
- struct er_account regs[MAX_EXTRA_REGS];
- int refcnt; /* number of threads */
- unsigned core_id;
+struct intel_shared_regs {
+ struct er_account regs[EXTRA_REG_MAX];
+ int refcnt; /* per-core: #HT threads */
+ unsigned core_id; /* per-core: core id */
};
/*
@@ -88,16 +78,10 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
EVENT_EXTRA_END
};
-static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly =
-{
- INTEL_EVENT_CONSTRAINT(0xb7, 0),
- EVENT_CONSTRAINT_END
-};
-
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
@@ -116,8 +100,6 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
- INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
- INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
EVENT_CONSTRAINT_END
@@ -125,15 +107,13 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
+ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
EVENT_EXTRA_END
};
-static struct event_constraint intel_westmere_percore_constraints[] __read_mostly =
+static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
- INTEL_EVENT_CONSTRAINT(0xb7, 0),
- INTEL_EVENT_CONSTRAINT(0xbb, 0),
EVENT_CONSTRAINT_END
};
@@ -145,6 +125,12 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
EVENT_CONSTRAINT_END
};
+static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ EVENT_EXTRA_END
+};
+
static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];
@@ -245,6 +231,21 @@ static __initconst const u64 snb_hw_cache_event_ids
[ C(RESULT_MISS) ] = -1,
},
},
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
};
static __initconst const u64 westmere_hw_cache_event_ids
@@ -346,6 +347,20 @@ static __initconst const u64 westmere_hw_cache_event_ids
[ C(RESULT_MISS) ] = -1,
},
},
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ },
};
/*
@@ -398,7 +413,21 @@ static __initconst const u64 nehalem_hw_cache_extra_regs
[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
},
- }
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM,
+ [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM,
+ [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM,
+ [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM,
+ },
+ },
};
static __initconst const u64 nehalem_hw_cache_event_ids
@@ -500,6 +529,20 @@ static __initconst const u64 nehalem_hw_cache_event_ids
[ C(RESULT_MISS) ] = -1,
},
},
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ },
};
static __initconst const u64 core2_hw_cache_event_ids
@@ -1003,7 +1046,7 @@ again:
data.period = event->hw.last_period;
- if (perf_event_overflow(event, 1, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
@@ -1037,65 +1080,121 @@ intel_bts_constraints(struct perf_event *event)
return NULL;
}
+static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+{
+ if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
+ return false;
+
+ if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
+ event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+ event->hw.config |= 0x01bb;
+ event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
+ event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
+ } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+ event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+ event->hw.config |= 0x01b7;
+ event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
+ event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+ }
+
+ if (event->hw.extra_reg.idx == orig_idx)
+ return false;
+
+ return true;
+}
+
+/*
+ * manage allocation of shared extra msr for certain events
+ *
+ * sharing can be:
+ * per-cpu: to be shared between the various events on a single PMU
+ * per-core: per-cpu + shared by HT threads
+ */
static struct event_constraint *
-intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
{
- struct hw_perf_event *hwc = &event->hw;
- unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
- struct event_constraint *c;
- struct intel_percore *pc;
+ struct event_constraint *c = &emptyconstraint;
+ struct hw_perf_event_extra *reg = &event->hw.extra_reg;
struct er_account *era;
- int i;
- int free_slot;
- int found;
+ unsigned long flags;
+ int orig_idx = reg->idx;
- if (!x86_pmu.percore_constraints || hwc->extra_alloc)
- return NULL;
+ /* already allocated shared msr */
+ if (reg->alloc)
+ return &unconstrained;
- for (c = x86_pmu.percore_constraints; c->cmask; c++) {
- if (e != c->code)
- continue;
+again:
+ era = &cpuc->shared_regs->regs[reg->idx];
+ /*
+ * we use raw_spin_lock_irqsave() to avoid lockdep issues when
+ * passing a fake cpuc
+ */
+ raw_spin_lock_irqsave(&era->lock, flags);
+
+ if (!atomic_read(&era->ref) || era->config == reg->config) {
+
+ /* lock in msr value */
+ era->config = reg->config;
+ era->reg = reg->reg;
+
+ /* one more user */
+ atomic_inc(&era->ref);
+
+ /* no need to reallocate during incremental event scheduling */
+ reg->alloc = 1;
/*
- * Allocate resource per core.
+ * All events using extra_reg are unconstrained.
+ * Avoids calling x86_get_event_constraints()
+ *
+ * Must revisit if extra_reg controlling events
+ * ever have constraints. Worst case we go through
+ * the regular event constraint table.
*/
- pc = cpuc->per_core;
- if (!pc)
- break;
- c = &emptyconstraint;
- raw_spin_lock(&pc->lock);
- free_slot = -1;
- found = 0;
- for (i = 0; i < MAX_EXTRA_REGS; i++) {
- era = &pc->regs[i];
- if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
- /* Allow sharing same config */
- if (hwc->extra_config == era->extra_config) {
- era->ref++;
- cpuc->percore_used = 1;
- hwc->extra_alloc = 1;
- c = NULL;
- }
- /* else conflict */
- found = 1;
- break;
- } else if (era->ref == 0 && free_slot == -1)
- free_slot = i;
- }
- if (!found && free_slot != -1) {
- era = &pc->regs[free_slot];
- era->ref = 1;
- era->extra_reg = hwc->extra_reg;
- era->extra_config = hwc->extra_config;
- cpuc->percore_used = 1;
- hwc->extra_alloc = 1;
- c = NULL;
- }
- raw_spin_unlock(&pc->lock);
- return c;
+ c = &unconstrained;
+ } else if (intel_try_alt_er(event, orig_idx)) {
+ raw_spin_unlock(&era->lock);
+ goto again;
}
+ raw_spin_unlock_irqrestore(&era->lock, flags);
- return NULL;
+ return c;
+}
+
+static void
+__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
+ struct hw_perf_event_extra *reg)
+{
+ struct er_account *era;
+
+ /*
+ * only put the constraint if the extra reg was actually
+ * allocated. This also takes care of events which do
+ * not use an extra shared reg.
+ */
+ if (!reg->alloc)
+ return;
+
+ era = &cpuc->shared_regs->regs[reg->idx];
+
+ /* one fewer user */
+ atomic_dec(&era->ref);
+
+ /* allocate again next time */
+ reg->alloc = 0;
+}
+
+static struct event_constraint *
+intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct event_constraint *c = NULL;
+
+ if (event->hw.extra_reg.idx != EXTRA_REG_NONE)
+ c = __intel_shared_reg_get_constraints(cpuc, event);
+
+ return c;
}
static struct event_constraint *
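
__intel_shared_reg_get_constraints() above boils down to one ownership rule for a shared extra MSR: an er_account slot is granted when it is unreferenced or already programmed with the identical config, and anything else is a conflict that leaves the event with the empty constraint. A userspace sketch of just that rule, with the locking and the alternate-encoding retry stripped out and all names illustrative:

#include <stdint.h>
#include <stdio.h>

/*
 * A shared extra MSR can be handed to an event when the slot is
 * unused (ref == 0) or already holds the same config.
 */
struct era_slot {
        int      ref;           /* number of events using the slot */
        uint64_t config;        /* value locked into the MSR */
};

static int era_try_get(struct era_slot *era, uint64_t config)
{
        if (era->ref && era->config != config)
                return 0;       /* conflict: the MSR is owned */
        era->config = config;   /* lock in the MSR value */
        era->ref++;
        return 1;
}

static void era_put(struct era_slot *era)
{
        era->ref--;             /* allocate again next time */
}

int main(void)
{
        struct era_slot era = { 0, 0 };

        printf("first  0x1b7: %d\n", era_try_get(&era, 0x1b7)); /* granted */
        printf("same   0x1b7: %d\n", era_try_get(&era, 0x1b7)); /* granted */
        printf("other  0x1bb: %d\n", era_try_get(&era, 0x1bb)); /* conflict */
        era_put(&era);
        era_put(&era);
        return 0;
}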
@@ -1111,49 +1210,28 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
- c = intel_percore_constraints(cpuc, event);
+ c = intel_shared_regs_constraints(cpuc, event);
if (c)
return c;
return x86_get_event_constraints(cpuc, event);
}
-static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
+static void
+intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
- struct extra_reg *er;
- struct intel_percore *pc;
- struct er_account *era;
- struct hw_perf_event *hwc = &event->hw;
- int i, allref;
+ struct hw_perf_event_extra *reg;
- if (!cpuc->percore_used)
- return;
-
- for (er = x86_pmu.extra_regs; er->msr; er++) {
- if (er->event != (hwc->config & er->config_mask))
- continue;
+ reg = &event->hw.extra_reg;
+ if (reg->idx != EXTRA_REG_NONE)
+ __intel_shared_reg_put_constraints(cpuc, reg);
+}
- pc = cpuc->per_core;
- raw_spin_lock(&pc->lock);
- for (i = 0; i < MAX_EXTRA_REGS; i++) {
- era = &pc->regs[i];
- if (era->ref > 0 &&
- era->extra_config == hwc->extra_config &&
- era->extra_reg == er->msr) {
- era->ref--;
- hwc->extra_alloc = 0;
- break;
- }
- }
- allref = 0;
- for (i = 0; i < MAX_EXTRA_REGS; i++)
- allref += pc->regs[i].ref;
- if (allref == 0)
- cpuc->percore_used = 0;
- raw_spin_unlock(&pc->lock);
- break;
- }
+static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ intel_put_shared_regs_event_constraints(cpuc, event);
}
static int intel_pmu_hw_config(struct perf_event *event)
@@ -1231,20 +1309,36 @@ static __initconst const struct x86_pmu core_pmu = {
.event_constraints = intel_core_event_constraints,
};
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
+{
+ struct intel_shared_regs *regs;
+ int i;
+
+ regs = kzalloc_node(sizeof(struct intel_shared_regs),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (regs) {
+ /*
+ * initialize the locks to keep lockdep happy
+ */
+ for (i = 0; i < EXTRA_REG_MAX; i++)
+ raw_spin_lock_init(&regs->regs[i].lock);
+
+ regs->core_id = -1;
+ }
+ return regs;
+}
+
static int intel_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- if (!cpu_has_ht_siblings())
+ if (!x86_pmu.extra_regs)
return NOTIFY_OK;
- cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
- GFP_KERNEL, cpu_to_node(cpu));
- if (!cpuc->per_core)
+ cpuc->shared_regs = allocate_shared_regs(cpu);
+ if (!cpuc->shared_regs)
return NOTIFY_BAD;
- raw_spin_lock_init(&cpuc->per_core->lock);
- cpuc->per_core->core_id = -1;
return NOTIFY_OK;
}
@@ -1260,32 +1354,34 @@ static void intel_pmu_cpu_starting(int cpu)
*/
intel_pmu_lbr_reset();
- if (!cpu_has_ht_siblings())
+ if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING))
return;
for_each_cpu(i, topology_thread_cpumask(cpu)) {
- struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
+ struct intel_shared_regs *pc;
+ pc = per_cpu(cpu_hw_events, i).shared_regs;
if (pc && pc->core_id == core_id) {
- kfree(cpuc->per_core);
- cpuc->per_core = pc;
+ kfree(cpuc->shared_regs);
+ cpuc->shared_regs = pc;
break;
}
}
- cpuc->per_core->core_id = core_id;
- cpuc->per_core->refcnt++;
+ cpuc->shared_regs->core_id = core_id;
+ cpuc->shared_regs->refcnt++;
}
static void intel_pmu_cpu_dying(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- struct intel_percore *pc = cpuc->per_core;
+ struct intel_shared_regs *pc;
+ pc = cpuc->shared_regs;
if (pc) {
if (pc->core_id == -1 || --pc->refcnt == 0)
kfree(pc);
- cpuc->per_core = NULL;
+ cpuc->shared_regs = NULL;
}
fini_debug_store_on_cpu(cpu);
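
The intel_pmu_cpu_starting()/intel_pmu_cpu_dying() pair above makes HT siblings converge on a single intel_shared_regs instance per core: every CPU allocates a candidate in the prepare step, the second sibling to come up frees its own copy and adopts the first one's, and a refcount lets the last CPU going down free the survivor. A compressed userspace model of that handshake; the two-CPUs-per-core layout and every name here are made up for illustration:

#include <stdio.h>
#include <stdlib.h>

struct shared {
        int core_id;
        int refcnt;
};

#define NR_CPUS 4
static struct shared *per_cpu_shared[NR_CPUS];

static void cpu_starting(int cpu)
{
        int core_id = cpu / 2;          /* pretend HT pairs: 0-1, 2-3 */
        int i;

        for (i = core_id * 2; i < core_id * 2 + 2; i++) {
                struct shared *pc = per_cpu_shared[i];

                if (pc && pc->core_id == core_id && i != cpu) {
                        free(per_cpu_shared[cpu]);      /* sibling won */
                        per_cpu_shared[cpu] = pc;
                        break;
                }
        }
        per_cpu_shared[cpu]->core_id = core_id;
        per_cpu_shared[cpu]->refcnt++;
}

static void cpu_dying(int cpu)
{
        struct shared *pc = per_cpu_shared[cpu];

        if (pc && --pc->refcnt == 0)
                free(pc);
        per_cpu_shared[cpu] = NULL;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                per_cpu_shared[cpu] = calloc(1, sizeof(struct shared));
                per_cpu_shared[cpu]->core_id = -1;      /* not claimed yet */
        }
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                cpu_starting(cpu);
        printf("core 0 refcnt: %d\n", per_cpu_shared[0]->refcnt); /* 2 */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                cpu_dying(cpu);
        return 0;
}

The core_id = -1 sentinel is what keeps a freshly allocated candidate from being mistaken for an already-claimed instance, which is why the patch sets it in allocate_shared_regs().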
@@ -1436,7 +1532,6 @@ static __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_nehalem_event_constraints;
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
- x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
@@ -1481,10 +1576,10 @@ static __init int intel_pmu_init(void)
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_westmere_event_constraints;
- x86_pmu.percore_constraints = intel_westmere_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu.extra_regs = intel_westmere_extra_regs;
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
@@ -1502,6 +1597,10 @@ static __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_events;
+ x86_pmu.extra_regs = intel_snb_extra_regs;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
@@ -1512,11 +1611,19 @@ static __init int intel_pmu_init(void)
break;
default:
- /*
- * default constraints for v2 and up
- */
- x86_pmu.event_constraints = intel_gen_event_constraints;
- pr_cont("generic architected perfmon, ");
+ switch (x86_pmu.version) {
+ case 1:
+ x86_pmu.event_constraints = intel_v1_event_constraints;
+ pr_cont("generic architected perfmon v1, ");
+ break;
+ default:
+ /*
+ * default constraints for v2 and up
+ */
+ x86_pmu.event_constraints = intel_gen_event_constraints;
+ pr_cont("generic architected perfmon, ");
+ break;
+ }
}
return 0;
}
@@ -1528,4 +1635,8 @@ static int intel_pmu_init(void)
return 0;
}
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
+{
+ return NULL;
+}
#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index bab491b8ee25..1b1ef3addcfd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void)
*/
perf_prepare_sample(&header, &data, event, &regs);
- if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
+ if (perf_output_begin(&handle, event, header.size * (top - at)))
return 1;
for (; at < top; at++) {
@@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
else
regs.flags &= ~PERF_EFLAGS_EXACT;
- if (perf_event_overflow(event, 1, &data, &regs))
+ if (perf_event_overflow(event, &data, &regs))
x86_pmu_stop(event, 0);
}
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index ead584fb6a7d..7809d2bcb209 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -554,13 +554,102 @@ static __initconst const u64 p4_hw_cache_event_ids
[ C(RESULT_MISS) ] = -1,
},
},
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
};
+/*
+ * Because Netburst is quite restricted in how many
+ * identical events may run simultaneously, we introduce event aliases,
+ * i.e. different events which have the same functionality but
+ * use non-intersecting resources (ESCR/CCCR/counter registers).
+ *
+ * This allows us to relax the restrictions a bit and run two or more
+ * identical events together.
+ *
+ * Never set any custom internal bits such as P4_CONFIG_HT,
+ * P4_CONFIG_ALIASABLE or the P4_PEBS_METRIC bits; they are
+ * either kept up to date automatically or not applicable at all.
+ */
+struct p4_event_alias {
+ u64 original;
+ u64 alternative;
+} p4_event_aliases[] = {
+ {
+ /*
+ * Non-halted cycles can be substituted with non-sleeping cycles (see
+ * Intel SDM Vol3b for details). We need this alias to be able
+ * to run nmi-watchdog and 'perf top' (or any other user space tool
+ * which is interested in running PERF_COUNT_HW_CPU_CYCLES)
+ * simultaneously.
+ */
+ .original =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
+ .alternative =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)|
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)|
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)|
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)|
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))|
+ p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT |
+ P4_CCCR_COMPARE),
+ },
+};
+
+static u64 p4_get_alias_event(u64 config)
+{
+ u64 config_match;
+ int i;
+
+ /*
+ * Only events carrying the special mark are allowed,
+ * so we can be sure the config didn't come in as a
+ * malformed RAW event.
+ */
+ if (!(config & P4_CONFIG_ALIASABLE))
+ return 0;
+
+ config_match = config & P4_CONFIG_EVENT_ALIAS_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) {
+ if (config_match == p4_event_aliases[i].original) {
+ config_match = p4_event_aliases[i].alternative;
+ break;
+ } else if (config_match == p4_event_aliases[i].alternative) {
+ config_match = p4_event_aliases[i].original;
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(p4_event_aliases))
+ return 0;
+
+ return config_match | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS);
+}
+
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
/* non-halted CPU clocks */
[PERF_COUNT_HW_CPU_CYCLES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
- P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
+ P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)) |
+ P4_CONFIG_ALIASABLE,
/*
* retired instructions
@@ -945,7 +1034,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
if (!x86_perf_event_set_period(event))
continue;
- if (perf_event_overflow(event, 1, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
@@ -1120,6 +1209,8 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
struct p4_event_bind *bind;
unsigned int i, thread, num;
int cntr_idx, escr_idx;
+ u64 config_alias;
+ int pass;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
@@ -1128,6 +1219,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
hwc = &cpuc->event_list[i]->hw;
thread = p4_ht_thread(cpu);
+ pass = 0;
+
+again:
+ /*
+ * It's possible to cycle endlessly between the
+ * original and alternative events if both are
+ * already scheduled, so bound the retries.
+ */
+ if (pass > 2)
+ goto done;
+
bind = p4_config_get_bind(hwc->config);
escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
if (unlikely(escr_idx == -1))
@@ -1141,8 +1243,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
}
cntr_idx = p4_next_cntr(thread, used_mask, bind);
- if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
- goto done;
+ if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) {
+ /*
+ * Check whether an event alias is still available.
+ */
+ config_alias = p4_get_alias_event(hwc->config);
+ if (!config_alias)
+ goto done;
+ hwc->config = config_alias;
+ pass++;
+ goto again;
+ }
p4_pmu_swap_config_ts(hwc, cpu);
if (assign)
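
p4_get_alias_event() above is a symmetric table lookup: a config carrying P4_CONFIG_ALIASABLE is swapped for its counterpart so a second identical event can be placed on disjoint ESCR/CCCR resources, and the scheduler's pass counter bounds the original/alternative ping-pong. A toy model of that swap-and-retry shape; the numeric configs are invented and not real ESCR encodings:

#include <stdint.h>
#include <stdio.h>

struct alias {
        uint64_t original;
        uint64_t alternative;
};

static const struct alias aliases[] = {
        { .original = 0x100, .alternative = 0x200 },
};

static uint64_t get_alias(uint64_t config)
{
        size_t i;

        for (i = 0; i < sizeof(aliases) / sizeof(aliases[0]); i++) {
                if (config == aliases[i].original)
                        return aliases[i].alternative;
                if (config == aliases[i].alternative)
                        return aliases[i].original;
        }
        return 0;       /* no alias: the caller gives up scheduling */
}

int main(void)
{
        uint64_t cfg = 0x100;
        int pass;

        /* Mirror the bounded retry: original -> alternative -> stop. */
        for (pass = 0; pass <= 2; pass++) {
                printf("pass %d: config 0x%llx\n",
                       pass, (unsigned long long)cfg);
                cfg = get_alias(cfg);
                if (!cfg)
                        break;
        }
        return 0;
}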
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 9aeb78a23de4..a621f3427685 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -134,6 +134,24 @@ static int __init add_bus_probe(void)
module_init(add_bus_probe);
#ifdef CONFIG_PCI
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct device_node *np;
+
+ for_each_node_by_type(np, "pci") {
+ const void *prop;
+ unsigned int bus_min;
+
+ prop = of_get_property(np, "bus-range", NULL);
+ if (!prop)
+ continue;
+ bus_min = be32_to_cpup(prop);
+ if (bus->number == bus_min)
+ return np;
+ }
+ return NULL;
+}
+
static int x86_of_pci_irq_enable(struct pci_dev *dev)
{
struct of_irq oirq;
@@ -165,50 +183,8 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
void __cpuinit x86_of_pci_init(void)
{
- struct device_node *np;
-
pcibios_enable_irq = x86_of_pci_irq_enable;
pcibios_disable_irq = x86_of_pci_irq_disable;
-
- for_each_node_by_type(np, "pci") {
- const void *prop;
- struct pci_bus *bus;
- unsigned int bus_min;
- struct device_node *child;
-
- prop = of_get_property(np, "bus-range", NULL);
- if (!prop)
- continue;
- bus_min = be32_to_cpup(prop);
-
- bus = pci_find_bus(0, bus_min);
- if (!bus) {
- printk(KERN_ERR "Can't find a node for bus %s.\n",
- np->full_name);
- continue;
- }
-
- if (bus->self)
- bus->self->dev.of_node = np;
- else
- bus->dev.of_node = np;
-
- for_each_child_of_node(np, child) {
- struct pci_dev *dev;
- u32 devfn;
-
- prop = of_get_property(child, "reg", NULL);
- if (!prop)
- continue;
-
- devfn = (be32_to_cpup(prop) >> 8) & 0xff;
- dev = pci_get_slot(bus, devfn);
- if (!dev)
- continue;
- dev->dev.of_node = child;
- pci_dev_put(dev);
- }
- }
}
#endif
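
The new pcibios_get_phb_of_node() above identifies a host bridge by comparing the bus number against the first cell of the device tree "bus-range" property, which is stored big-endian and is therefore read through be32_to_cpup(). A self-contained sketch of that conversion, using a userspace stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's be32_to_cpup(). */
static uint32_t be32_to_cpup(const uint32_t *p)
{
        const uint8_t *b = (const uint8_t *)p;

        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
        /* "bus-range" = <0 255> as it would sit in the flattened tree */
        uint32_t prop[2] = { 0, 0 };
        uint8_t *raw = (uint8_t *)prop;

        raw[7] = 0xff;          /* second cell: bus_max = 255 */

        printf("bus_min=%u bus_max=%u\n",
               be32_to_cpup(&prop[0]), be32_to_cpup(&prop[1]));
        return 0;
}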
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index e71c98d3c0d2..19853ad8afc5 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -105,34 +105,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
}
/*
- * We are returning from the irq stack and go to the previous one.
- * If the previous stack is also in the irq stack, then bp in the first
- * frame of the irq stack points to the previous, interrupted one.
- * Otherwise we have another level of indirection: We first save
- * the bp of the previous stack, then we switch the stack to the irq one
- * and save a new bp that links to the previous one.
- * (See save_args())
- */
-static inline unsigned long
-fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
- unsigned long *irq_stack, unsigned long *irq_stack_end)
-{
-#ifdef CONFIG_FRAME_POINTER
- struct stack_frame *frame = (struct stack_frame *)bp;
- unsigned long next;
-
- if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
- if (!probe_kernel_address(&frame->next_frame, next))
- return next;
- else
- WARN_ONCE(1, "Perf: bad frame pointer = %p in "
- "callchain\n", &frame->next_frame);
- }
-#endif
- return bp;
-}
-
-/*
* x86-64 can have up to three kernel stacks:
* process stack
* interrupt stack
@@ -155,9 +127,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
task = current;
if (!stack) {
- stack = &dummy;
- if (task && task != current)
+ if (regs)
+ stack = (unsigned long *)regs->sp;
+ else if (task && task != current)
stack = (unsigned long *)task->thread.sp;
+ else
+ stack = &dummy;
}
if (!bp)
@@ -205,8 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* pointer (index -1 to end) in the IRQ stack:
*/
stack = (unsigned long *) (irq_stack_end[-1]);
- bp = fixup_bp_irq_link(bp, stack, irq_stack,
- irq_stack_end);
irq_stack_end = NULL;
ops->stack(data, "EOI");
continue;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8a445a0c989e..e13329d800c8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -9,6 +9,8 @@
/*
* entry.S contains the system-call and fault low-level handling routines.
*
+ * Some of this is documented in Documentation/x86/entry_64.txt
+ *
* NOTE: This code handles signal-recognition, which happens every time
* after an interrupt and after each system call.
*
@@ -297,27 +299,26 @@ ENDPROC(native_usergs_sysret64)
.endm
/* save partial stack frame */
- .pushsection .kprobes.text, "ax"
-ENTRY(save_args)
- XCPT_FRAME
+ .macro SAVE_ARGS_IRQ
cld
- /*
- * start from rbp in pt_regs and jump over
- * return address.
- */
- movq_cfi rdi, RDI+8-RBP
- movq_cfi rsi, RSI+8-RBP
- movq_cfi rdx, RDX+8-RBP
- movq_cfi rcx, RCX+8-RBP
- movq_cfi rax, RAX+8-RBP
- movq_cfi r8, R8+8-RBP
- movq_cfi r9, R9+8-RBP
- movq_cfi r10, R10+8-RBP
- movq_cfi r11, R11+8-RBP
-
- leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
- movq_cfi rbp, 8 /* push %rbp */
- leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
+ /* start from rbp in pt_regs and jump over */
+ movq_cfi rdi, RDI-RBP
+ movq_cfi rsi, RSI-RBP
+ movq_cfi rdx, RDX-RBP
+ movq_cfi rcx, RCX-RBP
+ movq_cfi rax, RAX-RBP
+ movq_cfi r8, R8-RBP
+ movq_cfi r9, R9-RBP
+ movq_cfi r10, R10-RBP
+ movq_cfi r11, R11-RBP
+
+ /* Save rbp so that we can unwind from get_irq_regs() */
+ movq_cfi rbp, 0
+
+ /* Save previous stack value */
+ movq %rsp, %rsi
+
+ leaq -RBP(%rsp),%rdi /* arg1 for handler */
testl $3, CS(%rdi)
je 1f
SWAPGS
@@ -329,19 +330,14 @@ ENTRY(save_args)
*/
1: incl PER_CPU_VAR(irq_count)
jne 2f
- popq_cfi %rax /* move return address... */
mov PER_CPU_VAR(irq_stack_ptr),%rsp
EMPTY_FRAME 0
- pushq_cfi %rbp /* backlink for unwinder */
- pushq_cfi %rax /* ... to the new stack */
- /*
- * We entered an interrupt context - irqs are off:
- */
-2: TRACE_IRQS_OFF
- ret
- CFI_ENDPROC
-END(save_args)
- .popsection
+
+2: /* Store previous stack value */
+ pushq %rsi
+ /* We entered an interrupt context - irqs are off: */
+ TRACE_IRQS_OFF
+ .endm
ENTRY(save_rest)
PARTIAL_FRAME 1 REST_SKIP+8
@@ -473,7 +469,7 @@ ENTRY(system_call_after_swapgs)
* and short:
*/
ENABLE_INTERRUPTS(CLBR_NONE)
- SAVE_ARGS 8,1
+ SAVE_ARGS 8,0
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
@@ -508,7 +504,7 @@ sysret_check:
TRACE_IRQS_ON
movq RIP-ARGOFFSET(%rsp),%rcx
CFI_REGISTER rip,rcx
- RESTORE_ARGS 0,-ARG_SKIP,1
+ RESTORE_ARGS 1,-ARG_SKIP,0
/*CFI_REGISTER rflags,r11*/
movq PER_CPU_VAR(old_rsp), %rsp
USERGS_SYSRET64
@@ -791,7 +787,7 @@ END(interrupt)
/* reserve pt_regs for scratch regs and rbp */
subq $ORIG_RAX-RBP, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
- call save_args
+ SAVE_ARGS_IRQ
PARTIAL_FRAME 0
call \func
.endm
@@ -814,15 +810,14 @@ ret_from_intr:
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
decl PER_CPU_VAR(irq_count)
- leaveq
- CFI_RESTORE rbp
+ /* Restore saved previous stack */
+ popq %rsi
+ leaq 16(%rsi), %rsp
+
CFI_DEF_CFA_REGISTER rsp
- CFI_ADJUST_CFA_OFFSET -8
+ CFI_ADJUST_CFA_OFFSET -16
- /* we did not save rbx, restore only from ARGOFFSET */
- addq $8, %rsp
- CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp)
@@ -858,7 +853,7 @@ retint_restore_args: /* return to kernel space */
*/
TRACE_IRQS_IRETQ
restore_args:
- RESTORE_ARGS 0,8,0
+ RESTORE_ARGS 1,8,1
irq_return:
INTERRUPT_RETURN
@@ -991,11 +986,6 @@ apicinterrupt THRESHOLD_APIC_VECTOR \
apicinterrupt THERMAL_APIC_VECTOR \
thermal_interrupt smp_thermal_interrupt
-#ifdef CONFIG_X86_MCE
-apicinterrupt MCE_SELF_VECTOR \
- mce_self_interrupt smp_mce_self_interrupt
-#endif
-
#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
call_function_single_interrupt smp_call_function_single_interrupt
@@ -1121,6 +1111,8 @@ zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error
+zeroentry emulate_vsyscall do_emulate_vsyscall
+
/* Reload gs selector with exception handling */
/* edi: new selector */
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 6781765b3a0d..4aecc54236a9 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -4,6 +4,7 @@
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/i8253.h>
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
@@ -12,8 +13,8 @@
#include <linux/io.h>
#include <asm/fixmap.h>
-#include <asm/i8253.h>
#include <asm/hpet.h>
+#include <asm/time.h>
#define HPET_MASK CLOCKSOURCE_MASK(32)
@@ -71,7 +72,7 @@ static inline void hpet_set_mapping(void)
{
hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
- __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
+ __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
#endif
}
@@ -738,13 +739,6 @@ static cycle_t read_hpet(struct clocksource *cs)
return (cycle_t)hpet_readl(HPET_COUNTER);
}
-#ifdef CONFIG_X86_64
-static cycle_t __vsyscall_fn vread_hpet(void)
-{
- return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
-}
-#endif
-
static struct clocksource clocksource_hpet = {
.name = "hpet",
.rating = 250,
@@ -753,7 +747,7 @@ static struct clocksource clocksource_hpet = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = hpet_resume_counter,
#ifdef CONFIG_X86_64
- .vread = vread_hpet,
+ .archdata = { .vclock_mode = VCLOCK_HPET },
#endif
};
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index fb66dc9e36cb..f2b96de3c7c1 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -3,113 +3,24 @@
*
*/
#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/timex.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
+#include <linux/i8253.h>
-#include <asm/i8253.h>
#include <asm/hpet.h>
+#include <asm/time.h>
#include <asm/smp.h>
-DEFINE_RAW_SPINLOCK(i8253_lock);
-EXPORT_SYMBOL(i8253_lock);
-
/*
* HPET replaces the PIT, when enabled. So we need to know, which of
* the two timers is used
*/
struct clock_event_device *global_clock_event;
-/*
- * Initialize the PIT timer.
- *
- * This is also called after resume to bring the PIT into operation again.
- */
-static void init_pit_timer(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- raw_spin_lock(&i8253_lock);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* binary, mode 2, LSB/MSB, ch 0 */
- outb_pit(0x34, PIT_MODE);
- outb_pit(LATCH & 0xff , PIT_CH0); /* LSB */
- outb_pit(LATCH >> 8 , PIT_CH0); /* MSB */
- break;
-
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
- evt->mode == CLOCK_EVT_MODE_ONESHOT) {
- outb_pit(0x30, PIT_MODE);
- outb_pit(0, PIT_CH0);
- outb_pit(0, PIT_CH0);
- }
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* One shot setup */
- outb_pit(0x38, PIT_MODE);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- /* Nothing to do here */
- break;
- }
- raw_spin_unlock(&i8253_lock);
-}
-
-/*
- * Program the next event in oneshot mode
- *
- * Delta is given in PIT ticks
- */
-static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
-{
- raw_spin_lock(&i8253_lock);
- outb_pit(delta & 0xff , PIT_CH0); /* LSB */
- outb_pit(delta >> 8 , PIT_CH0); /* MSB */
- raw_spin_unlock(&i8253_lock);
-
- return 0;
-}
-
-/*
- * On UP the PIT can serve all of the possible timer functions. On SMP systems
- * it can be solely used for the global tick.
- *
- * The profiling and update capabilities are switched off once the local apic is
- * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
- * !using_apic_timer decisions in do_timer_interrupt_hook()
- */
-static struct clock_event_device pit_ce = {
- .name = "pit",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = init_pit_timer,
- .set_next_event = pit_next_event,
- .irq = 0,
-};
-
-/*
- * Initialize the conversion factor and the min/max deltas of the clock event
- * structure and register the clock event source with the framework.
- */
void __init setup_pit_timer(void)
{
- /*
- * Start pit with the boot cpu mask and make it global after the
- * IO_APIC has been initialized.
- */
- pit_ce.cpumask = cpumask_of(smp_processor_id());
-
- clockevents_config_and_register(&pit_ce, CLOCK_TICK_RATE, 0xF, 0x7FFF);
- global_clock_event = &pit_ce;
+ clockevent_i8253_init(true);
+ global_clock_event = &i8253_clockevent;
}
#ifndef CONFIG_X86_64
@@ -123,7 +34,7 @@ static int __init init_pit_clocksource(void)
* - when local APIC timer is active (PIT is switched off)
*/
if (num_possible_cpus() > 1 || is_hpet_enabled() ||
- pit_ce.mode != CLOCK_EVT_MODE_PERIODIC)
+ i8253_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
return 0;
return clocksource_i8253_init();
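
For reference, the PIT programming that the deleted init_pit_timer() performed, now consolidated in the shared i8253 driver, reduces to three port writes: a mode byte, then the 16-bit reload value split into LSB and MSB. A sketch with outb() stubbed out, assuming the usual 1193182 Hz input clock and HZ=100:

#include <stdint.h>
#include <stdio.h>

#define PIT_HZ  1193182
#define HZ      100
#define LATCH   ((PIT_HZ + HZ / 2) / HZ)        /* rounded divisor */

/* Stub: a real driver writes to I/O ports 0x43 (mode) and 0x40 (ch 0). */
static void outb_pit(uint8_t val, const char *port)
{
        printf("outb 0x%02x -> %s\n", val, port);
}

int main(void)
{
        outb_pit(0x34, "PIT_MODE");             /* binary, mode 2, LSB/MSB, ch 0 */
        outb_pit(LATCH & 0xff, "PIT_CH0");      /* LSB */
        outb_pit(LATCH >> 8, "PIT_CH0");        /* MSB */
        return 0;
}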
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index f470e4ef993e..f09d4bbe2d2d 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -272,9 +272,6 @@ static void __init apic_intr_init(void)
#ifdef CONFIG_X86_MCE_THRESHOLD
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC)
- alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
-#endif
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/* self generated IPI for local APIC timer */
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 5f9ecff328b5..00354d4919a9 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -608,7 +608,7 @@ int kgdb_arch_init(void)
return register_die_notifier(&kgdb_notifier);
}
-static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
+static void kgdb_hw_overflow_handler(struct perf_event *event,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct task_struct *tsk = current;
@@ -638,7 +638,7 @@ void kgdb_arch_late(void)
for (i = 0; i < HBP_NUM; i++) {
if (breakinfo[i].pev)
continue;
- breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
+ breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
if (IS_ERR((void * __force)breakinfo[i].pev)) {
printk(KERN_ERR "kgdb: Could not allocate hw"
"breakpoints\nDisabling the kernel debugger\n");
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 33c07b0b122e..a9c2116001d6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -51,6 +51,15 @@ static int parse_no_kvmapf(char *arg)
early_param("no-kvmapf", parse_no_kvmapf);
+static int steal_acc = 1;
+static int parse_no_stealacc(char *arg)
+{
+ steal_acc = 0;
+ return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+
struct kvm_para_state {
u8 mmu_queue[MMU_QUEUE_SIZE];
int mmu_queue_len;
@@ -58,6 +67,8 @@ struct kvm_para_state {
static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+static int has_steal_clock = 0;
static struct kvm_para_state *kvm_para_state(void)
{
@@ -441,6 +452,21 @@ static void __init paravirt_ops_setup(void)
#endif
}
+static void kvm_register_steal_time(void)
+{
+ int cpu = smp_processor_id();
+ struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
+
+ if (!has_steal_clock)
+ return;
+
+ memset(st, 0, sizeof(*st));
+
+ wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
+ printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
+ cpu, __pa(st));
+}
+
void __cpuinit kvm_guest_cpu_init(void)
{
if (!kvm_para_available())
@@ -457,6 +483,9 @@ void __cpuinit kvm_guest_cpu_init(void)
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
smp_processor_id());
}
+
+ if (has_steal_clock)
+ kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void *unused)
@@ -483,6 +512,31 @@ static struct notifier_block kvm_pv_reboot_nb = {
.notifier_call = kvm_pv_reboot_notify,
};
+static u64 kvm_steal_clock(int cpu)
+{
+ u64 steal;
+ struct kvm_steal_time *src;
+ int version;
+
+ src = &per_cpu(steal_time, cpu);
+ do {
+ version = src->version;
+ rmb();
+ steal = src->steal;
+ rmb();
+ } while ((version & 1) || (version != src->version));
+
+ return steal;
+}
+
+void kvm_disable_steal_time(void)
+{
+ if (!has_steal_clock)
+ return;
+
+ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+}
+
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
@@ -500,6 +554,7 @@ static void __cpuinit kvm_guest_cpu_online(void *dummy)
static void kvm_guest_cpu_offline(void *dummy)
{
+ kvm_disable_steal_time();
kvm_pv_disable_apf(NULL);
apf_task_wake_all();
}
@@ -548,6 +603,11 @@ void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
x86_init.irqs.trap_init = kvm_apf_trap_init;
+ if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ has_steal_clock = 1;
+ pv_time_ops.steal_clock = kvm_steal_clock;
+ }
+
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
register_cpu_notifier(&kvm_cpu_notifier);
@@ -555,3 +615,15 @@ void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
}
+
+static __init int activate_jump_labels(void)
+{
+ if (has_steal_clock) {
+ jump_label_inc(&paravirt_steal_enabled);
+ if (steal_acc)
+ jump_label_inc(&paravirt_steal_rq_enabled);
+ }
+
+ return 0;
+}
+arch_initcall(activate_jump_labels);
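
kvm_steal_clock() above is a lock-free version-counter read: the host makes 'version' odd while it rewrites 'steal', so the guest retries until it sees an even and unchanged version. A userspace sketch of the same loop; plain compiler barriers stand in for the kernel's rmb(), so a real implementation needs the stronger ordering:

#include <stdint.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct steal_time {
        volatile uint32_t version;
        volatile uint64_t steal;
};

static uint64_t read_steal(struct steal_time *src)
{
        uint64_t steal;
        uint32_t version;

        do {
                version = src->version;
                barrier();
                steal = src->steal;     /* torn reads are retried */
                barrier();
        } while ((version & 1) || (version != src->version));

        return steal;
}

int main(void)
{
        struct steal_time st = { .version = 2, .steal = 12345 };

        printf("steal time: %llu ns\n",
               (unsigned long long)read_steal(&st));
        return 0;
}

The odd/even convention is the same one the kernel's seqcounts use: the low bit doubles as an "update in progress" flag.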
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 6389a6bca11b..c1a0188e29ae 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -160,6 +160,7 @@ static void __cpuinit kvm_setup_secondary_clock(void)
static void kvm_crash_shutdown(struct pt_regs *regs)
{
native_write_msr(msr_kvm_system_time, 0, 0);
+ kvm_disable_steal_time();
native_machine_crash_shutdown(regs);
}
#endif
@@ -167,6 +168,7 @@ static void kvm_crash_shutdown(struct pt_regs *regs)
static void kvm_shutdown(void)
{
native_write_msr(msr_kvm_system_time, 0, 0);
+ kvm_disable_steal_time();
native_machine_shutdown();
}
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index c5610384ab16..591be0ee1934 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -66,8 +66,8 @@ struct microcode_amd {
unsigned int mpb[0];
};
-#define UCODE_CONTAINER_SECTION_HDR 8
-#define UCODE_CONTAINER_HEADER_SIZE 12
+#define SECTION_HDR_SIZE 8
+#define CONTAINER_HDR_SZ 12
static struct equiv_cpu_entry *equiv_cpu_table;
@@ -157,7 +157,7 @@ static int apply_microcode_amd(int cpu)
static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
- unsigned int max_size, actual_size;
+ u32 max_size, actual_size;
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
@@ -175,9 +175,9 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
break;
}
- actual_size = buf[4] + (buf[5] << 8);
+ actual_size = *(u32 *)(buf + 4);
- if (actual_size > size || actual_size > max_size) {
+ if (actual_size + SECTION_HDR_SIZE > size || actual_size > max_size) {
pr_err("section size mismatch\n");
return 0;
}
@@ -191,7 +191,7 @@ get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
struct microcode_header_amd *mc = NULL;
unsigned int actual_size = 0;
- if (buf[0] != UCODE_UCODE_TYPE) {
+ if (*(u32 *)buf != UCODE_UCODE_TYPE) {
pr_err("invalid type field in container file section header\n");
goto out;
}
@@ -204,8 +204,8 @@ get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
if (!mc)
goto out;
- get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size);
- *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR;
+ get_ucode_data(mc, buf + SECTION_HDR_SIZE, actual_size);
+ *mc_size = actual_size + SECTION_HDR_SIZE;
out:
return mc;
@@ -229,9 +229,10 @@ static int install_equiv_cpu_table(const u8 *buf)
return -ENOMEM;
}
- get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size);
+ get_ucode_data(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
- return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
+ /* add header length */
+ return size + CONTAINER_HDR_SZ;
}
static void free_equiv_cpu_table(void)
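
The microcode_amd changes above correct two parsing assumptions: the section type and patch size are full 32-bit fields rather than single bytes, and the bounds check must cover the 8-byte section header itself, not just the body. A userspace sketch of the corrected check; little-endian x86 layout is assumed and the buffer is fabricated:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UCODE_UCODE_TYPE        1
#define SECTION_HDR_SIZE        8

static int parse_section(const uint8_t *buf, uint32_t size)
{
        uint32_t type, actual_size;

        memcpy(&type, buf, 4);                  /* bytes 0..3: type */
        memcpy(&actual_size, buf + 4, 4);       /* bytes 4..7: body size */

        if (type != UCODE_UCODE_TYPE) {
                fprintf(stderr, "invalid section type\n");
                return -1;
        }
        if (actual_size + SECTION_HDR_SIZE > size) {
                fprintf(stderr, "section size mismatch\n");
                return -1;
        }
        printf("patch body: %u bytes\n", actual_size);
        return 0;
}

int main(void)
{
        uint8_t buf[16] = { 0 };
        uint32_t v = UCODE_UCODE_TYPE;

        memcpy(buf, &v, 4);
        v = 8;                          /* claim an 8-byte patch body */
        memcpy(buf + 4, &v, 4);
        return parse_section(buf, sizeof(buf)) ? 1 : 0;
}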
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 52f256f2cc81..925179f871de 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -45,21 +45,6 @@ void *module_alloc(unsigned long size)
-1, __builtin_return_address(0));
}
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
@@ -100,17 +85,6 @@ int apply_relocate(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
#else /*X86_64*/
int apply_relocate_add(Elf64_Shdr *sechdrs,
const char *strtab,
@@ -181,17 +155,6 @@ overflow:
me->name);
return -ENOEXEC;
}
-
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "non add relocation not supported\n");
- return -ENOSYS;
-}
-
#endif
int module_finalize(const Elf_Ehdr *hdr,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 869e1aeeb71b..613a7931ecc1 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -202,6 +202,14 @@ static void native_flush_tlb_single(unsigned long addr)
__native_flush_tlb_single(addr);
}
+struct jump_label_key paravirt_steal_enabled;
+struct jump_label_key paravirt_steal_rq_enabled;
+
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);
@@ -307,6 +315,7 @@ struct pv_init_ops pv_init_ops = {
struct pv_time_ops pv_time_ops = {
.sched_clock = native_sched_clock,
+ .steal_clock = native_steal_clock,
};
struct pv_irq_ops pv_irq_ops = {
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 807c2a2b80f1..82528799c5de 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target,
return ret;
}
-static void ptrace_triggered(struct perf_event *bp, int nmi,
+static void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -715,7 +715,8 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
- bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
+ NULL, tsk);
/*
* CHECKME: the previous code returned -EIO if the addr wasn't
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 8bbe8c56916d..b78643d0f9a5 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -10,7 +10,7 @@
static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
- u8 config, rev;
+ u8 config;
u16 word;
/* BIOS may enable hardware IRQ balancing for
@@ -18,8 +18,7 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
* based platforms.
* Disable SW irqbalance/affinity on those platforms.
*/
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
- if (rev > 0x9)
+ if (dev->revision > 0x9)
return;
/* enable access to config space*/
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 14eed214b584..9242436e9937 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -427,6 +427,22 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
},
},
+ { /* Handle problems with rebooting on the Latitude E5420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E5420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+ },
+ },
+ { /* Handle problems with rebooting on the Latitude E6420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E6420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 41235531b11c..36818f8ec2be 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -97,6 +97,8 @@ relocate_kernel:
ret
identity_mapped:
+ /* set return address to 0 if not preserving context */
+ pushl $0
/* store the start address on the stack */
pushl %edx
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 4de8f5b3d476..7a6f3b3be3cf 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -100,6 +100,8 @@ relocate_kernel:
ret
identity_mapped:
+ /* set return address to 0 if not preserving context */
+ pushq $0
/* store the start address on the stack */
pushq %rdx
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 40a24932a8a1..54ddaeb221c1 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -485,17 +485,18 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
asmlinkage int
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
- set_restore_sigmask();
+ set_restore_sigmask();
return -ERESTARTNOHAND;
}
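
The rewritten sys_sigsuspend() above builds the temporary mask on the stack, installs it through set_current_blocked(), and relies on set_restore_sigmask() to bring the saved mask back on the way out. The userspace analogue, where libc's sigsuspend() performs the same save/swap/sleep/restore sequence in one call:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_sigusr1(int sig)
{
        (void)sig;      /* the handler only needs to interrupt the wait */
}

int main(void)
{
        sigset_t wait_mask;

        signal(SIGUSR1, on_sigusr1);
        sigemptyset(&wait_mask);        /* block nothing while waiting */

        printf("pid %d: send me SIGUSR1\n", (int)getpid());
        sigsuspend(&wait_mask);         /* returns -1/EINTR on delivery */
        puts("woken by signal, original mask restored");
        return 0;
}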
@@ -572,10 +573,7 @@ unsigned long sys_sigreturn(struct pt_regs *regs)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &ax))
goto badframe;
@@ -653,11 +651,15 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
static int
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs)
+ struct pt_regs *regs)
{
int usig = signr_convert(sig);
+ sigset_t *set = &current->blocked;
int ret;
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+ set = &current->saved_sigmask;
+
/* Set up the stack frame */
if (is_ia32) {
if (ka->sa.sa_flags & SA_SIGINFO)
@@ -672,12 +674,13 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return -EFAULT;
}
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
return ret;
}
static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
- sigset_t *oldset, struct pt_regs *regs)
+ struct pt_regs *regs)
{
sigset_t blocked;
int ret;
@@ -712,20 +715,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
regs->flags &= ~X86_EFLAGS_TF;
- ret = setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = setup_rt_frame(sig, ka, info, regs);
if (ret)
return ret;
-#ifdef CONFIG_X86_64
- /*
- * This has nothing to do with segment registers,
- * despite the name. This magic affects uaccess.h
- * macros' behavior. Reset it to the normal setting.
- */
- set_fs(USER_DS);
-#endif
-
/*
* Clear the direction flag as per the ABI for function entry.
*/
@@ -767,7 +761,6 @@ static void do_signal(struct pt_regs *regs)
struct k_sigaction ka;
siginfo_t info;
int signr;
- sigset_t *oldset;
/*
* We want the common case to go fast, which is why we may in certain
@@ -779,23 +772,10 @@ static void do_signal(struct pt_regs *regs)
if (!user_mode(regs))
return;
- if (current_thread_info()->status & TS_RESTORE_SIGMASK)
- oldset = &current->saved_sigmask;
- else
- oldset = &current->blocked;
-
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
- if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
- /*
- * A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- }
+ handle_signal(signr, &info, &ka, regs);
return;
}
@@ -823,7 +803,7 @@ static void do_signal(struct pt_regs *regs)
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ set_current_blocked(&current->saved_sigmask);
}
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9fd3137230d4..9f548cb4a958 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -438,7 +438,7 @@ static void impress_friends(void)
void __inquire_remote_apic(int apicid)
{
unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
- char *names[] = { "ID", "VERSION", "SPIV" };
+ const char * const names[] = { "ID", "VERSION", "SPIV" };
int timeout;
u32 status;
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 55d9bc03f696..fdd0c6430e5a 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace)
}
EXPORT_SYMBOL_GPL(save_stack_trace);
-void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 30ac65df7d4e..e07a2fc876b9 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -36,6 +36,7 @@
#include <asm/bootparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/swiotlb.h>
#include <asm/fixmap.h>
#include <asm/proto.h>
#include <asm/setup.h>
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 00cbb272627f..5a64d057be57 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -11,13 +11,13 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/mca.h>
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/i8259.h>
-#include <asm/i8253.h>
#include <asm/timer.h>
#include <asm/hpet.h>
#include <asm/time.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b9b67166f9de..fbc097a085ca 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -872,6 +872,12 @@ void __init trap_init(void)
set_bit(SYSCALL_VECTOR, used_vectors);
#endif
+#ifdef CONFIG_X86_64
+ BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors));
+ set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall);
+ set_bit(VSYSCALL_EMU_VECTOR, used_vectors);
+#endif
+
/*
* Should be a barrier for any external CPU state:
*/
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6cc6922262af..db483369f10b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -5,7 +5,6 @@
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
-#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
@@ -777,7 +776,7 @@ static struct clocksource clocksource_tsc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
- .vread = vread_tsc,
+ .archdata = { .vclock_mode = VCLOCK_TSC },
#endif
};
@@ -800,27 +799,6 @@ void mark_tsc_unstable(char *reason)
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
- d->ident);
- tsc_unstable = 1;
- return 0;
-}
-
-/* List of systems that have known TSC problems */
-static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
- {
- .callback = dmi_mark_tsc_unstable,
- .ident = "IBM Thinkpad 380XD",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
- DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
- },
- },
- {}
-};
-
static void __init check_system_tsc_reliable(void)
{
#ifdef CONFIG_MGEODE_LX
@@ -1010,8 +988,6 @@ void __init tsc_init(void)
lpj_fine = lpj;
use_tsc_delay();
- /* Check and install the TSC clocksource */
- dmi_check_system(bad_tsc_dmi_table);
if (unsynchronized_tsc())
mark_tsc_unstable("TSCs unsynchronized");
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 89aed99aafce..4aa9c54a9b76 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -161,50 +161,47 @@ SECTIONS
#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
-#define EMIT_VVAR(x, offset) .vsyscall_var_ ## x \
- ADDR(.vsyscall_0) + offset \
- : AT(VLOAD(.vsyscall_var_ ## x)) { \
- *(.vsyscall_var_ ## x) \
- } \
- x = VVIRT(.vsyscall_var_ ## x);
. = ALIGN(4096);
__vsyscall_0 = .;
. = VSYSCALL_ADDR;
- .vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
+ .vsyscall : AT(VLOAD(.vsyscall)) {
*(.vsyscall_0)
- } :user
- . = ALIGN(L1_CACHE_BYTES);
- .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
- *(.vsyscall_fn)
- }
-
- .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
+ . = 1024;
*(.vsyscall_1)
- }
- .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
- *(.vsyscall_2)
- }
- .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
- *(.vsyscall_3)
- }
-
-#define __VVAR_KERNEL_LDS
-#include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
+ . = 2048;
+ *(.vsyscall_2)
- . = __vsyscall_0 + PAGE_SIZE;
+ . = 4096; /* Pad the whole page. */
+ } :user =0xcc
+ . = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE);
#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT
+
+ __vvar_page = .;
+
+ .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
+
+ /* Place all vvars at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset) \
+ . = offset; \
+ *(.vvar_ ## name)
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR
+ } :data
+
+ . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
+
#endif /* CONFIG_X86_64 */
/* Init code and data - will be freed after init */
diff --git a/arch/x86/kernel/vread_tsc_64.c b/arch/x86/kernel/vread_tsc_64.c
deleted file mode 100644
index a81aa9e9894c..000000000000
--- a/arch/x86/kernel/vread_tsc_64.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* This code runs in userspace. */
-
-#define DISABLE_BRANCH_PROFILING
-#include <asm/vgtod.h>
-
-notrace cycle_t __vsyscall_fn vread_tsc(void)
-{
- cycle_t ret;
- u64 last;
-
- /*
- * Empirically, a fence (of type that depends on the CPU)
- * before rdtsc is enough to ensure that rdtsc is ordered
- * with respect to loads. The various CPU manuals are unclear
- * as to whether rdtsc can be reordered with later loads,
- * but no one has ever seen it happen.
- */
- rdtsc_barrier();
- ret = (cycle_t)vget_cycles();
-
- last = VVAR(vsyscall_gtod_data).clock.cycle_last;
-
- if (likely(ret >= last))
- return ret;
-
- /*
- * GCC likes to generate cmov here, but this branch is extremely
- * predictable (it's just a funciton of time and the likely is
- * very likely) and there's a data dependence, so force GCC
- * to generate a branch instead. I don't barrier() because
- * we don't actually need a barrier, and if this function
- * ever gets inlined it will generate worse code.
- */
- asm volatile ("");
- return last;
-}
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 3e682184d76c..dda7dff9cef7 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -2,6 +2,8 @@
* Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright 2003 Andi Kleen, SuSE Labs.
*
+ * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
+ *
* Thanks to hpa@transmeta.com for some useful hint.
* Special thanks to Ingo Molnar for his early experience with
* a different vsyscall implementation for Linux/IA32 and for the name.
@@ -11,10 +13,9 @@
* vsyscalls. One vsyscall can reserve more than 1 slot to avoid
* jumping out of line if necessary. We cannot add more with this
* mechanism because older kernels won't return -ENOSYS.
- * If we want more than four we need a vDSO.
*
- * Note: the concept clashes with user mode linux. If you use UML and
- * want per guest time just set the kernel.vsyscall64 sysctl to 0.
+ * Note: the concept clashes with user mode linux. UML users should
+ * use the vDSO.
*/
/* Disable profiling for userspace code: */
@@ -32,9 +33,12 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
+#include <linux/syscalls.h>
+#include <linux/ratelimit.h>
#include <asm/vsyscall.h>
#include <asm/pgtable.h>
+#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
@@ -44,16 +48,12 @@
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
-
-#define __vsyscall(nr) \
- __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
-#define __syscall_clobber "r11","cx","memory"
+#include <asm/traps.h>
DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
{
.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
- .sysctl_enabled = 1,
};
void update_vsyscall_tz(void)
@@ -72,179 +72,149 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
unsigned long flags;
write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+
/* copy vsyscall data */
- vsyscall_gtod_data.clock.vread = clock->vread;
- vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
- vsyscall_gtod_data.clock.mask = clock->mask;
- vsyscall_gtod_data.clock.mult = mult;
- vsyscall_gtod_data.clock.shift = clock->shift;
- vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
- vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
- vsyscall_gtod_data.wall_to_monotonic = *wtm;
- vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+ vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
+ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
+ vsyscall_gtod_data.clock.mask = clock->mask;
+ vsyscall_gtod_data.clock.mult = mult;
+ vsyscall_gtod_data.clock.shift = clock->shift;
+ vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
+ vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
+ vsyscall_gtod_data.wall_to_monotonic = *wtm;
+ vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
-/* RED-PEN may want to readd seq locking, but then the variable should be
- * write-once.
- */
-static __always_inline void do_get_tz(struct timezone * tz)
+static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+ const char *message)
{
- *tz = VVAR(vsyscall_gtod_data).sys_tz;
-}
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ struct task_struct *tsk;
-static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-{
- int ret;
- asm volatile("syscall"
- : "=a" (ret)
- : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
- : __syscall_clobber );
- return ret;
-}
+ if (!show_unhandled_signals || !__ratelimit(&rs))
+ return;
-static __always_inline long time_syscall(long *t)
-{
- long secs;
- asm volatile("syscall"
- : "=a" (secs)
- : "0" (__NR_time),"D" (t) : __syscall_clobber);
- return secs;
-}
+ tsk = current;
-static __always_inline void do_vgettimeofday(struct timeval * tv)
-{
- cycle_t now, base, mask, cycle_delta;
- unsigned seq;
- unsigned long mult, shift, nsec;
- cycle_t (*vread)(void);
- do {
- seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
-
- vread = VVAR(vsyscall_gtod_data).clock.vread;
- if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
- !vread)) {
- gettimeofday(tv,NULL);
- return;
- }
-
- now = vread();
- base = VVAR(vsyscall_gtod_data).clock.cycle_last;
- mask = VVAR(vsyscall_gtod_data).clock.mask;
- mult = VVAR(vsyscall_gtod_data).clock.mult;
- shift = VVAR(vsyscall_gtod_data).clock.shift;
-
- tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
- nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
- } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
-
- /* calculate interval: */
- cycle_delta = (now - base) & mask;
- /* convert to nsecs: */
- nsec += (cycle_delta * mult) >> shift;
-
- while (nsec >= NSEC_PER_SEC) {
- tv->tv_sec += 1;
- nsec -= NSEC_PER_SEC;
- }
- tv->tv_usec = nsec / NSEC_PER_USEC;
+ printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+ level, tsk->comm, task_pid_nr(tsk),
+ message, regs->ip - 2, regs->cs,
+ regs->sp, regs->ax, regs->si, regs->di);
}
-int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+static int addr_to_vsyscall_nr(unsigned long addr)
{
- if (tv)
- do_vgettimeofday(tv);
- if (tz)
- do_get_tz(tz);
- return 0;
-}
+ int nr;
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-time_t __vsyscall(1) vtime(time_t *t)
-{
- unsigned seq;
- time_t result;
- if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
- return time_syscall(t);
+ if ((addr & ~0xC00UL) != VSYSCALL_START)
+ return -EINVAL;
- do {
- seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
+ nr = (addr & 0xC00UL) >> 10;
+ if (nr >= 3)
+ return -EINVAL;
- result = VVAR(vsyscall_gtod_data).wall_time_sec;
+ return nr;
+}
- } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
+void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
+{
+ struct task_struct *tsk;
+ unsigned long caller;
+ int vsyscall_nr;
+ long ret;
+
+ local_irq_enable();
+
+ /*
+ * Real 64-bit user mode code has cs == __USER_CS. Anything else
+ * is bogus.
+ */
+ if (regs->cs != __USER_CS) {
+ /*
+ * If we trapped from kernel mode, we might as well OOPS now
+ * instead of returning to some random address and OOPSing
+ * then.
+ */
+ BUG_ON(!user_mode(regs));
+
+ /* Compat mode and non-compat 32-bit CS should both segfault. */
+ warn_bad_vsyscall(KERN_WARNING, regs,
+ "illegal int 0xcc from 32-bit mode");
+ goto sigsegv;
+ }
- if (t)
- *t = result;
- return result;
-}
+ /*
+ * x86-ism here: regs->ip points to the instruction after the int 0xcc,
+ * and int 0xcc is two bytes long.
+ */
+ vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2);
+ if (vsyscall_nr < 0) {
+ warn_bad_vsyscall(KERN_WARNING, regs,
+ "illegal int 0xcc (exploit attempt?)");
+ goto sigsegv;
+ }
-/* Fast way to get current CPU and node.
- This helps to do per node and per CPU caches in user space.
- The result is not guaranteed without CPU affinity, but usually
- works out because the scheduler tries to keep a thread on the same
- CPU.
+ if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
+ warn_bad_vsyscall(KERN_WARNING, regs, "int 0xcc with bad stack (exploit attempt?)");
+ goto sigsegv;
+ }
- tcache must point to a two element sized long array.
- All arguments can be NULL. */
-long __vsyscall(2)
-vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-{
- unsigned int p;
- unsigned long j = 0;
-
- /* Fast cache - only recompute value once per jiffies and avoid
- relatively costly rdtscp/cpuid otherwise.
- This works because the scheduler usually keeps the process
- on the same CPU and this syscall doesn't guarantee its
- results anyways.
- We do this here because otherwise user space would do it on
- its own in a likely inferior way (no access to jiffies).
- If you don't like it pass NULL. */
- if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) {
- p = tcache->blob[1];
- } else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
- /* Load per CPU data from RDTSCP */
- native_read_tscp(&p);
- } else {
- /* Load per CPU data from GDT */
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ tsk = current;
+ if (seccomp_mode(&tsk->seccomp))
+ do_exit(SIGKILL);
+
+ switch (vsyscall_nr) {
+ case 0:
+ ret = sys_gettimeofday(
+ (struct timeval __user *)regs->di,
+ (struct timezone __user *)regs->si);
+ break;
+
+ case 1:
+ ret = sys_time((time_t __user *)regs->di);
+ break;
+
+ case 2:
+ ret = sys_getcpu((unsigned __user *)regs->di,
+ (unsigned __user *)regs->si,
+ 0);
+ break;
}
- if (tcache) {
- tcache->blob[0] = j;
- tcache->blob[1] = p;
+
+ if (ret == -EFAULT) {
+ /*
+ * Bad news -- userspace fed a bad pointer to a vsyscall.
+ *
+ * With a real vsyscall, that would have caused SIGSEGV.
+ * To make writing reliable exploits using the emulated
+ * vsyscalls harder, generate SIGSEGV here as well.
+ */
+ warn_bad_vsyscall(KERN_INFO, regs,
+ "vsyscall fault (exploit attempt?)");
+ goto sigsegv;
}
- if (cpu)
- *cpu = p & 0xfff;
- if (node)
- *node = p >> 12;
- return 0;
-}
-static long __vsyscall(3) venosys_1(void)
-{
- return -ENOSYS;
-}
+ regs->ax = ret;
-#ifdef CONFIG_SYSCTL
-static ctl_table kernel_table2[] = {
- { .procname = "vsyscall64",
- .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec },
- {}
-};
+ /* Emulate a ret instruction. */
+ regs->ip = caller;
+ regs->sp += 8;
-static ctl_table kernel_root_table2[] = {
- { .procname = "kernel", .mode = 0555,
- .child = kernel_table2 },
- {}
-};
-#endif
+ local_irq_disable();
+ return;
+
+sigsegv:
+ regs->ip -= 2; /* The faulting instruction should be the int 0xcc. */
+ force_sig(SIGSEGV, current);
+ local_irq_disable();
+}
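
Given the legacy page at VSYSCALL_START with one 1024-byte slot per call,
addr_to_vsyscall_nr() is pure bit arithmetic. A standalone check of the
mapping (VSYSCALL_START hard-coded here to the historical value, for
illustration only):

	#include <assert.h>

	#define VSYSCALL_START	0xffffffffff600000UL

	/* Mirror of addr_to_vsyscall_nr(): bits 10-11 select the slot. */
	static int vsyscall_nr(unsigned long addr)
	{
		int nr;

		if ((addr & ~0xC00UL) != VSYSCALL_START)
			return -1;
		nr = (int)((addr & 0xC00UL) >> 10);
		return nr >= 3 ? -1 : nr;
	}

	int main(void)
	{
		assert(vsyscall_nr(VSYSCALL_START + 0x000) == 0); /* gettimeofday */
		assert(vsyscall_nr(VSYSCALL_START + 0x400) == 1); /* time */
		assert(vsyscall_nr(VSYSCALL_START + 0x800) == 2); /* getcpu */
		assert(vsyscall_nr(VSYSCALL_START + 0xc00) == -1); /* nr >= 3 */
		return 0;
	}
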
-/* Assume __initcall executes before all user space. Hopefully kmod
- doesn't violate that. We'll find out if it does. */
+/*
+ * Assume __initcall executes before all user space. Hopefully kmod
+ * doesn't violate that. We'll find out if it does.
+ */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
unsigned long d;
@@ -255,13 +225,15 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);
- /* Store cpu number in limit so that it can be loaded quickly
- in user space in vgetcpu.
- 12 bits for the CPU and 8 bits for the node. */
+ /*
+ * Store cpu number in limit so that it can be loaded quickly
+ * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
+ */
d = 0x0f40000000000ULL;
d |= cpu;
d |= (node & 0xf) << 12;
d |= (node >> 4) << 48;
+
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
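
User space recovers that packed word with a single lsl on the per-CPU segment
(or from TSC_AUX via rdtscp); unpacking is just the inverse of the encoding
above. A small sketch (field layout taken from the code; the helper name is
illustrative):

	/* cpu lives in bits 0-11, node starts at bit 12, matching what
	 * vsyscall_set_cpu() stores in the GDT limit / TSC_AUX. */
	static inline void decode_cpu_node(unsigned int p,
					   unsigned int *cpu,
					   unsigned int *node)
	{
		*cpu  = p & 0xfff;
		*node = p >> 12;
	}
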
@@ -275,8 +247,10 @@ static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
long cpu = (long)arg;
+
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
+
return NOTIFY_DONE;
}
@@ -284,25 +258,23 @@ void __init map_vsyscall(void)
{
extern char __vsyscall_0;
unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+ extern char __vvar_page;
+ unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+ __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
+ BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != (unsigned long)VVAR_ADDRESS);
}
static int __init vsyscall_init(void)
{
- BUG_ON(((unsigned long) &vgettimeofday !=
- VSYSCALL_ADDR(__NR_vgettimeofday)));
- BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
- BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
- BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-#ifdef CONFIG_SYSCTL
- register_sysctl_table(kernel_root_table2);
-#endif
+ BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
+
on_each_cpu(cpu_vsyscall_init, NULL, 1);
/* notifier priority > KVM */
hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
return 0;
}
-
__initcall(vsyscall_init);
diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/kernel/vsyscall_emu_64.S
new file mode 100644
index 000000000000..ffa845eae5ca
--- /dev/null
+++ b/arch/x86/kernel/vsyscall_emu_64.S
@@ -0,0 +1,27 @@
+/*
+ * vsyscall_emu_64.S: Vsyscall emulation page
+ *
+ * Copyright (c) 2011 Andy Lutomirski
+ *
+ * Subject to the GNU General Public License, version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/irq_vectors.h>
+
+/* The unused parts of the page are filled with 0xcc by the linker script. */
+
+.section .vsyscall_0, "a"
+ENTRY(vsyscall_0)
+ int $VSYSCALL_EMU_VECTOR
+END(vsyscall_0)
+
+.section .vsyscall_1, "a"
+ENTRY(vsyscall_1)
+ int $VSYSCALL_EMU_VECTOR
+END(vsyscall_1)
+
+.section .vsyscall_2, "a"
+ENTRY(vsyscall_2)
+ int $VSYSCALL_EMU_VECTOR
+END(vsyscall_2)
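
Because the linker script pads the page with 0xcc and places .vsyscall_1 at
offset 1024 and .vsyscall_2 at 2048, each ENTRY above lands exactly on its
historical slot, and any stray jump into the page also hits an int 0xcc that
the emulation handler can police. For illustration only, the legacy ABI a
pre-vDSO binary would still use (new code should call the vDSO instead):

	#include <time.h>

	typedef time_t (*vtime_t)(time_t *);

	static time_t legacy_vtime(void)
	{
		/* slot 1 = VSYSCALL_START + 0x400 (historical address) */
		vtime_t vtime = (vtime_t)0xffffffffff600400UL;
		return vtime(NULL);
	}
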
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 50f63648ce1b..988724b236b6 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -31,6 +31,7 @@ config KVM
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
select KVM_MMIO
+ select TASK_DELAY_ACCT
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -76,6 +77,5 @@ config KVM_MMU_AUDIT
# the virtualization menu.
source drivers/vhost/Kconfig
source drivers/lguest/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index adc98675cda0..6f08bc940fa8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -407,76 +407,59 @@ struct gprefix {
} \
} while (0)
-/* Fetch next part of the instruction being emulated. */
-#define insn_fetch(_type, _size, _eip) \
-({ unsigned long _x; \
- rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
- if (rc != X86EMUL_CONTINUE) \
- goto done; \
- (_eip) += (_size); \
- (_type)_x; \
-})
-
-#define insn_fetch_arr(_arr, _size, _eip) \
-({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
- if (rc != X86EMUL_CONTINUE) \
- goto done; \
- (_eip) += (_size); \
-})
-
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
{
struct x86_instruction_info info = {
.intercept = intercept,
- .rep_prefix = ctxt->decode.rep_prefix,
- .modrm_mod = ctxt->decode.modrm_mod,
- .modrm_reg = ctxt->decode.modrm_reg,
- .modrm_rm = ctxt->decode.modrm_rm,
- .src_val = ctxt->decode.src.val64,
- .src_bytes = ctxt->decode.src.bytes,
- .dst_bytes = ctxt->decode.dst.bytes,
- .ad_bytes = ctxt->decode.ad_bytes,
+ .rep_prefix = ctxt->rep_prefix,
+ .modrm_mod = ctxt->modrm_mod,
+ .modrm_reg = ctxt->modrm_reg,
+ .modrm_rm = ctxt->modrm_rm,
+ .src_val = ctxt->src.val64,
+ .src_bytes = ctxt->src.bytes,
+ .dst_bytes = ctxt->dst.bytes,
+ .ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
}
-static inline unsigned long ad_mask(struct decode_cache *c)
+static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
- return (1UL << (c->ad_bytes << 3)) - 1;
+ return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
-address_mask(struct decode_cache *c, unsigned long reg)
+address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
- if (c->ad_bytes == sizeof(unsigned long))
+ if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
- return reg & ad_mask(c);
+ return reg & ad_mask(ctxt);
}
static inline unsigned long
-register_address(struct decode_cache *c, unsigned long reg)
+register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
- return address_mask(c, reg);
+ return address_mask(ctxt, reg);
}
static inline void
-register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
+register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
- if (c->ad_bytes == sizeof(unsigned long))
+ if (ctxt->ad_bytes == sizeof(unsigned long))
*reg += inc;
else
- *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
+ *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}
-static inline void jmp_rel(struct decode_cache *c, int rel)
+static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
- register_address_increment(c, &c->eip, rel);
+ register_address_increment(ctxt, &ctxt->_eip, rel);
}
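
ad_mask() turns the address size in bytes into a wrap mask, and
register_address_increment() applies it so only the low address-size bits of
the register move. A quick standalone check of the 16-bit case (note the
sizeof(unsigned long) short-circuit above is what avoids the undefined
1UL << 64 shift for 8-byte addressing):

	#include <assert.h>

	static unsigned long ad_mask(int ad_bytes)
	{
		return (1UL << (ad_bytes << 3)) - 1;
	}

	int main(void)
	{
		unsigned long reg = 0x1234ffffUL;

		/* 16-bit addressing: low word wraps, high bits stay. */
		reg = (reg & ~ad_mask(2)) | ((reg + 1) & ad_mask(2));
		assert(reg == 0x12340000UL);
		return 0;
	}
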
static u32 desc_limit_scaled(struct desc_struct *desc)
@@ -486,28 +469,26 @@ static u32 desc_limit_scaled(struct desc_struct *desc)
return desc->g ? (limit << 12) | 0xfff : limit;
}
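
With the granularity bit set, the 20-bit limit counts 4 KiB pages, so
desc_limit_scaled() turns a limit of 0xfffff into 0xffffffff (the full
4 GiB - 1). In short:

	static unsigned int limit_scaled(unsigned int limit, int g)
	{
		/* g=1: 4KiB granularity; 0xfffff -> 0xffffffff */
		return g ? (limit << 12) | 0xfff : limit;
	}
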
-static void set_seg_override(struct decode_cache *c, int seg)
+static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
- c->has_seg_override = true;
- c->seg_override = seg;
+ ctxt->has_seg_override = true;
+ ctxt->seg_override = seg;
}
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;
- return ops->get_cached_segment_base(ctxt, seg);
+ return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
-static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
- struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
- if (!c->has_seg_override)
+ if (!ctxt->has_seg_override)
return 0;
- return c->seg_override;
+ return ctxt->seg_override;
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
@@ -579,7 +560,6 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
unsigned size, bool write, bool fetch,
ulong *linear)
{
- struct decode_cache *c = &ctxt->decode;
struct desc_struct desc;
bool usable;
ulong la;
@@ -587,7 +567,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
u16 sel;
unsigned cpl, rpl;
- la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+ la = seg_base(ctxt, addr.seg) + addr.ea;
switch (ctxt->mode) {
case X86EMUL_MODE_REAL:
break;
@@ -637,7 +617,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
- if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
+ if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
la &= (u32)-1;
*linear = la;
return X86EMUL_CONTINUE;
@@ -671,11 +651,10 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
-static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
+static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
unsigned long eip, u8 *dest)
{
- struct fetch_cache *fc = &ctxt->decode.fetch;
+ struct fetch_cache *fc = &ctxt->fetch;
int rc;
int size, cur_size;
@@ -687,8 +666,8 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
rc = __linearize(ctxt, addr, size, false, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = ops->fetch(ctxt, linear, fc->data + cur_size,
- size, &ctxt->exception);
+ rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
+ size, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
fc->end += size;
@@ -698,7 +677,6 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
unsigned long eip, void *dest, unsigned size)
{
int rc;
@@ -707,13 +685,30 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
while (size--) {
- rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
+ rc = do_insn_fetch_byte(ctxt, eip++, dest++);
if (rc != X86EMUL_CONTINUE)
return rc;
}
return X86EMUL_CONTINUE;
}
+/* Fetch next part of the instruction being emulated. */
+#define insn_fetch(_type, _size, _eip) \
+({ unsigned long _x; \
+ rc = do_insn_fetch(ctxt, (_eip), &_x, (_size)); \
+ if (rc != X86EMUL_CONTINUE) \
+ goto done; \
+ (_eip) += (_size); \
+ (_type)_x; \
+})
+
+#define insn_fetch_arr(_arr, _size, _eip) \
+({ rc = do_insn_fetch(ctxt, (_eip), _arr, (_size)); \
+ if (rc != X86EMUL_CONTINUE) \
+ goto done; \
+ (_eip) += (_size); \
+})
+
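
insn_fetch() depends on the GCC statement-expression extension: the ({ ... })
block can run statements, including the goto to the caller's done label, and
still yield the fetched value. A minimal demo of the pattern in isolation
(names illustrative):

	#include <stdio.h>

	#define fetch_or_bail(val, ok)		\
	({	if (!(ok))			\
			goto done;		\
		(val);				\
	})

	int main(void)
	{
		int x = fetch_or_bail(42, 1);

		printf("%d\n", x);
	done:
		return 0;
	}
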
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
@@ -857,16 +852,15 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op,
- struct decode_cache *c,
int inhibit_bytereg)
{
- unsigned reg = c->modrm_reg;
- int highbyte_regs = c->rex_prefix == 0;
+ unsigned reg = ctxt->modrm_reg;
+ int highbyte_regs = ctxt->rex_prefix == 0;
- if (!(c->d & ModRM))
- reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
+ if (!(ctxt->d & ModRM))
+ reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
- if (c->d & Sse) {
+ if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
@@ -875,49 +869,47 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
}
op->type = OP_REG;
- if ((c->d & ByteOp) && !inhibit_bytereg) {
- op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
+ if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
+ op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
op->bytes = 1;
} else {
- op->addr.reg = decode_register(reg, c->regs, 0);
- op->bytes = c->op_bytes;
+ op->addr.reg = decode_register(reg, ctxt->regs, 0);
+ op->bytes = ctxt->op_bytes;
}
fetch_register_operand(op);
op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct operand *op)
{
- struct decode_cache *c = &ctxt->decode;
u8 sib;
int index_reg = 0, base_reg = 0, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
- if (c->rex_prefix) {
- c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
- index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
- c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
+ if (ctxt->rex_prefix) {
+ ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
+ index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
+ ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
}
- c->modrm = insn_fetch(u8, 1, c->eip);
- c->modrm_mod |= (c->modrm & 0xc0) >> 6;
- c->modrm_reg |= (c->modrm & 0x38) >> 3;
- c->modrm_rm |= (c->modrm & 0x07);
- c->modrm_seg = VCPU_SREG_DS;
+ ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+ ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
+ ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
+ ctxt->modrm_rm |= (ctxt->modrm & 0x07);
+ ctxt->modrm_seg = VCPU_SREG_DS;
- if (c->modrm_mod == 3) {
+ if (ctxt->modrm_mod == 3) {
op->type = OP_REG;
- op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- op->addr.reg = decode_register(c->modrm_rm,
- c->regs, c->d & ByteOp);
- if (c->d & Sse) {
+ op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ op->addr.reg = decode_register(ctxt->modrm_rm,
+ ctxt->regs, ctxt->d & ByteOp);
+ if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
- op->addr.xmm = c->modrm_rm;
- read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
+ op->addr.xmm = ctxt->modrm_rm;
+ read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
fetch_register_operand(op);
@@ -926,26 +918,26 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
op->type = OP_MEM;
- if (c->ad_bytes == 2) {
- unsigned bx = c->regs[VCPU_REGS_RBX];
- unsigned bp = c->regs[VCPU_REGS_RBP];
- unsigned si = c->regs[VCPU_REGS_RSI];
- unsigned di = c->regs[VCPU_REGS_RDI];
+ if (ctxt->ad_bytes == 2) {
+ unsigned bx = ctxt->regs[VCPU_REGS_RBX];
+ unsigned bp = ctxt->regs[VCPU_REGS_RBP];
+ unsigned si = ctxt->regs[VCPU_REGS_RSI];
+ unsigned di = ctxt->regs[VCPU_REGS_RDI];
/* 16-bit ModR/M decode. */
- switch (c->modrm_mod) {
+ switch (ctxt->modrm_mod) {
case 0:
- if (c->modrm_rm == 6)
- modrm_ea += insn_fetch(u16, 2, c->eip);
+ if (ctxt->modrm_rm == 6)
+ modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
break;
case 1:
- modrm_ea += insn_fetch(s8, 1, c->eip);
+ modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
break;
case 2:
- modrm_ea += insn_fetch(u16, 2, c->eip);
+ modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
break;
}
- switch (c->modrm_rm) {
+ switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
@@ -965,46 +957,46 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
modrm_ea += di;
break;
case 6:
- if (c->modrm_mod != 0)
+ if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
- if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
- (c->modrm_rm == 6 && c->modrm_mod != 0))
- c->modrm_seg = VCPU_SREG_SS;
+ if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
+ (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
+ ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
- if ((c->modrm_rm & 7) == 4) {
- sib = insn_fetch(u8, 1, c->eip);
+ if ((ctxt->modrm_rm & 7) == 4) {
+ sib = insn_fetch(u8, 1, ctxt->_eip);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
- if ((base_reg & 7) == 5 && c->modrm_mod == 0)
- modrm_ea += insn_fetch(s32, 4, c->eip);
+ if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
+ modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
else
- modrm_ea += c->regs[base_reg];
+ modrm_ea += ctxt->regs[base_reg];
if (index_reg != 4)
- modrm_ea += c->regs[index_reg] << scale;
- } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
+ modrm_ea += ctxt->regs[index_reg] << scale;
+ } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
if (ctxt->mode == X86EMUL_MODE_PROT64)
- c->rip_relative = 1;
+ ctxt->rip_relative = 1;
} else
- modrm_ea += c->regs[c->modrm_rm];
- switch (c->modrm_mod) {
+ modrm_ea += ctxt->regs[ctxt->modrm_rm];
+ switch (ctxt->modrm_mod) {
case 0:
- if (c->modrm_rm == 5)
- modrm_ea += insn_fetch(s32, 4, c->eip);
+ if (ctxt->modrm_rm == 5)
+ modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
break;
case 1:
- modrm_ea += insn_fetch(s8, 1, c->eip);
+ modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
break;
case 2:
- modrm_ea += insn_fetch(s32, 4, c->eip);
+ modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
break;
}
}
@@ -1014,53 +1006,50 @@ done:
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct operand *op)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
op->type = OP_MEM;
- switch (c->ad_bytes) {
+ switch (ctxt->ad_bytes) {
case 2:
- op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
+ op->addr.mem.ea = insn_fetch(u16, 2, ctxt->_eip);
break;
case 4:
- op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
+ op->addr.mem.ea = insn_fetch(u32, 4, ctxt->_eip);
break;
case 8:
- op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
+ op->addr.mem.ea = insn_fetch(u64, 8, ctxt->_eip);
break;
}
done:
return rc;
}
-static void fetch_bit_operand(struct decode_cache *c)
+static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
long sv = 0, mask;
- if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
- mask = ~(c->dst.bytes * 8 - 1);
+ if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
+ mask = ~(ctxt->dst.bytes * 8 - 1);
- if (c->src.bytes == 2)
- sv = (s16)c->src.val & (s16)mask;
- else if (c->src.bytes == 4)
- sv = (s32)c->src.val & (s32)mask;
+ if (ctxt->src.bytes == 2)
+ sv = (s16)ctxt->src.val & (s16)mask;
+ else if (ctxt->src.bytes == 4)
+ sv = (s32)ctxt->src.val & (s32)mask;
- c->dst.addr.mem.ea += (sv >> 3);
+ ctxt->dst.addr.mem.ea += (sv >> 3);
}
/* only subword offset */
- c->src.val &= (c->dst.bytes << 3) - 1;
+ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
unsigned long addr, void *dest, unsigned size)
{
int rc;
- struct read_cache *mc = &ctxt->decode.mem_read;
+ struct read_cache *mc = &ctxt->mem_read;
while (size) {
int n = min(size, 8u);
@@ -1068,8 +1057,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
if (mc->pos < mc->end)
goto read_cached;
- rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
- &ctxt->exception);
+ rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
+ &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
mc->end += n;
@@ -1094,7 +1083,7 @@ static int segmented_read(struct x86_emulate_ctxt *ctxt,
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- return read_emulated(ctxt, ctxt->ops, linear, data, size);
+ return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
@@ -1128,26 +1117,24 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
unsigned int size, unsigned short port,
void *dest)
{
- struct read_cache *rc = &ctxt->decode.io_read;
+ struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
- struct decode_cache *c = &ctxt->decode;
unsigned int in_page, n;
- unsigned int count = c->rep_prefix ?
- address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
+ unsigned int count = ctxt->rep_prefix ?
+ address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
in_page = (ctxt->eflags & EFLG_DF) ?
- offset_in_page(c->regs[VCPU_REGS_RDI]) :
- PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
+ offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
+ PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
- if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
+ if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
}
@@ -1158,9 +1145,10 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, struct desc_ptr *dt)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
+
if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
@@ -1177,48 +1165,42 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
/* allowed just for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, struct desc_struct *desc)
{
struct desc_ptr dt;
u16 index = selector >> 3;
- int ret;
ulong addr;
- get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+ get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
- addr = dt.address + index * 8;
- ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
- return ret;
+ addr = dt.address + index * 8;
+ return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+ &ctxt->exception);
}
/* allowed just for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, struct desc_struct *desc)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
- int ret;
- get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+ get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
- ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
-
- return ret;
+ return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+ &ctxt->exception);
}
/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, int seg)
{
struct desc_struct seg_desc;
@@ -1253,7 +1235,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (null_selector) /* for NULL selector skip all following checks */
goto load;
- ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ ret = read_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -1271,7 +1253,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
rpl = selector & 3;
dpl = seg_desc.dpl;
- cpl = ops->cpl(ctxt);
+ cpl = ctxt->ops->cpl(ctxt);
switch (seg) {
case VCPU_SREG_SS:
@@ -1322,12 +1304,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (seg_desc.s) {
/* mark segment as accessed */
seg_desc.type |= 1;
- ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ ret = write_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
}
load:
- ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
+ ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
return X86EMUL_CONTINUE;
exception:
emulate_exception(ctxt, err_vec, err_code, true);
@@ -1356,29 +1338,28 @@ static void write_register_operand(struct operand *op)
static int writeback(struct x86_emulate_ctxt *ctxt)
{
int rc;
- struct decode_cache *c = &ctxt->decode;
- switch (c->dst.type) {
+ switch (ctxt->dst.type) {
case OP_REG:
- write_register_operand(&c->dst);
+ write_register_operand(&ctxt->dst);
break;
case OP_MEM:
- if (c->lock_prefix)
+ if (ctxt->lock_prefix)
rc = segmented_cmpxchg(ctxt,
- c->dst.addr.mem,
- &c->dst.orig_val,
- &c->dst.val,
- c->dst.bytes);
+ ctxt->dst.addr.mem,
+ &ctxt->dst.orig_val,
+ &ctxt->dst.val,
+ ctxt->dst.bytes);
else
rc = segmented_write(ctxt,
- c->dst.addr.mem,
- &c->dst.val,
- c->dst.bytes);
+ ctxt->dst.addr.mem,
+ &ctxt->dst.val,
+ ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
break;
case OP_XMM:
- write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
+ write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
break;
case OP_NONE:
/* no writeback */
@@ -1391,50 +1372,45 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
static int em_push(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
struct segmented_address addr;
- register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
- addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
+ addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
addr.seg = VCPU_SREG_SS;
/* Disable writeback. */
- c->dst.type = OP_NONE;
- return segmented_write(ctxt, addr, &c->src.val, c->op_bytes);
+ ctxt->dst.type = OP_NONE;
+ return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
struct segmented_address addr;
- addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+ addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- return emulate_pop(ctxt, &c->dst.val, c->op_bytes);
+ return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
- void *dest, int len)
+ void *dest, int len)
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- int cpl = ops->cpl(ctxt);
+ int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
@@ -1470,49 +1446,41 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.type = OP_REG;
- c->dst.addr.reg = &ctxt->eflags;
- c->dst.bytes = c->op_bytes;
- return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = &ctxt->eflags;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
-static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->src.val = get_segment_selector(ctxt, seg);
+ ctxt->src.val = get_segment_selector(ctxt, seg);
return em_push(ctxt);
}
-static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
- struct decode_cache *c = &ctxt->decode;
unsigned long selector;
int rc;
- rc = emulate_pop(ctxt, &selector, c->op_bytes);
+ rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
+ rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- unsigned long old_esp = c->regs[VCPU_REGS_RSP];
+ unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
(reg == VCPU_REGS_RSP) ?
- (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
+ (ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
@@ -1526,26 +1494,23 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->src.val = (unsigned long)ctxt->eflags;
+ ctxt->src.val = (unsigned long)ctxt->eflags;
return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
- register_address_increment(c, &c->regs[VCPU_REGS_RSP],
- c->op_bytes);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
+ ctxt->op_bytes);
--reg;
}
- rc = emulate_pop(ctxt, &c->regs[reg], c->op_bytes);
+ rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
--reg;
@@ -1553,10 +1518,9 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
return rc;
}
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int irq)
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
@@ -1564,19 +1528,19 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
u16 cs, eip;
/* TODO: Add limit checks */
- c->src.val = ctxt->eflags;
+ ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
- c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
+ ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->src.val = c->eip;
+ ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -1594,21 +1558,20 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->eip = eip;
+ ctxt->_eip = eip;
return rc;
}
-static int emulate_int(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int irq)
+static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
- return emulate_int_real(ctxt, ops, irq);
+ return emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
@@ -1619,10 +1582,8 @@ static int emulate_int(struct x86_emulate_ctxt *ctxt,
}
}
-static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
@@ -1634,7 +1595,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
/* TODO: Add stack limit check */
- rc = emulate_pop(ctxt, &temp_eip, c->op_bytes);
+ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -1642,27 +1603,27 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
- rc = emulate_pop(ctxt, &cs, c->op_bytes);
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = emulate_pop(ctxt, &temp_eflags, c->op_bytes);
+ rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->eip = temp_eip;
+ ctxt->_eip = temp_eip;
- if (c->op_bytes == 4)
+ if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
- else if (c->op_bytes == 2) {
+ else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
}
@@ -1673,12 +1634,11 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
return rc;
}
-static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops* ops)
+static int em_iret(struct x86_emulate_ctxt *ctxt)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
- return emulate_iret_real(ctxt, ops);
+ return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
@@ -1691,53 +1651,49 @@ static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
unsigned short sel;
- memcpy(&sel, c->src.valptr + c->op_bytes, 2);
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
- rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->eip = 0;
- memcpy(&c->eip, c->src.valptr, c->op_bytes);
+ ctxt->_eip = 0;
+ memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
return X86EMUL_CONTINUE;
}
static int em_grp1a(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- return emulate_pop(ctxt, &c->dst.val, c->dst.bytes);
+ return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
}
static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- switch (c->modrm_reg) {
+ switch (ctxt->modrm_reg) {
case 0: /* rol */
- emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("rol", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 1: /* ror */
- emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("ror", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 2: /* rcl */
- emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("rcl", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 3: /* rcr */
- emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("rcr", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 4: /* sal/shl */
case 6: /* sal/shl */
- emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("sal", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 5: /* shr */
- emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("shr", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 7: /* sar */
- emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("sar", ctxt->src, ctxt->dst, ctxt->eflags);
break;
}
return X86EMUL_CONTINUE;
@@ -1745,33 +1701,32 @@ static int em_grp2(struct x86_emulate_ctxt *ctxt)
static int em_grp3(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- unsigned long *rax = &c->regs[VCPU_REGS_RAX];
- unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
+ unsigned long *rax = &ctxt->regs[VCPU_REGS_RAX];
+ unsigned long *rdx = &ctxt->regs[VCPU_REGS_RDX];
u8 de = 0;
- switch (c->modrm_reg) {
+ switch (ctxt->modrm_reg) {
case 0 ... 1: /* test */
- emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 2: /* not */
- c->dst.val = ~c->dst.val;
+ ctxt->dst.val = ~ctxt->dst.val;
break;
case 3: /* neg */
- emulate_1op("neg", c->dst, ctxt->eflags);
+ emulate_1op("neg", ctxt->dst, ctxt->eflags);
break;
case 4: /* mul */
- emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
+ emulate_1op_rax_rdx("mul", ctxt->src, *rax, *rdx, ctxt->eflags);
break;
case 5: /* imul */
- emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
+ emulate_1op_rax_rdx("imul", ctxt->src, *rax, *rdx, ctxt->eflags);
break;
case 6: /* div */
- emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
+ emulate_1op_rax_rdx_ex("div", ctxt->src, *rax, *rdx,
ctxt->eflags, de);
break;
case 7: /* idiv */
- emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
+ emulate_1op_rax_rdx_ex("idiv", ctxt->src, *rax, *rdx,
ctxt->eflags, de);
break;
default:
@@ -1784,26 +1739,25 @@ static int em_grp3(struct x86_emulate_ctxt *ctxt)
static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
- switch (c->modrm_reg) {
+ switch (ctxt->modrm_reg) {
case 0: /* inc */
- emulate_1op("inc", c->dst, ctxt->eflags);
+ emulate_1op("inc", ctxt->dst, ctxt->eflags);
break;
case 1: /* dec */
- emulate_1op("dec", c->dst, ctxt->eflags);
+ emulate_1op("dec", ctxt->dst, ctxt->eflags);
break;
case 2: /* call near abs */ {
long int old_eip;
- old_eip = c->eip;
- c->eip = c->src.val;
- c->src.val = old_eip;
+ old_eip = ctxt->_eip;
+ ctxt->_eip = ctxt->src.val;
+ ctxt->src.val = old_eip;
rc = em_push(ctxt);
break;
}
case 4: /* jmp abs */
- c->eip = c->src.val;
+ ctxt->_eip = ctxt->src.val;
break;
case 5: /* jmp far */
rc = em_jmp_far(ctxt);
@@ -1817,68 +1771,70 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
static int em_grp9(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- u64 old = c->dst.orig_val64;
+ u64 old = ctxt->dst.orig_val64;
- if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
- ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
- c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
- c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
+ if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
+ ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
+ ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
+ ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
} else {
- c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
- (u32) c->regs[VCPU_REGS_RBX];
+ ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
+ (u32) ctxt->regs[VCPU_REGS_RBX];
ctxt->eflags |= EFLG_ZF;
}
return X86EMUL_CONTINUE;
}
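
em_grp9() is the cmpxchg8b path: compare EDX:EAX against the 64-bit operand,
store ECX:EBX and set ZF on a match, otherwise load the operand into EDX:EAX
and clear ZF. A plain C model of those semantics (illustrative, not the
emulator's entry point):

	#include <stdint.h>
	#include <stdbool.h>

	static bool cmpxchg8b_model(uint64_t *mem, uint32_t *eax,
				    uint32_t *edx, uint32_t ebx,
				    uint32_t ecx)
	{
		uint64_t old = *mem;

		if ((uint32_t)old == *eax && (uint32_t)(old >> 32) == *edx) {
			*mem = ((uint64_t)ecx << 32) | ebx;
			return true;	/* ZF set */
		}
		*eax = (uint32_t)old;
		*edx = (uint32_t)(old >> 32);
		return false;		/* ZF clear */
	}
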
-static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static int em_ret(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = &ctxt->_eip;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ return em_pop(ctxt);
+}
+
+static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
unsigned long cs;
- rc = emulate_pop(ctxt, &c->eip, c->op_bytes);
+ rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- if (c->op_bytes == 4)
- c->eip = (u32)c->eip;
- rc = emulate_pop(ctxt, &cs, c->op_bytes);
+ if (ctxt->op_bytes == 4)
+ ctxt->_eip = (u32)ctxt->_eip;
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
return rc;
}
-static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
{
- struct decode_cache *c = &ctxt->decode;
unsigned short sel;
int rc;
- memcpy(&sel, c->src.valptr + c->op_bytes, 2);
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
- rc = load_segment_descriptor(ctxt, ops, sel, seg);
+ rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->dst.val = c->src.val;
+ ctxt->dst.val = ctxt->src.val;
return rc;
}
-static inline void
+static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, struct desc_struct *cs,
- struct desc_struct *ss)
+ struct desc_struct *cs, struct desc_struct *ss)
{
u16 selector;
memset(cs, 0, sizeof(struct desc_struct));
- ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
+ ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
memset(ss, 0, sizeof(struct desc_struct));
cs->l = 0; /* will be adjusted later */
@@ -1901,10 +1857,9 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
ss->p = 1;
}
-static int
-emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
@@ -1916,7 +1871,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
- setup_syscalls_segments(ctxt, ops, &cs, &ss);
+ setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
@@ -1930,15 +1885,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
- c->regs[VCPU_REGS_RCX] = c->eip;
+ ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
- c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
+ ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
- c->eip = msr_data;
+ ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~(msr_data | EFLG_RF);
@@ -1946,7 +1901,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
- c->eip = (u32)msr_data;
+ ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
}
@@ -1954,16 +1909,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
return X86EMUL_CONTINUE;
}
-static int
-emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+ ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
@@ -1974,7 +1928,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
if (ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_ud(ctxt);
- setup_syscalls_segments(ctxt, ops, &cs, &ss);
+ setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (ctxt->mode) {
@@ -2002,31 +1956,30 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
- c->eip = msr_data;
+ ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
- c->regs[VCPU_REGS_RSP] = msr_data;
+ ctxt->regs[VCPU_REGS_RSP] = msr_data;
return X86EMUL_CONTINUE;
}
-static int
-emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
int usermode;
- u16 cs_sel, ss_sel;
+ u16 cs_sel = 0, ss_sel = 0;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
- setup_syscalls_segments(ctxt, ops, &cs, &ss);
+ setup_syscalls_segments(ctxt, &cs, &ss);
- if ((c->rex_prefix & 0x8) != 0x0)
+ if ((ctxt->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
@@ -2056,14 +2009,13 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
- c->eip = c->regs[VCPU_REGS_RDX];
- c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
+ ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
+ ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
return X86EMUL_CONTINUE;
}
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -2071,13 +2023,13 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- return ops->cpl(ctxt) > iopl;
+ return ctxt->ops->cpl(ctxt) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 port, u16 len)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct tr_seg;
u32 base3;
int r;
@@ -2108,14 +2060,13 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
- if (emulator_bad_iopl(ctxt, ops))
- if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
+ if (emulator_bad_iopl(ctxt))
+ if (!emulator_io_port_access_allowed(ctxt, port, len))
return false;
ctxt->perm_ok = true;
@@ -2124,21 +2075,18 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_16 *tss)
{
- struct decode_cache *c = &ctxt->decode;
-
- tss->ip = c->eip;
+ tss->ip = ctxt->_eip;
tss->flag = ctxt->eflags;
- tss->ax = c->regs[VCPU_REGS_RAX];
- tss->cx = c->regs[VCPU_REGS_RCX];
- tss->dx = c->regs[VCPU_REGS_RDX];
- tss->bx = c->regs[VCPU_REGS_RBX];
- tss->sp = c->regs[VCPU_REGS_RSP];
- tss->bp = c->regs[VCPU_REGS_RBP];
- tss->si = c->regs[VCPU_REGS_RSI];
- tss->di = c->regs[VCPU_REGS_RDI];
+ tss->ax = ctxt->regs[VCPU_REGS_RAX];
+ tss->cx = ctxt->regs[VCPU_REGS_RCX];
+ tss->dx = ctxt->regs[VCPU_REGS_RDX];
+ tss->bx = ctxt->regs[VCPU_REGS_RBX];
+ tss->sp = ctxt->regs[VCPU_REGS_RSP];
+ tss->bp = ctxt->regs[VCPU_REGS_RBP];
+ tss->si = ctxt->regs[VCPU_REGS_RSI];
+ tss->di = ctxt->regs[VCPU_REGS_RDI];
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2148,22 +2096,20 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_16 *tss)
{
- struct decode_cache *c = &ctxt->decode;
int ret;
- c->eip = tss->ip;
+ ctxt->_eip = tss->ip;
ctxt->eflags = tss->flag | 2;
- c->regs[VCPU_REGS_RAX] = tss->ax;
- c->regs[VCPU_REGS_RCX] = tss->cx;
- c->regs[VCPU_REGS_RDX] = tss->dx;
- c->regs[VCPU_REGS_RBX] = tss->bx;
- c->regs[VCPU_REGS_RSP] = tss->sp;
- c->regs[VCPU_REGS_RBP] = tss->bp;
- c->regs[VCPU_REGS_RSI] = tss->si;
- c->regs[VCPU_REGS_RDI] = tss->di;
+ ctxt->regs[VCPU_REGS_RAX] = tss->ax;
+ ctxt->regs[VCPU_REGS_RCX] = tss->cx;
+ ctxt->regs[VCPU_REGS_RDX] = tss->dx;
+ ctxt->regs[VCPU_REGS_RBX] = tss->bx;
+ ctxt->regs[VCPU_REGS_RSP] = tss->sp;
+ ctxt->regs[VCPU_REGS_RBP] = tss->bp;
+ ctxt->regs[VCPU_REGS_RSI] = tss->si;
+ ctxt->regs[VCPU_REGS_RDI] = tss->di;
/*
* SDM says that segment selectors are loaded before segment
@@ -2179,19 +2125,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
* Now load segment descriptors. If a fault happens at this stage
* it is handled in the context of the new task
*/
- ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+ ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2199,10 +2145,10 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
@@ -2213,7 +2159,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
/* FIXME: need to provide precise fault address */
return ret;
- save_state_to_tss16(ctxt, ops, &tss_seg);
+ save_state_to_tss16(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
@@ -2239,26 +2185,23 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
return ret;
}
- return load_state_from_tss16(ctxt, ops, &tss_seg);
+ return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_32 *tss)
{
- struct decode_cache *c = &ctxt->decode;
-
- tss->cr3 = ops->get_cr(ctxt, 3);
- tss->eip = c->eip;
+ tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
+ tss->eip = ctxt->_eip;
tss->eflags = ctxt->eflags;
- tss->eax = c->regs[VCPU_REGS_RAX];
- tss->ecx = c->regs[VCPU_REGS_RCX];
- tss->edx = c->regs[VCPU_REGS_RDX];
- tss->ebx = c->regs[VCPU_REGS_RBX];
- tss->esp = c->regs[VCPU_REGS_RSP];
- tss->ebp = c->regs[VCPU_REGS_RBP];
- tss->esi = c->regs[VCPU_REGS_RSI];
- tss->edi = c->regs[VCPU_REGS_RDI];
+ tss->eax = ctxt->regs[VCPU_REGS_RAX];
+ tss->ecx = ctxt->regs[VCPU_REGS_RCX];
+ tss->edx = ctxt->regs[VCPU_REGS_RDX];
+ tss->ebx = ctxt->regs[VCPU_REGS_RBX];
+ tss->esp = ctxt->regs[VCPU_REGS_RSP];
+ tss->ebp = ctxt->regs[VCPU_REGS_RBP];
+ tss->esi = ctxt->regs[VCPU_REGS_RSI];
+ tss->edi = ctxt->regs[VCPU_REGS_RDI];
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2270,24 +2213,22 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_32 *tss)
{
- struct decode_cache *c = &ctxt->decode;
int ret;
- if (ops->set_cr(ctxt, 3, tss->cr3))
+ if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
- c->eip = tss->eip;
+ ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
- c->regs[VCPU_REGS_RAX] = tss->eax;
- c->regs[VCPU_REGS_RCX] = tss->ecx;
- c->regs[VCPU_REGS_RDX] = tss->edx;
- c->regs[VCPU_REGS_RBX] = tss->ebx;
- c->regs[VCPU_REGS_RSP] = tss->esp;
- c->regs[VCPU_REGS_RBP] = tss->ebp;
- c->regs[VCPU_REGS_RSI] = tss->esi;
- c->regs[VCPU_REGS_RDI] = tss->edi;
+ ctxt->regs[VCPU_REGS_RAX] = tss->eax;
+ ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
+ ctxt->regs[VCPU_REGS_RDX] = tss->edx;
+ ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
+ ctxt->regs[VCPU_REGS_RSP] = tss->esp;
+ ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
+ ctxt->regs[VCPU_REGS_RSI] = tss->esi;
+ ctxt->regs[VCPU_REGS_RDI] = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
@@ -2305,25 +2246,25 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
* Now load segment descriptors. If a fault happens at this stage
* it is handled in the context of the new task
*/
- ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+ ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+ ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+ ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2331,10 +2272,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
@@ -2345,7 +2286,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
/* FIXME: need to provide precise fault address */
return ret;
- save_state_to_tss32(ctxt, ops, &tss_seg);
+ save_state_to_tss32(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
@@ -2371,14 +2312,14 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
return ret;
}
- return load_state_from_tss32(ctxt, ops, &tss_seg);
+ return load_state_from_tss32(ctxt, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 tss_selector, int reason,
bool has_error_code, u32 error_code)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
@@ -2388,10 +2329,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
/* FIXME: old_tss_base == ~0 ? */
- ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+ ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2413,8 +2354,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
- write_segment_descriptor(ctxt, ops, old_tss_sel,
- &curr_tss_desc);
+ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
}
if (reason == TASK_SWITCH_IRET)
@@ -2426,10 +2366,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
old_tss_sel = 0xffff;
if (next_tss_desc.type & 8)
- ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+ ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
else
- ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+ ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2439,19 +2379,16 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
if (reason != TASK_SWITCH_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
- write_segment_descriptor(ctxt, ops, tss_selector,
- &next_tss_desc);
+ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
- struct decode_cache *c = &ctxt->decode;
-
- c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
- c->lock_prefix = 0;
- c->src.val = (unsigned long) error_code;
+ ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
+ ctxt->lock_prefix = 0;
+ ctxt->src.val = (unsigned long) error_code;
ret = em_push(ctxt);
}
@@ -2462,18 +2399,16 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int reason,
bool has_error_code, u32 error_code)
{
- struct x86_emulate_ops *ops = ctxt->ops;
- struct decode_cache *c = &ctxt->decode;
int rc;
- c->eip = ctxt->eip;
- c->dst.type = OP_NONE;
+ ctxt->_eip = ctxt->eip;
+ ctxt->dst.type = OP_NONE;
- rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+ rc = emulator_do_task_switch(ctxt, tss_selector, reason,
has_error_code, error_code);
if (rc == X86EMUL_CONTINUE)
- ctxt->eip = c->eip;
+ ctxt->eip = ctxt->_eip;
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
@@ -2481,22 +2416,20 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
int reg, struct operand *op)
{
- struct decode_cache *c = &ctxt->decode;
int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
- register_address_increment(c, &c->regs[reg], df * op->bytes);
- op->addr.mem.ea = register_address(c, c->regs[reg]);
+ register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
+ op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
op->addr.mem.seg = seg;
}
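
For reference, a minimal standalone sketch (not kernel code) of the DF-driven stepping that string_addr_inc() performs above: each iteration moves SI/DI forward by the element size, or backward when the direction flag is set. The address masking by ad_bytes that register_address() applies is omitted here.

#include <assert.h>

#define EFLG_DF 0x400

static unsigned long next_ea(unsigned long reg, unsigned bytes,
                             unsigned long eflags)
{
        /* DF clear: ascend; DF set: descend */
        int df = (eflags & EFLG_DF) ? -1 : 1;

        return reg + (long)df * (long)bytes;
}

int main(void)
{
        assert(next_ea(0x100, 4, 0) == 0x104);       /* CLD: ascending */
        assert(next_ea(0x100, 4, EFLG_DF) == 0xfc);  /* STD: descending */
        return 0;
}
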
static int em_das(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
- al = c->dst.val;
+ al = ctxt->dst.val;
old_al = al;
old_cf = cf;
@@ -2514,12 +2447,12 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
cf = true;
}
- c->dst.val = al;
+ ctxt->dst.val = al;
/* Set PF, ZF, SF */
- c->src.type = OP_IMM;
- c->src.val = 0;
- c->src.bytes = 1;
- emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
+ ctxt->src.type = OP_IMM;
+ ctxt->src.val = 0;
+ ctxt->src.bytes = 1;
+ emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
@@ -2530,175 +2463,189 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
u16 sel, old_cs;
ulong old_eip;
int rc;
old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
- old_eip = c->eip;
+ old_eip = ctxt->_eip;
- memcpy(&sel, c->src.valptr + c->op_bytes, 2);
- if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
return X86EMUL_CONTINUE;
- c->eip = 0;
- memcpy(&c->eip, c->src.valptr, c->op_bytes);
+ ctxt->_eip = 0;
+ memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
- c->src.val = old_cs;
+ ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->src.val = old_eip;
+ ctxt->src.val = old_eip;
return em_push(ctxt);
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
- c->dst.type = OP_REG;
- c->dst.addr.reg = &c->eip;
- c->dst.bytes = c->op_bytes;
- rc = emulate_pop(ctxt, &c->dst.val, c->op_bytes);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = &ctxt->_eip;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
return X86EMUL_CONTINUE;
}
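
A hedged standalone model of what em_ret_near_imm() implements: RET imm16 pops the return address into the instruction pointer, then discards imm16 bytes of stack-resident arguments by bumping the stack pointer. A small array stands in for guest stack memory, and slot granularity replaces byte granularity for brevity.

#include <assert.h>

static unsigned long stack[16];

static unsigned long pop(unsigned long *sp)
{
        return stack[(*sp)++];
}

int main(void)
{
        unsigned long sp = 0, eip;

        stack[0] = 0x1234;    /* return address pushed by the CALL */
        eip = pop(&sp);       /* RET: pop EIP... */
        sp += 2;              /* ...then add the immediate to (E)SP */
        assert(eip == 0x1234 && sp == 3);
        return 0;
}
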
static int em_add(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_or(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_adc(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("adc", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("sbb", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_and(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("and", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_sub(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("sub", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_xor(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("xor", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
-static int em_imul(struct x86_emulate_ctxt *ctxt)
+static int em_test(struct x86_emulate_ctxt *ctxt)
+{
+ emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ /* Write back the register source. */
+ ctxt->src.val = ctxt->dst.val;
+ write_register_operand(&ctxt->src);
- emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
+ /* Write back the memory destination with implicit LOCK prefix. */
+ ctxt->dst.val = ctxt->src.orig_val;
+ ctxt->lock_prefix = 1;
return X86EMUL_CONTINUE;
}
-static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
+static int em_imul(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ emulate_2op_SrcV_nobyte("imul", ctxt->src, ctxt->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
- c->dst.val = c->src2.val;
+static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->dst.val = ctxt->src2.val;
return em_imul(ctxt);
}
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.type = OP_REG;
- c->dst.bytes = c->src.bytes;
- c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
- c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.bytes = ctxt->src.bytes;
+ ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+ ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
return X86EMUL_CONTINUE;
}
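
The assignment in em_cwd() above relies on a branch-free sign-fill trick. A minimal standalone check, assuming unsigned long is at least as wide as the operand:

#include <assert.h>

static unsigned long sign_fill(unsigned long val, unsigned bytes)
{
        /*
         * (val >> (bytes * 8 - 1)) isolates the sign bit as 0 or 1;
         * subtracting 1 yields ~0UL or 0UL, and inverting swaps them,
         * so dst ends up all-ones for negative src and zero otherwise.
         */
        return ~((val >> (bytes * 8 - 1)) - 1);
}

int main(void)
{
        assert(sign_fill(0x8000, 2) == ~0UL);  /* negative AX -> DX all-ones */
        assert(sign_fill(0x7fff, 2) == 0);     /* positive AX -> DX zero */
        return 0;
}
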
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
u64 tsc = 0;
ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
- c->regs[VCPU_REGS_RAX] = (u32)tsc;
- c->regs[VCPU_REGS_RDX] = tsc >> 32;
+ ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
+ ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
return X86EMUL_CONTINUE;
}
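
And a tiny sketch of the EDX:EAX split em_rdtsc() performs on the 64-bit counter (standalone, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t tsc = 0x1122334455667788ULL;
        uint32_t eax = (uint32_t)tsc;   /* low half -> RAX */
        uint32_t edx = tsc >> 32;       /* high half -> RDX */

        assert(eax == 0x55667788 && edx == 0x11223344);
        return 0;
}
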
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- c->dst.val = c->src.val;
+ ctxt->dst.val = ctxt->src.val;
return X86EMUL_CONTINUE;
}
+static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
+{
+ if (ctxt->modrm_reg > VCPU_SREG_GS)
+ return emulate_ud(ctxt);
+
+ ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
+{
+ u16 sel = ctxt->src.val;
+
+ if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
+ return emulate_ud(ctxt);
+
+ if (ctxt->modrm_reg == VCPU_SREG_SS)
+ ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
+}
+
static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
+ memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
return X86EMUL_CONTINUE;
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
ulong linear;
- rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
+ rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
@@ -2714,10 +2661,9 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
- if (c->modrm_mod != 3 || c->modrm_rm != 1)
+ if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
return X86EMUL_UNHANDLEABLE;
rc = ctxt->ops->fix_hypercall(ctxt);
@@ -2725,73 +2671,104 @@ static int em_vmcall(struct x86_emulate_ctxt *ctxt)
return rc;
/* Let the processor re-execute the fixed hypercall */
- c->eip = ctxt->eip;
+ ctxt->_eip = ctxt->eip;
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
struct desc_ptr desc_ptr;
int rc;
- rc = read_descriptor(ctxt, c->src.addr.mem,
+ rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
- c->op_bytes);
+ ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->set_gdt(ctxt, &desc_ptr);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
rc = ctxt->ops->fix_hypercall(ctxt);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return rc;
}
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
struct desc_ptr desc_ptr;
int rc;
- rc = read_descriptor(ctxt, c->src.addr.mem,
+ rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
- c->op_bytes);
+ ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.bytes = 2;
- c->dst.val = ctxt->ops->get_cr(ctxt, 0);
+ ctxt->dst.bytes = 2;
+ ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
- | (c->src.val & 0x0f));
- c->dst.type = OP_NONE;
+ | (ctxt->src.val & 0x0f));
+ ctxt->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_loop(struct x86_emulate_ctxt *ctxt)
+{
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
+ if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
+ (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+ jmp_rel(ctxt, ctxt->src.val);
+
+ return X86EMUL_CONTINUE;
+}
+
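
em_loop() above folds LOOPNZ (0xe0) and LOOPZ (0xe1) onto the Jcc condition codes by xoring the opcode with 0x5. A reduced standalone stand-in for test_cc() that handles only the ZF conditions, illustrating why the mapping works:

#include <assert.h>
#include <stdbool.h>

#define EFLG_ZF 0x40

static bool test_cc(unsigned cc, unsigned long flags)
{
        bool rc = false;

        if ((cc & 0xe) == 0x4)          /* base condition 4: ZF set */
                rc = flags & EFLG_ZF;
        return (cc & 1) ? !rc : rc;     /* odd condition codes invert */
}

int main(void)
{
        /* LOOPZ (0xe1) keeps looping only while ZF is set */
        assert(test_cc(0xe1 ^ 0x5, EFLG_ZF));
        assert(!test_cc(0xe1 ^ 0x5, 0));
        /* LOOPNZ (0xe0) keeps looping only while ZF is clear */
        assert(test_cc(0xe0 ^ 0x5, 0));
        assert(!test_cc(0xe0 ^ 0x5, EFLG_ZF));
        return 0;
}
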
+static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+{
+ if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
+ jmp_rel(ctxt, ctxt->src.val);
+
+ return X86EMUL_CONTINUE;
+}
+
+static int em_cli(struct x86_emulate_ctxt *ctxt)
+{
+ if (emulator_bad_iopl(ctxt))
+ return emulate_gp(ctxt, 0);
+
+ ctxt->eflags &= ~X86_EFLAGS_IF;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_sti(struct x86_emulate_ctxt *ctxt)
+{
+ if (emulator_bad_iopl(ctxt))
+ return emulate_gp(ctxt, 0);
+
+ ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
+ ctxt->eflags |= X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
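
The emulator_bad_iopl() test behind em_cli()/em_sti() reduces, in protected mode, to comparing CPL against the IOPL field in EFLAGS bits 12-13. A minimal sketch under that assumption (real-mode and virtual-8086 handling omitted):

#include <assert.h>
#include <stdbool.h>

static bool bad_iopl(unsigned cpl, unsigned long eflags)
{
        unsigned iopl = (eflags >> 12) & 3;    /* EFLAGS.IOPL */

        return cpl > iopl;                     /* CLI/STI -> #GP */
}

int main(void)
{
        assert(!bad_iopl(0, 0));               /* ring 0 may always toggle IF */
        assert(bad_iopl(3, 0));                /* ring 3, IOPL 0: fault */
        assert(!bad_iopl(3, 3UL << 12));       /* ring 3, IOPL 3: allowed */
        return 0;
}
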
@@ -2809,9 +2786,7 @@ static bool valid_cr(int nr)
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- if (!valid_cr(c->modrm_reg))
+ if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
@@ -2819,9 +2794,8 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- u64 new_val = c->src.val64;
- int cr = c->modrm_reg;
+ u64 new_val = ctxt->src.val64;
+ int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
@@ -2898,8 +2872,7 @@ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- int dr = c->modrm_reg;
+ int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
@@ -2917,9 +2890,8 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- u64 new_val = c->src.val64;
- int dr = c->modrm_reg;
+ u64 new_val = ctxt->src.val64;
+ int dr = ctxt->modrm_reg;
if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
return emulate_gp(ctxt, 0);
@@ -2941,7 +2913,7 @@ static int check_svme(struct x86_emulate_ctxt *ctxt)
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
- u64 rax = ctxt->decode.regs[VCPU_REGS_RAX];
+ u64 rax = ctxt->regs[VCPU_REGS_RAX];
/* Valid physical address? */
if (rax & 0xffff000000000000ULL)
@@ -2963,7 +2935,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
- u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX];
+ u64 rcx = ctxt->regs[VCPU_REGS_RCX];
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
(rcx > 3))
@@ -2974,10 +2946,8 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.bytes = min(c->dst.bytes, 4u);
- if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
+ ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
@@ -2985,10 +2955,8 @@ static int check_perm_in(struct x86_emulate_ctxt *ctxt)
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->src.bytes = min(c->src.bytes, 4u);
- if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
+ ctxt->src.bytes = min(ctxt->src.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
@@ -3165,12 +3133,15 @@ static struct opcode opcode_table[256] = {
G(DstMem | SrcImm | ModRM | Group, group1),
G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
G(DstMem | SrcImmByte | ModRM | Group, group1),
- D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
+ I2bv(DstMem | SrcReg | ModRM, em_test),
+ I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
- D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
- D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
+ I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
+ D(ModRM | SrcMem | NoAccess | DstReg),
+ I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
+ G(0, group1A),
/* 0x90 - 0x97 */
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
@@ -3184,7 +3155,7 @@ static struct opcode opcode_table[256] = {
I2bv(SrcSI | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstDI | String, em_cmp),
/* 0xA8 - 0xAF */
- D2bv(DstAcc | SrcImm),
+ I2bv(DstAcc | SrcImm, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
I2bv(SrcAcc | DstDI | String, em_cmp),
@@ -3195,25 +3166,26 @@ static struct opcode opcode_table[256] = {
/* 0xC0 - 0xC7 */
D2bv(DstMem | SrcImmByte | ModRM),
I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
- D(ImplicitOps | Stack),
+ I(ImplicitOps | Stack, em_ret),
D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
- N, N, N, D(ImplicitOps | Stack),
+ N, N, N, I(ImplicitOps | Stack, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
- D(ImplicitOps | No64), DI(ImplicitOps, iret),
+ D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
- X4(D(SrcImmByte)),
+ X3(I(SrcImmByte, em_loop)),
+ I(SrcImmByte, em_jcxz),
D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
/* 0xE8 - 0xEF */
D(SrcImm | Stack), D(SrcImm | ImplicitOps),
- D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
+ I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
D2bvIP(SrcDX | DstAcc, in, check_perm_in),
D2bvIP(SrcAcc | DstDX, out, check_perm_out),
/* 0xF0 - 0xF7 */
@@ -3221,14 +3193,16 @@ static struct opcode opcode_table[256] = {
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
G(ByteOp, group3), G(0, group3),
/* 0xF8 - 0xFF */
- D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
+ D(ImplicitOps), D(ImplicitOps),
+ I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
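
The table changes in this hunk continue the conversion from bare flag descriptors (D(...)) to descriptors that carry an execute callback (I(..., em_xxx)), letting x86_emulate_insn() dispatch through ctxt->execute instead of the opcode switch. A toy illustration of that shape (names here are illustrative, not the kernel's):

#include <stdio.h>

struct ctxt;
typedef int (*exec_fn)(struct ctxt *);

struct opcode {
        unsigned flags;
        exec_fn execute;        /* NULL: still handled by the big switch */
};

struct ctxt { int dummy; };

static int em_nop(struct ctxt *c) { (void)c; return 0; }

#define D(fl)     { (fl), NULL }
#define I(fl, fn) { (fl), (fn) }

static const struct opcode table[] = {
        D(0x1),
        I(0x2, em_nop),
};

int main(void)
{
        struct ctxt c = { 0 };
        const struct opcode *op = &table[1];

        return op->execute ? op->execute(&c) : -1;  /* table-driven dispatch */
}
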
static struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
- N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
+ N, I(ImplicitOps | VendorSpecific, em_syscall),
+ II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM), N, N,
/* 0x10 - 0x1F */
@@ -3245,7 +3219,8 @@ static struct opcode twobyte_table[256] = {
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
DI(ImplicitOps | Priv, rdmsr),
DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
- D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
+ I(ImplicitOps | VendorSpecific, em_sysenter),
+ I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
@@ -3313,11 +3288,11 @@ static struct opcode twobyte_table[256] = {
#undef I2bv
#undef I6ALU
-static unsigned imm_size(struct decode_cache *c)
+static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
- size = (c->d & ByteOp) ? 1 : c->op_bytes;
+ size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
if (size == 8)
size = 4;
return size;
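
imm_size() encodes the rule that immediate operands follow the operand size, except in 64-bit operations, which still fetch a 32-bit immediate that the CPU sign-extends. A standalone restatement:

#include <assert.h>

static unsigned imm_size(unsigned op_bytes, int byteop)
{
        unsigned size = byteop ? 1 : op_bytes;

        if (size == 8)          /* REX.W ops take imm32, sign-extended */
                size = 4;
        return size;
}

int main(void)
{
        assert(imm_size(8, 0) == 4);    /* e.g. add rax, imm32 */
        assert(imm_size(2, 0) == 2);
        assert(imm_size(4, 1) == 1);    /* ByteOp wins over operand size */
        return 0;
}
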
@@ -3326,23 +3301,21 @@ static unsigned imm_size(struct decode_cache *c)
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned size, bool sign_extension)
{
- struct decode_cache *c = &ctxt->decode;
- struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
op->type = OP_IMM;
op->bytes = size;
- op->addr.mem.ea = c->eip;
+ op->addr.mem.ea = ctxt->_eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
- op->val = insn_fetch(s8, 1, c->eip);
+ op->val = insn_fetch(s8, 1, ctxt->_eip);
break;
case 2:
- op->val = insn_fetch(s16, 2, c->eip);
+ op->val = insn_fetch(s16, 2, ctxt->_eip);
break;
case 4:
- op->val = insn_fetch(s32, 4, c->eip);
+ op->val = insn_fetch(s32, 4, ctxt->_eip);
break;
}
if (!sign_extension) {
@@ -3362,11 +3335,8 @@ done:
return rc;
}
-int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
- struct x86_emulate_ops *ops = ctxt->ops;
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
@@ -3374,11 +3344,11 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
struct opcode opcode;
struct operand memop = { .type = OP_NONE }, *memopp = NULL;
- c->eip = ctxt->eip;
- c->fetch.start = c->eip;
- c->fetch.end = c->fetch.start + insn_len;
+ ctxt->_eip = ctxt->eip;
+ ctxt->fetch.start = ctxt->_eip;
+ ctxt->fetch.end = ctxt->fetch.start + insn_len;
if (insn_len > 0)
- memcpy(c->fetch.data, insn, insn_len);
+ memcpy(ctxt->fetch.data, insn, insn_len);
switch (mode) {
case X86EMUL_MODE_REAL:
@@ -3399,46 +3369,46 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
return -1;
}
- c->op_bytes = def_op_bytes;
- c->ad_bytes = def_ad_bytes;
+ ctxt->op_bytes = def_op_bytes;
+ ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
- switch (c->b = insn_fetch(u8, 1, c->eip)) {
+ switch (ctxt->b = insn_fetch(u8, 1, ctxt->_eip)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
- c->op_bytes = def_op_bytes ^ 6;
+ ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
- c->ad_bytes = def_ad_bytes ^ 12;
+ ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
- c->ad_bytes = def_ad_bytes ^ 6;
+ ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
- set_seg_override(c, (c->b >> 3) & 3);
+ set_seg_override(ctxt, (ctxt->b >> 3) & 3);
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
- set_seg_override(c, c->b & 7);
+ set_seg_override(ctxt, ctxt->b & 7);
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
- c->rex_prefix = c->b;
+ ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
- c->lock_prefix = 1;
+ ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
- c->rep_prefix = c->b;
+ ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
@@ -3446,50 +3416,50 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
/* Any legacy prefix after a REX prefix nullifies its effect. */
- c->rex_prefix = 0;
+ ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
- if (c->rex_prefix & 8)
- c->op_bytes = 8; /* REX.W */
+ if (ctxt->rex_prefix & 8)
+ ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
- opcode = opcode_table[c->b];
+ opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
- if (c->b == 0x0f) {
- c->twobyte = 1;
- c->b = insn_fetch(u8, 1, c->eip);
- opcode = twobyte_table[c->b];
+ if (ctxt->b == 0x0f) {
+ ctxt->twobyte = 1;
+ ctxt->b = insn_fetch(u8, 1, ctxt->_eip);
+ opcode = twobyte_table[ctxt->b];
}
- c->d = opcode.flags;
+ ctxt->d = opcode.flags;
- while (c->d & GroupMask) {
- switch (c->d & GroupMask) {
+ while (ctxt->d & GroupMask) {
+ switch (ctxt->d & GroupMask) {
case Group:
- c->modrm = insn_fetch(u8, 1, c->eip);
- --c->eip;
- goffset = (c->modrm >> 3) & 7;
+ ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+ --ctxt->_eip;
+ goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
- c->modrm = insn_fetch(u8, 1, c->eip);
- --c->eip;
- goffset = (c->modrm >> 3) & 7;
- if ((c->modrm >> 6) == 3)
+ ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+ --ctxt->_eip;
+ goffset = (ctxt->modrm >> 3) & 7;
+ if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
- goffset = c->modrm & 7;
+ goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
- if (c->rep_prefix && op_prefix)
+ if (ctxt->rep_prefix && op_prefix)
return X86EMUL_UNHANDLEABLE;
- simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
+ simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
@@ -3501,61 +3471,61 @@ done_prefixes:
return X86EMUL_UNHANDLEABLE;
}
- c->d &= ~GroupMask;
- c->d |= opcode.flags;
+ ctxt->d &= ~GroupMask;
+ ctxt->d |= opcode.flags;
}
- c->execute = opcode.u.execute;
- c->check_perm = opcode.check_perm;
- c->intercept = opcode.intercept;
+ ctxt->execute = opcode.u.execute;
+ ctxt->check_perm = opcode.check_perm;
+ ctxt->intercept = opcode.intercept;
/* Unrecognised? */
- if (c->d == 0 || (c->d & Undefined))
+ if (ctxt->d == 0 || (ctxt->d & Undefined))
return -1;
- if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
+ if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
return -1;
- if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
- c->op_bytes = 8;
+ if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
+ ctxt->op_bytes = 8;
- if (c->d & Op3264) {
+ if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
- c->op_bytes = 8;
+ ctxt->op_bytes = 8;
else
- c->op_bytes = 4;
+ ctxt->op_bytes = 4;
}
- if (c->d & Sse)
- c->op_bytes = 16;
+ if (ctxt->d & Sse)
+ ctxt->op_bytes = 16;
/* ModRM and SIB bytes. */
- if (c->d & ModRM) {
- rc = decode_modrm(ctxt, ops, &memop);
- if (!c->has_seg_override)
- set_seg_override(c, c->modrm_seg);
- } else if (c->d & MemAbs)
- rc = decode_abs(ctxt, ops, &memop);
+ if (ctxt->d & ModRM) {
+ rc = decode_modrm(ctxt, &memop);
+ if (!ctxt->has_seg_override)
+ set_seg_override(ctxt, ctxt->modrm_seg);
+ } else if (ctxt->d & MemAbs)
+ rc = decode_abs(ctxt, &memop);
if (rc != X86EMUL_CONTINUE)
goto done;
- if (!c->has_seg_override)
- set_seg_override(c, VCPU_SREG_DS);
+ if (!ctxt->has_seg_override)
+ set_seg_override(ctxt, VCPU_SREG_DS);
- memop.addr.mem.seg = seg_override(ctxt, c);
+ memop.addr.mem.seg = seg_override(ctxt);
- if (memop.type == OP_MEM && c->ad_bytes != 8)
+ if (memop.type == OP_MEM && ctxt->ad_bytes != 8)
memop.addr.mem.ea = (u32)memop.addr.mem.ea;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
- switch (c->d & SrcMask) {
+ switch (ctxt->d & SrcMask) {
case SrcNone:
break;
case SrcReg:
- decode_register_operand(ctxt, &c->src, c, 0);
+ decode_register_operand(ctxt, &ctxt->src, 0);
break;
case SrcMem16:
memop.bytes = 2;
@@ -3564,60 +3534,60 @@ done_prefixes:
memop.bytes = 4;
goto srcmem_common;
case SrcMem:
- memop.bytes = (c->d & ByteOp) ? 1 :
- c->op_bytes;
+ memop.bytes = (ctxt->d & ByteOp) ? 1 :
+ ctxt->op_bytes;
srcmem_common:
- c->src = memop;
- memopp = &c->src;
+ ctxt->src = memop;
+ memopp = &ctxt->src;
break;
case SrcImmU16:
- rc = decode_imm(ctxt, &c->src, 2, false);
+ rc = decode_imm(ctxt, &ctxt->src, 2, false);
break;
case SrcImm:
- rc = decode_imm(ctxt, &c->src, imm_size(c), true);
+ rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), true);
break;
case SrcImmU:
- rc = decode_imm(ctxt, &c->src, imm_size(c), false);
+ rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), false);
break;
case SrcImmByte:
- rc = decode_imm(ctxt, &c->src, 1, true);
+ rc = decode_imm(ctxt, &ctxt->src, 1, true);
break;
case SrcImmUByte:
- rc = decode_imm(ctxt, &c->src, 1, false);
+ rc = decode_imm(ctxt, &ctxt->src, 1, false);
break;
case SrcAcc:
- c->src.type = OP_REG;
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
- fetch_register_operand(&c->src);
+ ctxt->src.type = OP_REG;
+ ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
+ fetch_register_operand(&ctxt->src);
break;
case SrcOne:
- c->src.bytes = 1;
- c->src.val = 1;
+ ctxt->src.bytes = 1;
+ ctxt->src.val = 1;
break;
case SrcSI:
- c->src.type = OP_MEM;
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.addr.mem.ea =
- register_address(c, c->regs[VCPU_REGS_RSI]);
- c->src.addr.mem.seg = seg_override(ctxt, c);
- c->src.val = 0;
+ ctxt->src.type = OP_MEM;
+ ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->src.addr.mem.ea =
+ register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
+ ctxt->src.addr.mem.seg = seg_override(ctxt);
+ ctxt->src.val = 0;
break;
case SrcImmFAddr:
- c->src.type = OP_IMM;
- c->src.addr.mem.ea = c->eip;
- c->src.bytes = c->op_bytes + 2;
- insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
+ ctxt->src.type = OP_IMM;
+ ctxt->src.addr.mem.ea = ctxt->_eip;
+ ctxt->src.bytes = ctxt->op_bytes + 2;
+ insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt->_eip);
break;
case SrcMemFAddr:
- memop.bytes = c->op_bytes + 2;
+ memop.bytes = ctxt->op_bytes + 2;
goto srcmem_common;
break;
case SrcDX:
- c->src.type = OP_REG;
- c->src.bytes = 2;
- c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
- fetch_register_operand(&c->src);
+ ctxt->src.type = OP_REG;
+ ctxt->src.bytes = 2;
+ ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&ctxt->src);
break;
}
@@ -3628,22 +3598,22 @@ done_prefixes:
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
- switch (c->d & Src2Mask) {
+ switch (ctxt->d & Src2Mask) {
case Src2None:
break;
case Src2CL:
- c->src2.bytes = 1;
- c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
+ ctxt->src2.bytes = 1;
+ ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8;
break;
case Src2ImmByte:
- rc = decode_imm(ctxt, &c->src2, 1, true);
+ rc = decode_imm(ctxt, &ctxt->src2, 1, true);
break;
case Src2One:
- c->src2.bytes = 1;
- c->src2.val = 1;
+ ctxt->src2.bytes = 1;
+ ctxt->src2.val = 1;
break;
case Src2Imm:
- rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
+ rc = decode_imm(ctxt, &ctxt->src2, imm_size(ctxt), true);
break;
}
@@ -3651,68 +3621,66 @@ done_prefixes:
goto done;
/* Decode and fetch the destination operand: register or memory. */
- switch (c->d & DstMask) {
+ switch (ctxt->d & DstMask) {
case DstReg:
- decode_register_operand(ctxt, &c->dst, c,
- c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
+ decode_register_operand(ctxt, &ctxt->dst,
+ ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
break;
case DstImmUByte:
- c->dst.type = OP_IMM;
- c->dst.addr.mem.ea = c->eip;
- c->dst.bytes = 1;
- c->dst.val = insn_fetch(u8, 1, c->eip);
+ ctxt->dst.type = OP_IMM;
+ ctxt->dst.addr.mem.ea = ctxt->_eip;
+ ctxt->dst.bytes = 1;
+ ctxt->dst.val = insn_fetch(u8, 1, ctxt->_eip);
break;
case DstMem:
case DstMem64:
- c->dst = memop;
- memopp = &c->dst;
- if ((c->d & DstMask) == DstMem64)
- c->dst.bytes = 8;
+ ctxt->dst = memop;
+ memopp = &ctxt->dst;
+ if ((ctxt->d & DstMask) == DstMem64)
+ ctxt->dst.bytes = 8;
else
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- if (c->d & BitOp)
- fetch_bit_operand(c);
- c->dst.orig_val = c->dst.val;
+ ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ if (ctxt->d & BitOp)
+ fetch_bit_operand(ctxt);
+ ctxt->dst.orig_val = ctxt->dst.val;
break;
case DstAcc:
- c->dst.type = OP_REG;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
- fetch_register_operand(&c->dst);
- c->dst.orig_val = c->dst.val;
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
+ fetch_register_operand(&ctxt->dst);
+ ctxt->dst.orig_val = ctxt->dst.val;
break;
case DstDI:
- c->dst.type = OP_MEM;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.addr.mem.ea =
- register_address(c, c->regs[VCPU_REGS_RDI]);
- c->dst.addr.mem.seg = VCPU_SREG_ES;
- c->dst.val = 0;
+ ctxt->dst.type = OP_MEM;
+ ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->dst.addr.mem.ea =
+ register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
+ ctxt->dst.addr.mem.seg = VCPU_SREG_ES;
+ ctxt->dst.val = 0;
break;
case DstDX:
- c->dst.type = OP_REG;
- c->dst.bytes = 2;
- c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
- fetch_register_operand(&c->dst);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.bytes = 2;
+ ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&ctxt->dst);
break;
case ImplicitOps:
/* Special instructions do their own operand decoding. */
default:
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
}
done:
- if (memopp && memopp->type == OP_MEM && c->rip_relative)
- memopp->addr.mem.ea += c->eip;
+ if (memopp && memopp->type == OP_MEM && ctxt->rip_relative)
+ memopp->addr.mem.ea += ctxt->_eip;
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
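
The rip_relative fixup just above works because a RIP-relative displacement is relative to the instruction that follows, so the effective address can only be finalized once ctxt->_eip has advanced past the whole instruction. A standalone arithmetic check (addresses are made up):

#include <assert.h>

int main(void)
{
        unsigned long insn_start = 0x400000;
        unsigned long insn_len = 7;             /* e.g. mov rax,[rip+disp32] */
        long disp = 0x20;                       /* decoded ModRM displacement */
        unsigned long eip_after = insn_start + insn_len;

        /* mirrors: memopp->addr.mem.ea += ctxt->_eip */
        assert((unsigned long)disp + eip_after == 0x400027);
        return 0;
}
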
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
/* The second termination condition only applies for REPE
* and REPNE. Test if the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and, if so, test the
@@ -3720,304 +3688,232 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
- if (((c->b == 0xa6) || (c->b == 0xa7) ||
- (c->b == 0xae) || (c->b == 0xaf))
- && (((c->rep_prefix == REPE_PREFIX) &&
+ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
+ (ctxt->b == 0xae) || (ctxt->b == 0xaf))
+ && (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0))
- || ((c->rep_prefix == REPNE_PREFIX) &&
+ || ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
return true;
return false;
}
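
A reduced model of the two REP termination conditions: x86_emulate_insn() stops any REP-prefixed string op once (R/E)CX reaches zero, and string_insn_completed() above additionally stops CMPS/SCAS (0xa6/0xa7/0xae/0xaf) on ZF for REPE/REPNE. The sketch merges both checks into one predicate for brevity:

#include <assert.h>
#include <stdbool.h>

#define EFLG_ZF 0x40

static bool rep_done(unsigned long rcx, bool repe, unsigned long eflags)
{
        if (rcx == 0)                     /* first termination condition */
                return true;
        if (repe)                         /* REPE/REPZ: stop when ZF = 0 */
                return !(eflags & EFLG_ZF);
        return eflags & EFLG_ZF;          /* REPNE/REPNZ: stop when ZF = 1 */
}

int main(void)
{
        assert(rep_done(0, true, 0));             /* count exhausted */
        assert(rep_done(5, true, 0));             /* repe cmpsb: mismatch */
        assert(!rep_done(5, true, EFLG_ZF));      /* still equal, continue */
        assert(rep_done(5, false, EFLG_ZF));      /* repne scasb: match found */
        return 0;
}
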
-int
-x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
struct x86_emulate_ops *ops = ctxt->ops;
u64 msr_data;
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
- int saved_dst_type = c->dst.type;
- int irq; /* Used for int 3, int, and into */
+ int saved_dst_type = ctxt->dst.type;
- ctxt->decode.mem_read.pos = 0;
+ ctxt->mem_read.pos = 0;
- if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
+ if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
rc = emulate_ud(ctxt);
goto done;
}
/* LOCK prefix is allowed only with some instructions */
- if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
+ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
+ if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((c->d & Sse)
+ if ((ctxt->d & Sse)
&& ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+ if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
- if (unlikely(ctxt->guest_mode) && c->intercept) {
- rc = emulator_check_intercept(ctxt, c->intercept,
+ if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+ rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
- if ((c->d & Priv) && ops->cpl(ctxt)) {
+ if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
rc = emulate_gp(ctxt, 0);
goto done;
}
/* Instruction can only be executed in protected mode */
- if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
+ if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
rc = emulate_ud(ctxt);
goto done;
}
/* Do instruction specific permission checks */
- if (c->check_perm) {
- rc = c->check_perm(ctxt);
+ if (ctxt->check_perm) {
+ rc = ctxt->check_perm(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if (unlikely(ctxt->guest_mode) && c->intercept) {
- rc = emulator_check_intercept(ctxt, c->intercept,
+ if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+ rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if (c->rep_prefix && (c->d & String)) {
+ if (ctxt->rep_prefix && (ctxt->d & String)) {
/* All REP prefixes have the same first termination condition */
- if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
- ctxt->eip = c->eip;
+ if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
+ ctxt->eip = ctxt->_eip;
goto done;
}
}
- if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
- rc = segmented_read(ctxt, c->src.addr.mem,
- c->src.valptr, c->src.bytes);
+ if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
+ rc = segmented_read(ctxt, ctxt->src.addr.mem,
+ ctxt->src.valptr, ctxt->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
- c->src.orig_val64 = c->src.val64;
+ ctxt->src.orig_val64 = ctxt->src.val64;
}
- if (c->src2.type == OP_MEM) {
- rc = segmented_read(ctxt, c->src2.addr.mem,
- &c->src2.val, c->src2.bytes);
+ if (ctxt->src2.type == OP_MEM) {
+ rc = segmented_read(ctxt, ctxt->src2.addr.mem,
+ &ctxt->src2.val, ctxt->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if ((c->d & DstMask) == ImplicitOps)
+ if ((ctxt->d & DstMask) == ImplicitOps)
goto special_insn;
- if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
+ if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
- rc = segmented_read(ctxt, c->dst.addr.mem,
- &c->dst.val, c->dst.bytes);
+ rc = segmented_read(ctxt, ctxt->dst.addr.mem,
+ &ctxt->dst.val, ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- c->dst.orig_val = c->dst.val;
+ ctxt->dst.orig_val = ctxt->dst.val;
special_insn:
- if (unlikely(ctxt->guest_mode) && c->intercept) {
- rc = emulator_check_intercept(ctxt, c->intercept,
+ if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+ rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if (c->execute) {
- rc = c->execute(ctxt);
+ if (ctxt->execute) {
+ rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
- if (c->twobyte)
+ if (ctxt->twobyte)
goto twobyte_insn;
- switch (c->b) {
+ switch (ctxt->b) {
case 0x06: /* push es */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
break;
case 0x07: /* pop es */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
break;
case 0x0e: /* push cs */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
break;
case 0x16: /* push ss */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
break;
case 0x17: /* pop ss */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
break;
case 0x1e: /* push ds */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
break;
case 0x1f: /* pop ds */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
break;
case 0x40 ... 0x47: /* inc r16/r32 */
- emulate_1op("inc", c->dst, ctxt->eflags);
+ emulate_1op("inc", ctxt->dst, ctxt->eflags);
break;
case 0x48 ... 0x4f: /* dec r16/r32 */
- emulate_1op("dec", c->dst, ctxt->eflags);
+ emulate_1op("dec", ctxt->dst, ctxt->eflags);
break;
case 0x63: /* movsxd */
if (ctxt->mode != X86EMUL_MODE_PROT64)
goto cannot_emulate;
- c->dst.val = (s32) c->src.val;
+ ctxt->dst.val = (s32) ctxt->src.val;
break;
case 0x6c: /* insb */
case 0x6d: /* insw/insd */
- c->src.val = c->regs[VCPU_REGS_RDX];
+ ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
goto do_io_in;
case 0x6e: /* outsb */
case 0x6f: /* outsw/outsd */
- c->dst.val = c->regs[VCPU_REGS_RDX];
+ ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
goto do_io_out;
break;
case 0x70 ... 0x7f: /* jcc (short) */
- if (test_cc(c->b, ctxt->eflags))
- jmp_rel(c, c->src.val);
- break;
- case 0x84 ... 0x85:
- test:
- emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
- break;
- case 0x86 ... 0x87: /* xchg */
- xchg:
- /* Write back the register source. */
- c->src.val = c->dst.val;
- write_register_operand(&c->src);
- /*
- * Write back the memory destination with implicit LOCK
- * prefix.
- */
- c->dst.val = c->src.orig_val;
- c->lock_prefix = 1;
- break;
- case 0x8c: /* mov r/m, sreg */
- if (c->modrm_reg > VCPU_SREG_GS) {
- rc = emulate_ud(ctxt);
- goto done;
- }
- c->dst.val = get_segment_selector(ctxt, c->modrm_reg);
+ if (test_cc(ctxt->b, ctxt->eflags))
+ jmp_rel(ctxt, ctxt->src.val);
break;
case 0x8d: /* lea r16/r32, m */
- c->dst.val = c->src.addr.mem.ea;
+ ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
- case 0x8e: { /* mov seg, r/m16 */
- uint16_t sel;
-
- sel = c->src.val;
-
- if (c->modrm_reg == VCPU_SREG_CS ||
- c->modrm_reg > VCPU_SREG_GS) {
- rc = emulate_ud(ctxt);
- goto done;
- }
-
- if (c->modrm_reg == VCPU_SREG_SS)
- ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
-
- rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
-
- c->dst.type = OP_NONE; /* Disable writeback. */
- break;
- }
case 0x8f: /* pop (sole member of Grp1a) */
rc = em_grp1a(ctxt);
break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
- if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
+ if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
break;
- goto xchg;
+ rc = em_xchg(ctxt);
+ break;
case 0x98: /* cbw/cwde/cdqe */
- switch (c->op_bytes) {
- case 2: c->dst.val = (s8)c->dst.val; break;
- case 4: c->dst.val = (s16)c->dst.val; break;
- case 8: c->dst.val = (s32)c->dst.val; break;
+ switch (ctxt->op_bytes) {
+ case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
+ case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
+ case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
- case 0xa8 ... 0xa9: /* test ax, imm */
- goto test;
case 0xc0 ... 0xc1:
rc = em_grp2(ctxt);
break;
- case 0xc3: /* ret */
- c->dst.type = OP_REG;
- c->dst.addr.reg = &c->eip;
- c->dst.bytes = c->op_bytes;
- rc = em_pop(ctxt);
- break;
case 0xc4: /* les */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
break;
case 0xc5: /* lds */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
- break;
- case 0xcb: /* ret far */
- rc = emulate_ret_far(ctxt, ops);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
break;
case 0xcc: /* int3 */
- irq = 3;
- goto do_interrupt;
+ rc = emulate_int(ctxt, 3);
+ break;
case 0xcd: /* int n */
- irq = c->src.val;
- do_interrupt:
- rc = emulate_int(ctxt, ops, irq);
+ rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
- if (ctxt->eflags & EFLG_OF) {
- irq = 4;
- goto do_interrupt;
- }
- break;
- case 0xcf: /* iret */
- rc = emulate_iret(ctxt, ops);
+ if (ctxt->eflags & EFLG_OF)
+ rc = emulate_int(ctxt, 4);
break;
case 0xd0 ... 0xd1: /* Grp2 */
rc = em_grp2(ctxt);
break;
case 0xd2 ... 0xd3: /* Grp2 */
- c->src.val = c->regs[VCPU_REGS_RCX];
+ ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
rc = em_grp2(ctxt);
break;
- case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
- register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
- if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
- (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
- jmp_rel(c, c->src.val);
- break;
- case 0xe3: /* jcxz/jecxz/jrcxz */
- if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
- jmp_rel(c, c->src.val);
- break;
case 0xe4: /* inb */
case 0xe5: /* in */
goto do_io_in;
@@ -4025,35 +3921,30 @@ special_insn:
case 0xe7: /* out */
goto do_io_out;
case 0xe8: /* call (near) */ {
- long int rel = c->src.val;
- c->src.val = (unsigned long) c->eip;
- jmp_rel(c, rel);
+ long int rel = ctxt->src.val;
+ ctxt->src.val = (unsigned long) ctxt->_eip;
+ jmp_rel(ctxt, rel);
rc = em_push(ctxt);
break;
}
case 0xe9: /* jmp rel */
- goto jmp;
- case 0xea: /* jmp far */
- rc = em_jmp_far(ctxt);
- break;
- case 0xeb:
- jmp: /* jmp rel short */
- jmp_rel(c, c->src.val);
- c->dst.type = OP_NONE; /* Disable writeback. */
+ case 0xeb: /* jmp rel short */
+ jmp_rel(ctxt, ctxt->src.val);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xec: /* in al,dx */
case 0xed: /* in (e/r)ax,dx */
do_io_in:
- if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
- &c->dst.val))
+ if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
+ &ctxt->dst.val))
goto done; /* IO is needed */
break;
case 0xee: /* out dx,al */
case 0xef: /* out dx,(e/r)ax */
do_io_out:
- ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
- &c->src.val, 1);
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
+ &ctxt->src.val, 1);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
@@ -4071,22 +3962,6 @@ special_insn:
case 0xf9: /* stc */
ctxt->eflags |= EFLG_CF;
break;
- case 0xfa: /* cli */
- if (emulator_bad_iopl(ctxt, ops)) {
- rc = emulate_gp(ctxt, 0);
- goto done;
- } else
- ctxt->eflags &= ~X86_EFLAGS_IF;
- break;
- case 0xfb: /* sti */
- if (emulator_bad_iopl(ctxt, ops)) {
- rc = emulate_gp(ctxt, 0);
- goto done;
- } else {
- ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
- ctxt->eflags |= X86_EFLAGS_IF;
- }
- break;
case 0xfc: /* cld */
ctxt->eflags &= ~EFLG_DF;
break;
@@ -4115,40 +3990,40 @@ writeback:
* restore dst type in case the decoding will be reused
* (happens for string instructions)
*/
- c->dst.type = saved_dst_type;
+ ctxt->dst.type = saved_dst_type;
- if ((c->d & SrcMask) == SrcSI)
- string_addr_inc(ctxt, seg_override(ctxt, c),
- VCPU_REGS_RSI, &c->src);
+ if ((ctxt->d & SrcMask) == SrcSI)
+ string_addr_inc(ctxt, seg_override(ctxt),
+ VCPU_REGS_RSI, &ctxt->src);
- if ((c->d & DstMask) == DstDI)
+ if ((ctxt->d & DstMask) == DstDI)
string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
- &c->dst);
+ &ctxt->dst);
- if (c->rep_prefix && (c->d & String)) {
- struct read_cache *r = &ctxt->decode.io_read;
- register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
+ if (ctxt->rep_prefix && (ctxt->d & String)) {
+ struct read_cache *r = &ctxt->io_read;
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
if (!string_insn_completed(ctxt)) {
/*
* Re-enter guest when pio read ahead buffer is empty
* or, if it is not used, after each 1024 iteration.
*/
- if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
+ if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
(r->end == 0 || r->end != r->pos)) {
/*
* Reset read cache. Usually happens before
* decode, but since instruction is restarted
* we have to do it here.
*/
- ctxt->decode.mem_read.end = 0;
+ ctxt->mem_read.end = 0;
return EMULATION_RESTART;
}
goto done; /* skip rip writeback */
}
}
- ctxt->eip = c->eip;
+ ctxt->eip = ctxt->_eip;
done:
if (rc == X86EMUL_PROPAGATE_FAULT)
@@ -4159,13 +4034,7 @@ done:
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
- switch (c->b) {
- case 0x05: /* syscall */
- rc = emulate_syscall(ctxt, ops);
- break;
- case 0x06:
- rc = em_clts(ctxt);
- break;
+ switch (ctxt->b) {
case 0x09: /* wbinvd */
(ctxt->ops->wbinvd)(ctxt);
break;
@@ -4174,21 +4043,21 @@ twobyte_insn:
case 0x18: /* Grp16 (prefetch/nop) */
break;
case 0x20: /* mov cr, reg */
- c->dst.val = ops->get_cr(ctxt, c->modrm_reg);
+ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
- ops->get_dr(ctxt, c->modrm_reg, &c->dst.val);
+ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
case 0x22: /* mov reg, cr */
- if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) {
+ if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done;
}
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
break;
case 0x23: /* mov from reg to dr */
- if (ops->set_dr(ctxt, c->modrm_reg, c->src.val &
+ if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
((ctxt->mode == X86EMUL_MODE_PROT64) ?
~0ULL : ~0U)) < 0) {
/* #UD condition is already handled by the code above */
@@ -4197,13 +4066,13 @@ twobyte_insn:
goto done;
}
- c->dst.type = OP_NONE; /* no writeback */
+ ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x30:
/* wrmsr */
- msr_data = (u32)c->regs[VCPU_REGS_RAX]
- | ((u64)c->regs[VCPU_REGS_RDX] << 32);
- if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) {
+ msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
+ | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
+ if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done;
@@ -4212,64 +4081,58 @@ twobyte_insn:
break;
case 0x32:
/* rdmsr */
- if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) {
+ if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done;
} else {
- c->regs[VCPU_REGS_RAX] = (u32)msr_data;
- c->regs[VCPU_REGS_RDX] = msr_data >> 32;
+ ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
+ ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
}
rc = X86EMUL_CONTINUE;
break;
- case 0x34: /* sysenter */
- rc = emulate_sysenter(ctxt, ops);
- break;
- case 0x35: /* sysexit */
- rc = emulate_sysexit(ctxt, ops);
- break;
case 0x40 ... 0x4f: /* cmov */
- c->dst.val = c->dst.orig_val = c->src.val;
- if (!test_cc(c->b, ctxt->eflags))
- c->dst.type = OP_NONE; /* no writeback */
+ ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
+ if (!test_cc(ctxt->b, ctxt->eflags))
+ ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
- if (test_cc(c->b, ctxt->eflags))
- jmp_rel(c, c->src.val);
+ if (test_cc(ctxt->b, ctxt->eflags))
+ jmp_rel(ctxt, ctxt->src.val);
break;
case 0x90 ... 0x9f: /* setcc r/m8 */
- c->dst.val = test_cc(c->b, ctxt->eflags);
+ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
case 0xa0: /* push fs */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
break;
case 0xa1: /* pop fs */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
break;
case 0xa3:
bt: /* bt */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
/* only subword offset */
- c->src.val &= (c->dst.bytes << 3) - 1;
- emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
+ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
+ emulate_2op_SrcV_nobyte("bt", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xa4: /* shld imm8, r, r/m */
case 0xa5: /* shld cl, r, r/m */
- emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
+ emulate_2op_cl("shld", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xa8: /* push gs */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
break;
case 0xa9: /* pop gs */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
break;
case 0xab:
bts: /* bts */
- emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV_nobyte("bts", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xac: /* shrd imm8, r, r/m */
case 0xad: /* shrd cl, r, r/m */
- emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
+ emulate_2op_cl("shrd", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xae: /* clflush */
break;
@@ -4278,38 +4141,38 @@ twobyte_insn:
* Save real source value, then compare EAX against
* destination.
*/
- c->src.orig_val = c->src.val;
- c->src.val = c->regs[VCPU_REGS_RAX];
- emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
+ ctxt->src.orig_val = ctxt->src.val;
+ ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
+ emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
if (ctxt->eflags & EFLG_ZF) {
/* Success: write back to memory. */
- c->dst.val = c->src.orig_val;
+ ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
- c->dst.type = OP_REG;
- c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
}
break;
case 0xb2: /* lss */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
break;
case 0xb3:
btr: /* btr */
- emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV_nobyte("btr", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xb4: /* lfs */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
break;
case 0xb5: /* lgs */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
break;
case 0xb6 ... 0xb7: /* movzx */
- c->dst.bytes = c->op_bytes;
- c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
- : (u16) c->src.val;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
+ : (u16) ctxt->src.val;
break;
case 0xba: /* Grp8 */
- switch (c->modrm_reg & 3) {
+ switch (ctxt->modrm_reg & 3) {
case 0:
goto bt;
case 1:
@@ -4322,47 +4185,47 @@ twobyte_insn:
break;
case 0xbb:
btc: /* btc */
- emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV_nobyte("btc", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xbc: { /* bsf */
u8 zf;
__asm__ ("bsf %2, %0; setz %1"
- : "=r"(c->dst.val), "=q"(zf)
- : "r"(c->src.val));
+ : "=r"(ctxt->dst.val), "=q"(zf)
+ : "r"(ctxt->src.val));
ctxt->eflags &= ~X86_EFLAGS_ZF;
if (zf) {
ctxt->eflags |= X86_EFLAGS_ZF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
}
break;
}
case 0xbd: { /* bsr */
u8 zf;
__asm__ ("bsr %2, %0; setz %1"
- : "=r"(c->dst.val), "=q"(zf)
- : "r"(c->src.val));
+ : "=r"(ctxt->dst.val), "=q"(zf)
+ : "r"(ctxt->src.val));
ctxt->eflags &= ~X86_EFLAGS_ZF;
if (zf) {
ctxt->eflags |= X86_EFLAGS_ZF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
}
break;
}
case 0xbe ... 0xbf: /* movsx */
- c->dst.bytes = c->op_bytes;
- c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
- (s16) c->src.val;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
+ (s16) ctxt->src.val;
break;
case 0xc0 ... 0xc1: /* xadd */
- emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
/* Write back the register source. */
- c->src.val = c->dst.orig_val;
- write_register_operand(&c->src);
+ ctxt->src.val = ctxt->dst.orig_val;
+ write_register_operand(&ctxt->src);
break;
case 0xc3: /* movnti */
- c->dst.bytes = c->op_bytes;
- c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
- (u64) c->src.val;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
+ (u64) ctxt->src.val;
break;
case 0xc7: /* Grp9 (cmpxchg8b) */
rc = em_grp9(ctxt);
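The cmov (0x40-0x4f), jcc (0x80-0x8f) and setcc (0x90-0x9f) cases above all funnel through test_cc(ctxt->b, ctxt->eflags). As a hedged sketch of how such a condition-code test can be structured (this models only four of the eight condition pairs; names and layout are illustrative, not the emulator's actual implementation):

#include <stdbool.h>
#include <stdint.h>

#define F_CF (1u << 0)
#define F_ZF (1u << 6)
#define F_SF (1u << 7)
#define F_OF (1u << 11)

/* Even/odd opcode pairs share a predicate; the low opcode bit negates
 * it (e.g. 0x44 cmovz vs 0x45 cmovnz). */
static bool test_cc_sketch(uint8_t opcode, uint32_t flags)
{
	bool r;

	switch ((opcode >> 1) & 7) {
	case 0: r = flags & F_OF; break;          /* o  / no */
	case 1: r = flags & F_CF; break;          /* b  / nb */
	case 2: r = flags & F_ZF; break;          /* z  / nz */
	case 3: r = flags & (F_CF | F_ZF); break; /* be / a  */
	default: r = false; break;                /* s, p, l, le elided */
	}
	return (opcode & 1) ? !r : r;
}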
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aee38623b768..9335e1bf72ad 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -148,7 +148,7 @@ module_param(oos_shadow, bool, 0644);
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
| PT64_NX_MASK)
-#define RMAP_EXT 4
+#define PTE_LIST_EXT 4
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
@@ -164,16 +164,16 @@ module_param(oos_shadow, bool, 0644);
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
-struct kvm_rmap_desc {
- u64 *sptes[RMAP_EXT];
- struct kvm_rmap_desc *more;
+struct pte_list_desc {
+ u64 *sptes[PTE_LIST_EXT];
+ struct pte_list_desc *more;
};
struct kvm_shadow_walk_iterator {
u64 addr;
hpa_t shadow_addr;
- int level;
u64 *sptep;
+ int level;
unsigned index;
};
@@ -182,32 +182,68 @@ struct kvm_shadow_walk_iterator {
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
-typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
+#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
+ for (shadow_walk_init(&(_walker), _vcpu, _addr); \
+ shadow_walk_okay(&(_walker)) && \
+ ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
+ __shadow_walk_next(&(_walker), spte))
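The "({ ...; 1; })" in the loop condition is a GCC statement expression: it snapshots the current spte as a side effect and then evaluates to 1, so it never terminates the && chain by itself; only shadow_walk_okay() does. A minimal standalone illustration of the idiom (nothing kvm-specific here):

#include <stdio.h>

int main(void)
{
	int v = 3, snapshot = 0;

	/* the statement expression stores a snapshot, then yields 1 */
	while (v > 0 && ({ snapshot = v; 1; }))
		v--;

	printf("last snapshot: %d\n", snapshot); /* prints 1 */
	return 0;
}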
-static struct kmem_cache *pte_chain_cache;
-static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;
-static u64 __read_mostly shadow_trap_nonpresent_pte;
-static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mmio_mask;
-static inline u64 rsvd_bits(int s, int e)
+static void mmu_spte_set(u64 *sptep, u64 spte);
+
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
- return ((1ULL << (e - s + 1)) - 1) << s;
+ shadow_mmio_mask = mmio_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+
+static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
+{
+ access &= ACC_WRITE_MASK | ACC_USER_MASK;
+
+ trace_mark_mmio_spte(sptep, gfn, access);
+ mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
}
-void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
+static bool is_mmio_spte(u64 spte)
{
- shadow_trap_nonpresent_pte = trap_pte;
- shadow_notrap_nonpresent_pte = notrap_pte;
+ return (spte & shadow_mmio_mask) == shadow_mmio_mask;
+}
+
+static gfn_t get_mmio_spte_gfn(u64 spte)
+{
+ return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
+}
+
+static unsigned get_mmio_spte_access(u64 spte)
+{
+ return (spte & ~shadow_mmio_mask) & ~PAGE_MASK;
+}
+
+static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)
+{
+ if (unlikely(is_noslot_pfn(pfn))) {
+ mark_mmio_spte(sptep, gfn, access);
+ return true;
+ }
+
+ return false;
+}
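mark_mmio_spte() and the two decoders above define one bit layout: shadow_mmio_mask tags the entry, the low permission bits carry the access, and the gfn sits at the page-frame offset. A standalone sketch of that round trip follows; the mask value and shift are hypothetical stand-ins, not what kvm_mmu_set_mmio_spte_mask() actually receives from the vendor module:

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PAGE_MASK  (~((1ull << SK_PAGE_SHIFT) - 1))

static const uint64_t sk_mmio_mask = 0x3ull << 62; /* hypothetical mask */

/* mirrors mark_mmio_spte(): tag | access bits | gfn at page offset */
static uint64_t sk_mark_mmio(uint64_t gfn, unsigned access)
{
	return sk_mmio_mask | access | (gfn << SK_PAGE_SHIFT);
}

int main(void)
{
	uint64_t spte = sk_mark_mmio(0x1234, 0x6); /* write|user */

	/* decode mirrors get_mmio_spte_gfn()/get_mmio_spte_access() */
	printf("gfn=%#llx access=%#x\n",
	       (unsigned long long)((spte & ~sk_mmio_mask) >> SK_PAGE_SHIFT),
	       (unsigned)((spte & ~sk_mmio_mask) & ~SK_PAGE_MASK));
	return 0;
}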
+
+static inline u64 rsvd_bits(int s, int e)
+{
+ return ((1ULL << (e - s + 1)) - 1) << s;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask)
@@ -220,11 +256,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
-static bool is_write_protection(struct kvm_vcpu *vcpu)
-{
- return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
static int is_cpuid_PSE36(void)
{
return 1;
@@ -237,8 +268,7 @@ static int is_nx(struct kvm_vcpu *vcpu)
static int is_shadow_present_pte(u64 pte)
{
- return pte != shadow_trap_nonpresent_pte
- && pte != shadow_notrap_nonpresent_pte;
+ return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
}
static int is_large_pte(u64 pte)
@@ -246,11 +276,6 @@ static int is_large_pte(u64 pte)
return pte & PT_PAGE_SIZE_MASK;
}
-static int is_writable_pte(unsigned long pte)
-{
- return pte & PT_WRITABLE_MASK;
-}
-
static int is_dirty_gpte(unsigned long pte)
{
return pte & PT_DIRTY_MASK;
@@ -282,26 +307,154 @@ static gfn_t pse36_gfn_delta(u32 gpte)
return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
+#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
- set_64bit(sptep, spte);
+ *sptep = spte;
}
-static u64 __xchg_spte(u64 *sptep, u64 new_spte)
+static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
-#ifdef CONFIG_X86_64
- return xchg(sptep, new_spte);
+ *sptep = spte;
+}
+
+static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
+{
+ return xchg(sptep, spte);
+}
+
+static u64 __get_spte_lockless(u64 *sptep)
+{
+ return ACCESS_ONCE(*sptep);
+}
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ /* It is valid if the spte is zapped. */
+ return spte == 0ull;
+}
#else
- u64 old_spte;
+union split_spte {
+ struct {
+ u32 spte_low;
+ u32 spte_high;
+ };
+ u64 spte;
+};
- do {
- old_spte = *sptep;
- } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
+static void count_spte_clear(u64 *sptep, u64 spte)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
- return old_spte;
-#endif
+ if (is_shadow_present_pte(spte))
+ return;
+
+ /* Ensure the spte is completely set before we increase the count */
+ smp_wmb();
+ sp->clear_spte_count++;
+}
+
+static void __set_spte(u64 *sptep, u64 spte)
+{
+ union split_spte *ssptep, sspte;
+
+ ssptep = (union split_spte *)sptep;
+ sspte = (union split_spte)spte;
+
+ ssptep->spte_high = sspte.spte_high;
+
+ /*
+ * If we map the spte from nonpresent to present, we should store
+ * the high bits first and only then set the present bit, so the
+ * CPU cannot fetch this spte while we are still setting it.
+ */
+ smp_wmb();
+
+ ssptep->spte_low = sspte.spte_low;
}
+static void __update_clear_spte_fast(u64 *sptep, u64 spte)
+{
+ union split_spte *ssptep, sspte;
+
+ ssptep = (union split_spte *)sptep;
+ sspte = (union split_spte)spte;
+
+ ssptep->spte_low = sspte.spte_low;
+
+ /*
+ * If we map the spte from present to nonpresent, we should clear
+ * the present bit first so the vcpu cannot fetch the stale high bits.
+ */
+ smp_wmb();
+
+ ssptep->spte_high = sspte.spte_high;
+ count_spte_clear(sptep, spte);
+}
+
+static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
+{
+ union split_spte *ssptep, sspte, orig;
+
+ ssptep = (union split_spte *)sptep;
+ sspte = (union split_spte)spte;
+
+ /* xchg acts as a barrier before the setting of the high bits */
+ orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
+ orig.spte_high = ssptep->spte_high = sspte.spte_high;
+ count_spte_clear(sptep, spte);
+
+ return orig.spte;
+}
+
+/*
+ * The idea of this lightweight way to get the spte on x86_32 hosts is
+ * taken from gup_get_pte (arch/x86/mm/gup.c).
+ * The difference is that we cannot observe the spte's TLB flush once
+ * we leave guest mode, so we emulate it by increasing clear_spte_count
+ * whenever an spte is cleared.
+ */
+static u64 __get_spte_lockless(u64 *sptep)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ union split_spte spte, *orig = (union split_spte *)sptep;
+ int count;
+
+retry:
+ count = sp->clear_spte_count;
+ smp_rmb();
+
+ spte.spte_low = orig->spte_low;
+ smp_rmb();
+
+ spte.spte_high = orig->spte_high;
+ smp_rmb();
+
+ if (unlikely(spte.spte_low != orig->spte_low ||
+ count != sp->clear_spte_count))
+ goto retry;
+
+ return spte.spte;
+}
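A hedged user-space model of the retry scheme above, with the kernel's smp_rmb() replaced by acquire fences from the __atomic builtins; the writer side is assumed to publish halves in the order __set_spte()/__update_clear_spte_fast() do, bumping clear_count on destructive clears:

#include <stdint.h>

struct split64 {
	volatile uint32_t lo;
	volatile uint32_t hi;
	volatile uint32_t clear_count;
};

/* Reader only: retry whenever the low half or the clear count moved
 * under us, so a torn 64-bit value can never be returned. */
static uint64_t read_split64(struct split64 *s)
{
	uint32_t count, lo, hi;

	do {
		count = s->clear_count;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		lo = s->lo;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		hi = s->hi;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (lo != s->lo || count != s->clear_count);

	return ((uint64_t)hi << 32) | lo;
}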
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ union split_spte sspte = (union split_spte)spte;
+ u32 high_mmio_mask = shadow_mmio_mask >> 32;
+
+ /* It is valid if the spte is zapped. */
+ if (spte == 0ull)
+ return true;
+
+ /* It is valid if the spte is being zapped. */
+ if (sspte.spte_low == 0ull &&
+ (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+ return true;
+
+ return false;
+}
+#endif
+
static bool spte_has_volatile_bits(u64 spte)
{
if (!shadow_accessed_mask)
@@ -322,12 +475,30 @@ static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
return (old_spte & bit_mask) && !(new_spte & bit_mask);
}
-static void update_spte(u64 *sptep, u64 new_spte)
+/* Rules for using mmu_spte_set:
+ * Set the sptep from nonpresent to present.
+ * Note: the sptep being assigned *must* be either not present
+ * or in a state where the hardware will not attempt to update
+ * the spte.
+ */
+static void mmu_spte_set(u64 *sptep, u64 new_spte)
+{
+ WARN_ON(is_shadow_present_pte(*sptep));
+ __set_spte(sptep, new_spte);
+}
+
+/* Rules for using mmu_spte_update:
+ * Update the state bits; this implies the mapped pfn is not changed.
+ */
+static void mmu_spte_update(u64 *sptep, u64 new_spte)
{
u64 mask, old_spte = *sptep;
WARN_ON(!is_rmap_spte(new_spte));
+ if (!is_shadow_present_pte(old_spte))
+ return mmu_spte_set(sptep, new_spte);
+
new_spte |= old_spte & shadow_dirty_mask;
mask = shadow_accessed_mask;
@@ -335,9 +506,9 @@ static void update_spte(u64 *sptep, u64 new_spte)
mask |= shadow_dirty_mask;
if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
- __set_spte(sptep, new_spte);
+ __update_clear_spte_fast(sptep, new_spte);
else
- old_spte = __xchg_spte(sptep, new_spte);
+ old_spte = __update_clear_spte_slow(sptep, new_spte);
if (!shadow_accessed_mask)
return;
@@ -348,6 +519,64 @@ static void update_spte(u64 *sptep, u64 new_spte)
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
}
+/*
+ * Rules for using mmu_spte_clear_track_bits:
+ * It sets the sptep from present to nonpresent and tracks the
+ * state bits; it is used to clear a last-level sptep.
+ */
+static int mmu_spte_clear_track_bits(u64 *sptep)
+{
+ pfn_t pfn;
+ u64 old_spte = *sptep;
+
+ if (!spte_has_volatile_bits(old_spte))
+ __update_clear_spte_fast(sptep, 0ull);
+ else
+ old_spte = __update_clear_spte_slow(sptep, 0ull);
+
+ if (!is_rmap_spte(old_spte))
+ return 0;
+
+ pfn = spte_to_pfn(old_spte);
+ if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
+ kvm_set_pfn_accessed(pfn);
+ if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+ kvm_set_pfn_dirty(pfn);
+ return 1;
+}
+
+/*
+ * Rules for using mmu_spte_clear_no_track:
+ * Directly clear the spte without caring about its state bits;
+ * it is used when zapping an upper-level spte.
+ */
+static void mmu_spte_clear_no_track(u64 *sptep)
+{
+ __update_clear_spte_fast(sptep, 0ull);
+}
+
+static u64 mmu_spte_get_lockless(u64 *sptep)
+{
+ return __get_spte_lockless(sptep);
+}
+
+static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
+{
+ rcu_read_lock();
+ atomic_inc(&vcpu->kvm->arch.reader_counter);
+
+ /* Increase the counter before walking the shadow page table */
+ smp_mb__after_atomic_inc();
+}
+
+static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
+{
+ /* Decrease the counter after the shadow page table walk has finished */
+ smp_mb__before_atomic_dec();
+ atomic_dec(&vcpu->kvm->arch.reader_counter);
+ rcu_read_unlock();
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
struct kmem_cache *base_cache, int min)
{
@@ -397,12 +626,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
int r;
- r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
- pte_chain_cache, 4);
- if (r)
- goto out;
- r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
- rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
+ r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+ pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
if (r)
goto out;
r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
@@ -416,8 +641,8 @@ out:
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
- mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
- mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
+ mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+ pte_list_desc_cache);
mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
mmu_page_header_cache);
@@ -433,26 +658,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
return p;
}
-static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
-{
- return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
- sizeof(struct kvm_pte_chain));
-}
-
-static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
+static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
- kmem_cache_free(pte_chain_cache, pc);
+ return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
+ sizeof(struct pte_list_desc));
}
-static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
+static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
- return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
- sizeof(struct kvm_rmap_desc));
-}
-
-static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
-{
- kmem_cache_free(rmap_desc_cache, rd);
+ kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
@@ -498,6 +712,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
linfo = lpage_info_slot(gfn, slot, i);
linfo->write_count += 1;
}
+ kvm->arch.indirect_shadow_pages++;
}
static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -513,6 +728,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
linfo->write_count -= 1;
WARN_ON(linfo->write_count < 0);
}
+ kvm->arch.indirect_shadow_pages--;
}
static int has_wrprotected_page(struct kvm *kvm,
@@ -588,67 +804,42 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
}
/*
- * Take gfn and return the reverse mapping to it.
- */
-
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
-{
- struct kvm_memory_slot *slot;
- struct kvm_lpage_info *linfo;
-
- slot = gfn_to_memslot(kvm, gfn);
- if (likely(level == PT_PAGE_TABLE_LEVEL))
- return &slot->rmap[gfn - slot->base_gfn];
-
- linfo = lpage_info_slot(gfn, slot, level);
-
- return &linfo->rmap_pde;
-}
-
-/*
- * Reverse mapping data structures:
+ * Pte mapping structures:
*
- * If rmapp bit zero is zero, then rmapp point to the shadw page table entry
- * that points to page_address(page).
+ * If pte_list bit zero is zero, then pte_list points to the spte.
*
- * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
- * containing more mappings.
+ * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
+ * pte_list_desc containing more mappings.
*
- * Returns the number of rmap entries before the spte was added or zero if
+ * Returns the number of pte entries before the spte was added or zero if
* the spte was not added.
*
*/
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
+ unsigned long *pte_list)
{
- struct kvm_mmu_page *sp;
- struct kvm_rmap_desc *desc;
- unsigned long *rmapp;
+ struct pte_list_desc *desc;
int i, count = 0;
- if (!is_rmap_spte(*spte))
- return count;
- sp = page_header(__pa(spte));
- kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
- rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
- if (!*rmapp) {
- rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
- *rmapp = (unsigned long)spte;
- } else if (!(*rmapp & 1)) {
- rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
- desc = mmu_alloc_rmap_desc(vcpu);
- desc->sptes[0] = (u64 *)*rmapp;
+ if (!*pte_list) {
+ rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
+ *pte_list = (unsigned long)spte;
+ } else if (!(*pte_list & 1)) {
+ rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
+ desc = mmu_alloc_pte_list_desc(vcpu);
+ desc->sptes[0] = (u64 *)*pte_list;
desc->sptes[1] = spte;
- *rmapp = (unsigned long)desc | 1;
+ *pte_list = (unsigned long)desc | 1;
++count;
} else {
- rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
- desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
- while (desc->sptes[RMAP_EXT-1] && desc->more) {
+ rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+ while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
desc = desc->more;
- count += RMAP_EXT;
+ count += PTE_LIST_EXT;
}
- if (desc->sptes[RMAP_EXT-1]) {
- desc->more = mmu_alloc_rmap_desc(vcpu);
+ if (desc->sptes[PTE_LIST_EXT-1]) {
+ desc->more = mmu_alloc_pte_list_desc(vcpu);
desc = desc->more;
}
for (i = 0; desc->sptes[i]; ++i)
@@ -658,59 +849,78 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
return count;
}
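The encoding documented in the comment above is a pointer with bit 0 as a discriminator: zero or one mapping is stored inline, and a chain of descriptors takes over beyond that. A minimal standalone model of the tagging (descriptor size and helper names are illustrative):

#include <stdint.h>
#include <stddef.h>

#define SK_LIST_EXT 4

struct sk_desc {
	uint64_t *sptes[SK_LIST_EXT];
	struct sk_desc *more;
};

/* bit 0 clear: the value is a single spte pointer (NULL if empty);
 * bit 0 set:   the value, minus the tag, is a descriptor chain. */
static int sk_is_chain(unsigned long pte_list)
{
	return pte_list & 1;
}

static struct sk_desc *sk_chain(unsigned long pte_list)
{
	return (struct sk_desc *)(pte_list & ~1ul);
}

static unsigned long sk_tag_chain(struct sk_desc *desc)
{
	return (unsigned long)desc | 1;
}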
-static void rmap_desc_remove_entry(unsigned long *rmapp,
- struct kvm_rmap_desc *desc,
- int i,
- struct kvm_rmap_desc *prev_desc)
+static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
+{
+ struct pte_list_desc *desc;
+ u64 *prev_spte;
+ int i;
+
+ if (!*pte_list)
+ return NULL;
+ else if (!(*pte_list & 1)) {
+ if (!spte)
+ return (u64 *)*pte_list;
+ return NULL;
+ }
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+ prev_spte = NULL;
+ while (desc) {
+ for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
+ if (prev_spte == spte)
+ return desc->sptes[i];
+ prev_spte = desc->sptes[i];
+ }
+ desc = desc->more;
+ }
+ return NULL;
+}
+
+static void
+pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
+ int i, struct pte_list_desc *prev_desc)
{
int j;
- for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
+ for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
;
desc->sptes[i] = desc->sptes[j];
desc->sptes[j] = NULL;
if (j != 0)
return;
if (!prev_desc && !desc->more)
- *rmapp = (unsigned long)desc->sptes[0];
+ *pte_list = (unsigned long)desc->sptes[0];
else
if (prev_desc)
prev_desc->more = desc->more;
else
- *rmapp = (unsigned long)desc->more | 1;
- mmu_free_rmap_desc(desc);
+ *pte_list = (unsigned long)desc->more | 1;
+ mmu_free_pte_list_desc(desc);
}
-static void rmap_remove(struct kvm *kvm, u64 *spte)
+static void pte_list_remove(u64 *spte, unsigned long *pte_list)
{
- struct kvm_rmap_desc *desc;
- struct kvm_rmap_desc *prev_desc;
- struct kvm_mmu_page *sp;
- gfn_t gfn;
- unsigned long *rmapp;
+ struct pte_list_desc *desc;
+ struct pte_list_desc *prev_desc;
int i;
- sp = page_header(__pa(spte));
- gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
- rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
- if (!*rmapp) {
- printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
+ if (!*pte_list) {
+ printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
BUG();
- } else if (!(*rmapp & 1)) {
- rmap_printk("rmap_remove: %p 1->0\n", spte);
- if ((u64 *)*rmapp != spte) {
- printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte);
+ } else if (!(*pte_list & 1)) {
+ rmap_printk("pte_list_remove: %p 1->0\n", spte);
+ if ((u64 *)*pte_list != spte) {
+ printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
BUG();
}
- *rmapp = 0;
+ *pte_list = 0;
} else {
- rmap_printk("rmap_remove: %p many->many\n", spte);
- desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
+ rmap_printk("pte_list_remove: %p many->many\n", spte);
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
prev_desc = NULL;
while (desc) {
- for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+ for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
if (desc->sptes[i] == spte) {
- rmap_desc_remove_entry(rmapp,
+ pte_list_desc_remove_entry(pte_list,
desc, i,
prev_desc);
return;
@@ -718,62 +928,80 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
prev_desc = desc;
desc = desc->more;
}
- pr_err("rmap_remove: %p many->many\n", spte);
+ pr_err("pte_list_remove: %p many->many\n", spte);
BUG();
}
}
-static int set_spte_track_bits(u64 *sptep, u64 new_spte)
+typedef void (*pte_list_walk_fn) (u64 *spte);
+static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
{
- pfn_t pfn;
- u64 old_spte = *sptep;
+ struct pte_list_desc *desc;
+ int i;
- if (!spte_has_volatile_bits(old_spte))
- __set_spte(sptep, new_spte);
- else
- old_spte = __xchg_spte(sptep, new_spte);
+ if (!*pte_list)
+ return;
- if (!is_rmap_spte(old_spte))
- return 0;
+ if (!(*pte_list & 1))
+ return fn((u64 *)*pte_list);
- pfn = spte_to_pfn(old_spte);
- if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
- kvm_set_pfn_accessed(pfn);
- if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
- kvm_set_pfn_dirty(pfn);
- return 1;
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+ while (desc) {
+ for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
+ fn(desc->sptes[i]);
+ desc = desc->more;
+ }
}
-static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
+/*
+ * Take gfn and return the reverse mapping to it.
+ */
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
- if (set_spte_track_bits(sptep, new_spte))
- rmap_remove(kvm, sptep);
+ struct kvm_memory_slot *slot;
+ struct kvm_lpage_info *linfo;
+
+ slot = gfn_to_memslot(kvm, gfn);
+ if (likely(level == PT_PAGE_TABLE_LEVEL))
+ return &slot->rmap[gfn - slot->base_gfn];
+
+ linfo = lpage_info_slot(gfn, slot, level);
+
+ return &linfo->rmap_pde;
+}
+
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+{
+ struct kvm_mmu_page *sp;
+ unsigned long *rmapp;
+
+ sp = page_header(__pa(spte));
+ kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
+ rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+ return pte_list_add(vcpu, spte, rmapp);
}
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
- struct kvm_rmap_desc *desc;
- u64 *prev_spte;
- int i;
+ return pte_list_next(rmapp, spte);
+}
- if (!*rmapp)
- return NULL;
- else if (!(*rmapp & 1)) {
- if (!spte)
- return (u64 *)*rmapp;
- return NULL;
- }
- desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
- prev_spte = NULL;
- while (desc) {
- for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
- if (prev_spte == spte)
- return desc->sptes[i];
- prev_spte = desc->sptes[i];
- }
- desc = desc->more;
- }
- return NULL;
+static void rmap_remove(struct kvm *kvm, u64 *spte)
+{
+ struct kvm_mmu_page *sp;
+ gfn_t gfn;
+ unsigned long *rmapp;
+
+ sp = page_header(__pa(spte));
+ gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+ rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
+ pte_list_remove(spte, rmapp);
+}
+
+static void drop_spte(struct kvm *kvm, u64 *sptep)
+{
+ if (mmu_spte_clear_track_bits(sptep))
+ rmap_remove(kvm, sptep);
}
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
@@ -790,7 +1018,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
if (is_writable_pte(*spte)) {
- update_spte(spte, *spte & ~PT_WRITABLE_MASK);
+ mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
write_protected = 1;
}
spte = rmap_next(kvm, rmapp, spte);
@@ -807,8 +1035,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
if (is_writable_pte(*spte)) {
- drop_spte(kvm, spte,
- shadow_trap_nonpresent_pte);
+ drop_spte(kvm, spte);
--kvm->stat.lpages;
spte = NULL;
write_protected = 1;
@@ -829,7 +1056,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
while ((spte = rmap_next(kvm, rmapp, NULL))) {
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
- drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
+ drop_spte(kvm, spte);
need_tlb_flush = 1;
}
return need_tlb_flush;
@@ -851,7 +1078,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
need_flush = 1;
if (pte_write(*ptep)) {
- drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
+ drop_spte(kvm, spte);
spte = rmap_next(kvm, rmapp, NULL);
} else {
new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
@@ -860,7 +1087,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~SPTE_HOST_WRITEABLE;
new_spte &= ~shadow_accessed_mask;
- set_spte_track_bits(spte, new_spte);
+ mmu_spte_clear_track_bits(spte);
+ mmu_spte_set(spte, new_spte);
spte = rmap_next(kvm, rmapp, spte);
}
}
@@ -1032,151 +1260,89 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
-static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+/*
+ * Remove the sp from the shadow page cache; after this call the sp
+ * can no longer be found in the cache, but the shadow page table
+ * itself is still valid.
+ * It must be called under the protection of the mmu lock.
+ */
+static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
{
ASSERT(is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
- list_del(&sp->link);
- free_page((unsigned long)sp->spt);
if (!sp->role.direct)
free_page((unsigned long)sp->gfns);
- kmem_cache_free(mmu_page_header_cache, sp);
- kvm_mod_used_mmu_pages(kvm, -1);
}
-static unsigned kvm_page_table_hashfn(gfn_t gfn)
+/*
+ * Free the shadow page table and the sp; this may be done
+ * outside the protection of the mmu lock.
+ */
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
- return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
+ list_del(&sp->link);
+ free_page((unsigned long)sp->spt);
+ kmem_cache_free(mmu_page_header_cache, sp);
}
-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
- u64 *parent_pte, int direct)
+static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
- struct kvm_mmu_page *sp;
-
- sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
- sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
- if (!direct)
- sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
- PAGE_SIZE);
- set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
- list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
- sp->multimapped = 0;
- sp->parent_pte = parent_pte;
- kvm_mod_used_mmu_pages(vcpu->kvm, +1);
- return sp;
+ return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *parent_pte)
{
- struct kvm_pte_chain *pte_chain;
- struct hlist_node *node;
- int i;
-
if (!parent_pte)
return;
- if (!sp->multimapped) {
- u64 *old = sp->parent_pte;
- if (!old) {
- sp->parent_pte = parent_pte;
- return;
- }
- sp->multimapped = 1;
- pte_chain = mmu_alloc_pte_chain(vcpu);
- INIT_HLIST_HEAD(&sp->parent_ptes);
- hlist_add_head(&pte_chain->link, &sp->parent_ptes);
- pte_chain->parent_ptes[0] = old;
- }
- hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
- if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
- continue;
- for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
- if (!pte_chain->parent_ptes[i]) {
- pte_chain->parent_ptes[i] = parent_pte;
- return;
- }
- }
- pte_chain = mmu_alloc_pte_chain(vcpu);
- BUG_ON(!pte_chain);
- hlist_add_head(&pte_chain->link, &sp->parent_ptes);
- pte_chain->parent_ptes[0] = parent_pte;
+ pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
u64 *parent_pte)
{
- struct kvm_pte_chain *pte_chain;
- struct hlist_node *node;
- int i;
-
- if (!sp->multimapped) {
- BUG_ON(sp->parent_pte != parent_pte);
- sp->parent_pte = NULL;
- return;
- }
- hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
- for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
- if (!pte_chain->parent_ptes[i])
- break;
- if (pte_chain->parent_ptes[i] != parent_pte)
- continue;
- while (i + 1 < NR_PTE_CHAIN_ENTRIES
- && pte_chain->parent_ptes[i + 1]) {
- pte_chain->parent_ptes[i]
- = pte_chain->parent_ptes[i + 1];
- ++i;
- }
- pte_chain->parent_ptes[i] = NULL;
- if (i == 0) {
- hlist_del(&pte_chain->link);
- mmu_free_pte_chain(pte_chain);
- if (hlist_empty(&sp->parent_ptes)) {
- sp->multimapped = 0;
- sp->parent_pte = NULL;
- }
- }
- return;
- }
- BUG();
+ pte_list_remove(parent_pte, &sp->parent_ptes);
}
-static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
+static void drop_parent_pte(struct kvm_mmu_page *sp,
+ u64 *parent_pte)
{
- struct kvm_pte_chain *pte_chain;
- struct hlist_node *node;
- struct kvm_mmu_page *parent_sp;
- int i;
-
- if (!sp->multimapped && sp->parent_pte) {
- parent_sp = page_header(__pa(sp->parent_pte));
- fn(parent_sp, sp->parent_pte);
- return;
- }
-
- hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
- for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
- u64 *spte = pte_chain->parent_ptes[i];
+ mmu_page_remove_parent_pte(sp, parent_pte);
+ mmu_spte_clear_no_track(parent_pte);
+}
- if (!spte)
- break;
- parent_sp = page_header(__pa(spte));
- fn(parent_sp, spte);
- }
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+ u64 *parent_pte, int direct)
+{
+ struct kvm_mmu_page *sp;
+ sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache,
+ sizeof *sp);
+ sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+ if (!direct)
+ sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
+ PAGE_SIZE);
+ set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+ list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+ bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+ sp->parent_ptes = 0;
+ mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+ kvm_mod_used_mmu_pages(vcpu->kvm, +1);
+ return sp;
}
-static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
+static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
- mmu_parent_walk(sp, mark_unsync);
+ pte_list_walk(&sp->parent_ptes, mark_unsync);
}
-static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
+static void mark_unsync(u64 *spte)
{
+ struct kvm_mmu_page *sp;
unsigned int index;
+ sp = page_header(__pa(spte));
index = spte - sp->spt;
if (__test_and_set_bit(index, sp->unsync_child_bitmap))
return;
@@ -1185,15 +1351,6 @@ static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
kvm_mmu_mark_parents_unsync(sp);
}
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp)
-{
- int i;
-
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
- sp->spt[i] = shadow_trap_nonpresent_pte;
-}
-
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
@@ -1475,6 +1632,14 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
}
}
+static void init_shadow_page_table(struct kvm_mmu_page *sp)
+{
+ int i;
+
+ for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+ sp->spt[i] = 0ull;
+}
+
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
gfn_t gfn,
gva_t gaddr,
@@ -1537,10 +1702,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
account_shadowed(vcpu->kvm, gfn);
}
- if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
- vcpu->arch.mmu.prefetch_page(vcpu, sp);
- else
- nonpaging_prefetch_page(vcpu, sp);
+ init_shadow_page_table(sp);
trace_kvm_mmu_get_page(sp, true);
return sp;
}
@@ -1572,21 +1734,28 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
if (iterator->level < PT_PAGE_TABLE_LEVEL)
return false;
- if (iterator->level == PT_PAGE_TABLE_LEVEL)
- if (is_large_pte(*iterator->sptep))
- return false;
-
iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
return true;
}
-static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
+ u64 spte)
{
- iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
+ if (is_last_spte(spte, iterator->level)) {
+ iterator->level = 0;
+ return;
+ }
+
+ iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
--iterator->level;
}
+static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+{
+ return __shadow_walk_next(iterator, *iterator->sptep);
+}
+
static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
u64 spte;
@@ -1594,13 +1763,13 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
spte = __pa(sp->spt)
| PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
- __set_spte(sptep, spte);
+ mmu_spte_set(sptep, spte);
}
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
if (is_large_pte(*sptep)) {
- drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
}
}
@@ -1622,38 +1791,39 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (child->role.access == direct_access)
return;
- mmu_page_remove_parent_pte(child, sptep);
- __set_spte(sptep, shadow_trap_nonpresent_pte);
+ drop_parent_pte(child, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
}
}
+static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+ u64 *spte)
+{
+ u64 pte;
+ struct kvm_mmu_page *child;
+
+ pte = *spte;
+ if (is_shadow_present_pte(pte)) {
+ if (is_last_spte(pte, sp->role.level))
+ drop_spte(kvm, spte);
+ else {
+ child = page_header(pte & PT64_BASE_ADDR_MASK);
+ drop_parent_pte(child, spte);
+ }
+ } else if (is_mmio_spte(pte))
+ mmu_spte_clear_no_track(spte);
+
+ if (is_large_pte(pte))
+ --kvm->stat.lpages;
+}
+
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp)
{
unsigned i;
- u64 *pt;
- u64 ent;
-
- pt = sp->spt;
-
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
- ent = pt[i];
-
- if (is_shadow_present_pte(ent)) {
- if (!is_last_spte(ent, sp->role.level)) {
- ent &= PT64_BASE_ADDR_MASK;
- mmu_page_remove_parent_pte(page_header(ent),
- &pt[i]);
- } else {
- if (is_large_pte(ent))
- --kvm->stat.lpages;
- drop_spte(kvm, &pt[i],
- shadow_trap_nonpresent_pte);
- }
- }
- pt[i] = shadow_trap_nonpresent_pte;
- }
+
+ for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+ mmu_page_zap_pte(kvm, sp, sp->spt + i);
}
static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
@@ -1674,20 +1844,8 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
u64 *parent_pte;
- while (sp->multimapped || sp->parent_pte) {
- if (!sp->multimapped)
- parent_pte = sp->parent_pte;
- else {
- struct kvm_pte_chain *chain;
-
- chain = container_of(sp->parent_ptes.first,
- struct kvm_pte_chain, link);
- parent_pte = chain->parent_ptes[0];
- }
- BUG_ON(!parent_pte);
- kvm_mmu_put_page(sp, parent_pte);
- __set_spte(parent_pte, shadow_trap_nonpresent_pte);
- }
+ while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL)))
+ drop_parent_pte(sp, parent_pte);
}
static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -1734,6 +1892,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
/* Count self */
ret++;
list_move(&sp->link, invalid_list);
+ kvm_mod_used_mmu_pages(kvm, -1);
} else {
list_move(&sp->link, &kvm->arch.active_mmu_pages);
kvm_reload_remote_mmus(kvm);
@@ -1744,6 +1903,30 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
return ret;
}
+static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
+{
+ struct kvm_mmu_page *sp;
+
+ list_for_each_entry(sp, invalid_list, link)
+ kvm_mmu_isolate_page(sp);
+}
+
+static void free_pages_rcu(struct rcu_head *head)
+{
+ struct kvm_mmu_page *next, *sp;
+
+ sp = container_of(head, struct kvm_mmu_page, rcu);
+ while (sp) {
+ if (!list_empty(&sp->link))
+ next = list_first_entry(&sp->link,
+ struct kvm_mmu_page, link);
+ else
+ next = NULL;
+ kvm_mmu_free_page(sp);
+ sp = next;
+ }
+}
+
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
@@ -1754,10 +1937,21 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
kvm_flush_remote_tlbs(kvm);
+ if (atomic_read(&kvm->arch.reader_counter)) {
+ kvm_mmu_isolate_pages(invalid_list);
+ sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
+ list_del_init(invalid_list);
+
+ trace_kvm_mmu_delay_free_pages(sp);
+ call_rcu(&sp->rcu, free_pages_rcu);
+ return;
+ }
+
do {
sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
WARN_ON(!sp->role.invalid || sp->root_count);
- kvm_mmu_free_page(kvm, sp);
+ kvm_mmu_isolate_page(sp);
+ kvm_mmu_free_page(sp);
} while (!list_empty(invalid_list));
}
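The reader_counter test above is the whole synchronization story for the lockless walkers: pages a walker might still dereference go through an RCU grace period, everything else is freed on the spot. A toy model of that gate, with sk_defer_free() standing in for call_rcu() (all names illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct sk_page { struct sk_page *next; };

static atomic_int sk_reader_counter;

static void sk_defer_free(struct sk_page *p)
{
	(void)p; /* queue for a later grace period; elided */
}

static void sk_commit_free(struct sk_page *p)
{
	if (atomic_load(&sk_reader_counter))
		sk_defer_free(p); /* a walker may still dereference it */
	else
		free(p);          /* nobody can observe it any more */
}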
@@ -1783,8 +1977,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
}
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
}
@@ -1833,20 +2027,6 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
__set_bit(slot, sp->slot_bitmap);
}
-static void mmu_convert_notrap(struct kvm_mmu_page *sp)
-{
- int i;
- u64 *pt = sp->spt;
-
- if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
- return;
-
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
- if (pt[i] == shadow_notrap_nonpresent_pte)
- __set_spte(&pt[i], shadow_trap_nonpresent_pte);
- }
-}
-
/*
* The function is based on mtrr_type_lookup() in
* arch/x86/kernel/cpu/mtrr/generic.c
@@ -1959,7 +2139,6 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
sp->unsync = 1;
kvm_mmu_mark_parents_unsync(sp);
- mmu_convert_notrap(sp);
}
static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -2002,13 +2181,16 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pte_access, int user_fault,
- int write_fault, int dirty, int level,
+ int write_fault, int level,
gfn_t gfn, pfn_t pfn, bool speculative,
bool can_unsync, bool host_writable)
{
u64 spte, entry = *sptep;
int ret = 0;
+ if (set_mmio_spte(sptep, gfn, pfn, pte_access))
+ return 0;
+
/*
* We don't set the accessed bit, since we sometimes want to see
* whether the guest actually used the pte (in order to detect
@@ -2017,8 +2199,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte = PT_PRESENT_MASK;
if (!speculative)
spte |= shadow_accessed_mask;
- if (!dirty)
- pte_access &= ~ACC_WRITE_MASK;
+
if (pte_access & ACC_EXEC_MASK)
spte |= shadow_x_mask;
else
@@ -2045,15 +2226,24 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (level > PT_PAGE_TABLE_LEVEL &&
has_wrprotected_page(vcpu->kvm, gfn, level)) {
ret = 1;
- drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
goto done;
}
spte |= PT_WRITABLE_MASK;
if (!vcpu->arch.mmu.direct_map
- && !(pte_access & ACC_WRITE_MASK))
+ && !(pte_access & ACC_WRITE_MASK)) {
spte &= ~PT_USER_MASK;
+ /*
+ * If we converted a user page to a kernel page so that the
+ * kernel can write to it when cr0.wp=0, then we should also
+ * prevent the kernel from executing it if SMEP is enabled.
+ */
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ spte |= PT64_NX_MASK;
+ }
/*
* Optimization: for pte sync, if spte was writable the hash
@@ -2078,7 +2268,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
mark_page_dirty(vcpu->kvm, gfn);
set_pte:
- update_spte(sptep, spte);
+ mmu_spte_update(sptep, spte);
/*
* If we overwrite a writable spte with a read-only one we
* should flush remote TLBs. Otherwise rmap_write_protect
@@ -2093,8 +2283,8 @@ done:
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pt_access, unsigned pte_access,
- int user_fault, int write_fault, int dirty,
- int *ptwrite, int level, gfn_t gfn,
+ int user_fault, int write_fault,
+ int *emulate, int level, gfn_t gfn,
pfn_t pfn, bool speculative,
bool host_writable)
{
@@ -2117,26 +2307,28 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
u64 pte = *sptep;
child = page_header(pte & PT64_BASE_ADDR_MASK);
- mmu_page_remove_parent_pte(child, sptep);
- __set_spte(sptep, shadow_trap_nonpresent_pte);
+ drop_parent_pte(child, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
} else if (pfn != spte_to_pfn(*sptep)) {
pgprintk("hfn old %llx new %llx\n",
spte_to_pfn(*sptep), pfn);
- drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
} else
was_rmapped = 1;
}
if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
- dirty, level, gfn, pfn, speculative, true,
+ level, gfn, pfn, speculative, true,
host_writable)) {
if (write_fault)
- *ptwrite = 1;
+ *emulate = 1;
kvm_mmu_flush_tlb(vcpu);
}
+ if (unlikely(is_mmio_spte(*sptep) && emulate))
+ *emulate = 1;
+
pgprintk("%s: setting spte %llx\n", __func__, *sptep);
pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
is_large_pte(*sptep)? "2MB" : "4kB",
@@ -2145,11 +2337,13 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!was_rmapped && is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
- page_header_update_slot(vcpu->kvm, sptep, gfn);
- if (!was_rmapped) {
- rmap_count = rmap_add(vcpu, sptep, gfn);
- if (rmap_count > RMAP_RECYCLE_THRESHOLD)
- rmap_recycle(vcpu, sptep, gfn);
+ if (is_shadow_present_pte(*sptep)) {
+ page_header_update_slot(vcpu->kvm, sptep, gfn);
+ if (!was_rmapped) {
+ rmap_count = rmap_add(vcpu, sptep, gfn);
+ if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+ rmap_recycle(vcpu, sptep, gfn);
+ }
}
kvm_release_pfn_clean(pfn);
if (speculative) {
@@ -2170,8 +2364,8 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
if (!slot) {
- get_page(bad_page);
- return page_to_pfn(bad_page);
+ get_page(fault_page);
+ return page_to_pfn(fault_page);
}
hva = gfn_to_hva_memslot(slot, gfn);
@@ -2198,7 +2392,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
for (i = 0; i < ret; i++, gfn++, start++)
mmu_set_spte(vcpu, start, ACC_ALL,
- access, 0, 0, 1, NULL,
+ access, 0, 0, NULL,
sp->role.level, gfn,
page_to_pfn(pages[i]), true, true);
@@ -2217,7 +2411,7 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
spte = sp->spt + i;
for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
- if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
+ if (is_shadow_present_pte(*spte) || spte == sptep) {
if (!start)
continue;
if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
@@ -2254,7 +2448,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
- int pt_write = 0;
+ int emulate = 0;
gfn_t pseudo_gfn;
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
@@ -2262,14 +2456,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
unsigned pte_access = ACC_ALL;
mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
- 0, write, 1, &pt_write,
+ 0, write, &emulate,
level, gfn, pfn, prefault, map_writable);
direct_pte_prefetch(vcpu, iterator.sptep);
++vcpu->stat.pf_fixed;
break;
}
- if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+ if (!is_shadow_present_pte(*iterator.sptep)) {
u64 base_addr = iterator.addr;
base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
@@ -2283,14 +2477,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
return -ENOMEM;
}
- __set_spte(iterator.sptep,
- __pa(sp->spt)
- | PT_PRESENT_MASK | PT_WRITABLE_MASK
- | shadow_user_mask | shadow_x_mask
- | shadow_accessed_mask);
+ mmu_spte_set(iterator.sptep,
+ __pa(sp->spt)
+ | PT_PRESENT_MASK | PT_WRITABLE_MASK
+ | shadow_user_mask | shadow_x_mask
+ | shadow_accessed_mask);
}
}
- return pt_write;
+ return emulate;
}
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
@@ -2306,16 +2500,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
send_sig_info(SIGBUS, &info, tsk);
}
-static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
{
kvm_release_pfn_clean(pfn);
if (is_hwpoison_pfn(pfn)) {
- kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
+ kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
return 0;
- } else if (is_fault_pfn(pfn))
- return -EFAULT;
+ }
- return 1;
+ return -EFAULT;
}
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -2360,6 +2553,30 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
}
}
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+ return unlikely(is_invalid_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ pfn_t pfn, unsigned access, int *ret_val)
+{
+ bool ret = true;
+
+ /* The pfn is invalid, report the error! */
+ if (unlikely(is_invalid_pfn(pfn))) {
+ *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+ goto exit;
+ }
+
+ if (unlikely(is_noslot_pfn(pfn)))
+ vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+
+ ret = false;
+exit:
+ return ret;
+}
+
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, pfn_t *pfn, bool write, bool *writable);
@@ -2394,9 +2611,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+ return r;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2623,6 +2839,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
+ vcpu_clear_mmio_info(vcpu, ~0ul);
trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -2667,6 +2884,94 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
}
+static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+{
+ if (direct)
+ return vcpu_match_mmio_gpa(vcpu, addr);
+
+ return vcpu_match_mmio_gva(vcpu, addr);
+}
+
+
+/*
+ * On direct hosts, the last spte only allows two states
+ * for an mmio page fault:
+ * - It is the mmio spte
+ * - It is zapped or it is being zapped.
+ *
+ * This function completely checks the spte when the last spte
+ * is not the mmio spte.
+ */
+static bool check_direct_spte_mmio_pf(u64 spte)
+{
+ return __check_direct_spte_mmio_pf(spte);
+}
+
+static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+{
+ struct kvm_shadow_walk_iterator iterator;
+ u64 spte = 0ull;
+
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
+ if (!is_shadow_present_pte(spte))
+ break;
+ walk_shadow_page_lockless_end(vcpu);
+
+ return spte;
+}
+
+/*
+ * If it is a real mmio page fault, return 1 and emulate the instruction
+ * directly; return 0 to let the CPU fault again on the address; -1 is
+ * returned if a bug is detected.
+ */
+int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+{
+ u64 spte;
+
+ if (quickly_check_mmio_pf(vcpu, addr, direct))
+ return 1;
+
+ spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
+
+ if (is_mmio_spte(spte)) {
+ gfn_t gfn = get_mmio_spte_gfn(spte);
+ unsigned access = get_mmio_spte_access(spte);
+
+ if (direct)
+ addr = 0;
+
+ trace_handle_mmio_page_fault(addr, gfn, access);
+ vcpu_cache_mmio_info(vcpu, addr, gfn, access);
+ return 1;
+ }
+
+ /*
+ * It's ok if the gva is remapped by other cpus on a shadow guest,
+ * but it's a BUG if the gfn is not an mmio page.
+ */
+ if (direct && !check_direct_spte_mmio_pf(spte))
+ return -1;
+
+ /*
+ * If the page table is zapped by other cpus, let the CPU fault again on
+ * the address.
+ */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
+
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
+ u32 error_code, bool direct)
+{
+ int ret;
+
+ ret = handle_mmio_page_fault_common(vcpu, addr, direct);
+ WARN_ON(ret < 0);
+ return ret;
+}
+
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
u32 error_code, bool prefault)
{
@@ -2674,6 +2979,10 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
int r;
pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
+
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return handle_mmio_page_fault(vcpu, gva, error_code, true);
+
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -2750,6 +3059,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return handle_mmio_page_fault(vcpu, gpa, error_code, true);
+
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -2767,9 +3079,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+ return r;
+
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
@@ -2800,7 +3112,6 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->free = nonpaging_free;
- context->prefetch_page = nonpaging_prefetch_page;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -2848,6 +3159,23 @@ static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
}
+static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
+ int *nr_present)
+{
+ if (unlikely(is_mmio_spte(*sptep))) {
+ if (gfn != get_mmio_spte_gfn(*sptep)) {
+ mmu_spte_clear_no_track(sptep);
+ return true;
+ }
+
+ (*nr_present)++;
+ mark_mmio_spte(sptep, gfn, access);
+ return true;
+ }
+
+ return false;
+}
+
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE
@@ -2930,7 +3258,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
context->new_cr3 = paging_new_cr3;
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
- context->prefetch_page = paging64_prefetch_page;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
@@ -2959,7 +3286,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
context->free = paging_free;
- context->prefetch_page = paging32_prefetch_page;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
@@ -2984,7 +3310,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;
context->free = nonpaging_free;
- context->prefetch_page = nonpaging_prefetch_page;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -3023,6 +3348,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
int r;
+ bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -3037,6 +3363,8 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+ vcpu->arch.mmu.base_role.smep_andnot_wp
+ = smep && !is_write_protection(vcpu);
return r;
}
@@ -3141,27 +3469,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
-static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp,
- u64 *spte)
-{
- u64 pte;
- struct kvm_mmu_page *child;
-
- pte = *spte;
- if (is_shadow_present_pte(pte)) {
- if (is_last_spte(pte, sp->role.level))
- drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
- else {
- child = page_header(pte & PT64_BASE_ADDR_MASK);
- mmu_page_remove_parent_pte(child, spte);
- }
- }
- __set_spte(spte, shadow_trap_nonpresent_pte);
- if (is_large_pte(pte))
- --vcpu->kvm->stat.lpages;
-}
-
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *new)
@@ -3233,6 +3540,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int level, npte, invlpg_counter, r, flooded = 0;
bool remote_flush, local_flush, zap_page;
+ /*
+ * If we don't have indirect shadow pages, it means no page is
+ * write-protected, so we can simply exit.
+ */
+ if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+ return;
+
zap_page = remote_flush = local_flush = false;
offset = offset_in_page(gpa);
@@ -3336,7 +3650,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
spte = &sp->spt[page_offset / sizeof(*spte)];
while (npte--) {
entry = *spte;
- mmu_pte_write_zap_pte(vcpu, sp, spte);
+ mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
!((sp->role.word ^ vcpu->arch.mmu.base_role.word)
& mask.word))
@@ -3380,9 +3694,9 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
- kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
++vcpu->kvm->stat.mmu_recycled;
}
+ kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
@@ -3506,15 +3820,15 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
continue;
if (is_large_pte(pt[i])) {
- drop_spte(kvm, &pt[i],
- shadow_trap_nonpresent_pte);
+ drop_spte(kvm, &pt[i]);
--kvm->stat.lpages;
continue;
}
/* avoid RMW */
if (is_writable_pte(pt[i]))
- update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
+ mmu_spte_update(&pt[i],
+ pt[i] & ~PT_WRITABLE_MASK);
}
}
kvm_flush_remote_tlbs(kvm);
@@ -3590,25 +3904,18 @@ static struct shrinker mmu_shrinker = {
static void mmu_destroy_caches(void)
{
- if (pte_chain_cache)
- kmem_cache_destroy(pte_chain_cache);
- if (rmap_desc_cache)
- kmem_cache_destroy(rmap_desc_cache);
+ if (pte_list_desc_cache)
+ kmem_cache_destroy(pte_list_desc_cache);
if (mmu_page_header_cache)
kmem_cache_destroy(mmu_page_header_cache);
}
int kvm_mmu_module_init(void)
{
- pte_chain_cache = kmem_cache_create("kvm_pte_chain",
- sizeof(struct kvm_pte_chain),
- 0, 0, NULL);
- if (!pte_chain_cache)
- goto nomem;
- rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
- sizeof(struct kvm_rmap_desc),
+ pte_list_desc_cache = kmem_cache_create("pte_list_desc",
+ sizeof(struct pte_list_desc),
0, 0, NULL);
- if (!rmap_desc_cache)
+ if (!pte_list_desc_cache)
goto nomem;
mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
@@ -3775,16 +4082,17 @@ out:
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
struct kvm_shadow_walk_iterator iterator;
+ u64 spte;
int nr_sptes = 0;
- spin_lock(&vcpu->kvm->mmu_lock);
- for_each_shadow_entry(vcpu, addr, iterator) {
- sptes[iterator.level-1] = *iterator.sptep;
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+ sptes[iterator.level-1] = spte;
nr_sptes++;
- if (!is_shadow_present_pte(*iterator.sptep))
+ if (!is_shadow_present_pte(spte))
break;
}
- spin_unlock(&vcpu->kvm->mmu_lock);
+ walk_shadow_page_lockless_end(vcpu);
return nr_sptes;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 7086ca85d3e7..e374db9af021 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -49,6 +49,8 @@
#define PFERR_FETCH_MASK (1U << 4)
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
@@ -76,4 +78,27 @@ static inline int is_present_gpte(unsigned long pte)
return pte & PT_PRESENT_MASK;
}
+static inline int is_writable_pte(unsigned long pte)
+{
+ return pte & PT_WRITABLE_MASK;
+}
+
+static inline bool is_write_protection(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
+}
+
+static inline bool check_write_user_access(struct kvm_vcpu *vcpu,
+ bool write_fault, bool user_fault,
+ unsigned long pte)
+{
+ if (unlikely(write_fault && !is_writable_pte(pte)
+ && (user_fault || is_write_protection(vcpu))))
+ return false;
+
+ if (unlikely(user_fault && !(pte & PT_USER_MASK)))
+ return false;
+
+ return true;
+}
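+
+/*
+ * For illustration, the guest page-table walker in paging_tmpl.h flags
+ * a permission fault with this helper roughly as:
+ *
+ *	if (!check_write_user_access(vcpu, write_fault, user_fault, pte))
+ *		eperm = true;
+ *
+ * i.e. a write to a read-only pte (from user mode, or with CR0.WP set)
+ * or any user access to a supervisor pte is rejected.
+ */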
#endif
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 5f6223b8bcf7..2460a265be23 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -99,18 +99,6 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
"level = %d\n", sp, level);
return;
}
-
- if (*sptep == shadow_notrap_nonpresent_pte) {
- audit_printk(vcpu->kvm, "notrap spte in unsync "
- "sp: %p\n", sp);
- return;
- }
- }
-
- if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
- audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n",
- sp);
- return;
}
if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index b60b4fdb3eda..eed67f34146d 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -196,6 +196,54 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
TP_ARGS(sp)
);
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages,
+ TP_PROTO(struct kvm_mmu_page *sp),
+
+ TP_ARGS(sp)
+);
+
+TRACE_EVENT(
+ mark_mmio_spte,
+ TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
+ TP_ARGS(sptep, gfn, access),
+
+ TP_STRUCT__entry(
+ __field(void *, sptep)
+ __field(gfn_t, gfn)
+ __field(unsigned, access)
+ ),
+
+ TP_fast_assign(
+ __entry->sptep = sptep;
+ __entry->gfn = gfn;
+ __entry->access = access;
+ ),
+
+ TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
+ __entry->access)
+);
+
+TRACE_EVENT(
+ handle_mmio_page_fault,
+ TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
+ TP_ARGS(addr, gfn, access),
+
+ TP_STRUCT__entry(
+ __field(u64, addr)
+ __field(gfn_t, gfn)
+ __field(unsigned, access)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->gfn = gfn;
+ __entry->access = access;
+ ),
+
+ TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
+ __entry->access)
+);
+
TRACE_EVENT(
kvm_mmu_audit,
TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9d03ad4dd5ec..507e2b844cfa 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,11 +101,15 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return (ret != orig_pte);
}
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
+static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte,
+ bool last)
{
unsigned access;
access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+ if (last && !is_dirty_gpte(gpte))
+ access &= ~ACC_WRITE_MASK;
+
#if PTTYPE == 64
if (vcpu->arch.mmu.nx)
access &= ~(gpte >> PT64_NX_SHIFT);
@@ -113,6 +117,24 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
return access;
}
+static bool FNAME(is_last_gpte)(struct guest_walker *walker,
+ struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ pt_element_t gpte)
+{
+ if (walker->level == PT_PAGE_TABLE_LEVEL)
+ return true;
+
+ if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
+ (PTTYPE == 64 || is_pse(vcpu)))
+ return true;
+
+ if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
+ (mmu->root_level == PT64_ROOT_LEVEL))
+ return true;
+
+ return false;
+}
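+
+/*
+ * The three cases above are: a 4KB mapping at the last level; a large
+ * page at the directory level (2MB in long/PAE mode, 4MB when 32-bit
+ * paging has CR4.PSE set); and a 1GB page at the PDPE level, which only
+ * exists with 4-level (PT64_ROOT_LEVEL) paging.
+ */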
+
/*
* Fetch a guest pte for a guest virtual address
*/
@@ -125,18 +147,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
- bool eperm, present, rsvd_fault;
- int offset, write_fault, user_fault, fetch_fault;
-
- write_fault = access & PFERR_WRITE_MASK;
- user_fault = access & PFERR_USER_MASK;
- fetch_fault = access & PFERR_FETCH_MASK;
+ bool eperm;
+ int offset;
+ const int write_fault = access & PFERR_WRITE_MASK;
+ const int user_fault = access & PFERR_USER_MASK;
+ const int fetch_fault = access & PFERR_FETCH_MASK;
+ u16 errcode = 0;
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
-walk:
- present = true;
- eperm = rsvd_fault = false;
+retry_walk:
+ eperm = false;
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
@@ -144,10 +165,8 @@ walk:
if (walker->level == PT32E_ROOT_LEVEL) {
pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
- if (!is_present_gpte(pte)) {
- present = false;
+ if (!is_present_gpte(pte))
goto error;
- }
--walker->level;
}
#endif
@@ -170,42 +189,31 @@ walk:
real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
PFERR_USER_MASK|PFERR_WRITE_MASK);
- if (unlikely(real_gfn == UNMAPPED_GVA)) {
- present = false;
- break;
- }
+ if (unlikely(real_gfn == UNMAPPED_GVA))
+ goto error;
real_gfn = gpa_to_gfn(real_gfn);
host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
- if (unlikely(kvm_is_error_hva(host_addr))) {
- present = false;
- break;
- }
+ if (unlikely(kvm_is_error_hva(host_addr)))
+ goto error;
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
- if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
- present = false;
- break;
- }
+ if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+ goto error;
trace_kvm_mmu_paging_element(pte, walker->level);
- if (unlikely(!is_present_gpte(pte))) {
- present = false;
- break;
- }
+ if (unlikely(!is_present_gpte(pte)))
+ goto error;
if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
walker->level))) {
- rsvd_fault = true;
- break;
+ errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+ goto error;
}
- if (unlikely(write_fault && !is_writable_pte(pte)
- && (user_fault || is_write_protection(vcpu))))
- eperm = true;
-
- if (unlikely(user_fault && !(pte & PT_USER_MASK)))
+ if (!check_write_user_access(vcpu, write_fault, user_fault,
+ pte))
eperm = true;
#if PTTYPE == 64
@@ -213,39 +221,35 @@ walk:
eperm = true;
#endif
- if (!eperm && !rsvd_fault
- && unlikely(!(pte & PT_ACCESSED_MASK))) {
+ if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
int ret;
trace_kvm_mmu_set_accessed_bit(table_gfn, index,
sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_ACCESSED_MASK);
- if (unlikely(ret < 0)) {
- present = false;
- break;
- } else if (ret)
- goto walk;
+ if (unlikely(ret < 0))
+ goto error;
+ else if (ret)
+ goto retry_walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_ACCESSED_MASK;
}
- pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
-
walker->ptes[walker->level - 1] = pte;
- if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
- ((walker->level == PT_DIRECTORY_LEVEL) &&
- is_large_pte(pte) &&
- (PTTYPE == 64 || is_pse(vcpu))) ||
- ((walker->level == PT_PDPE_LEVEL) &&
- is_large_pte(pte) &&
- mmu->root_level == PT64_ROOT_LEVEL)) {
+ if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
int lvl = walker->level;
gpa_t real_gpa;
gfn_t gfn;
u32 ac;
+ /* check if the kernel is fetching from a user page */
+ if (unlikely(pte_access & PT_USER_MASK) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ if (fetch_fault && !user_fault)
+ eperm = true;
+
gfn = gpte_to_gfn_lvl(pte, lvl);
gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
@@ -266,12 +270,14 @@ walk:
break;
}
- pt_access = pte_access;
+ pt_access &= FNAME(gpte_access)(vcpu, pte, false);
--walker->level;
}
- if (unlikely(!present || eperm || rsvd_fault))
+ if (unlikely(eperm)) {
+ errcode |= PFERR_PRESENT_MASK;
goto error;
+ }
if (write_fault && unlikely(!is_dirty_gpte(pte))) {
int ret;
@@ -279,17 +285,17 @@ walk:
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_DIRTY_MASK);
- if (unlikely(ret < 0)) {
- present = false;
+ if (unlikely(ret < 0))
goto error;
- } else if (ret)
- goto walk;
+ else if (ret)
+ goto retry_walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_DIRTY_MASK;
walker->ptes[walker->level - 1] = pte;
}
+ pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true);
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
@@ -297,19 +303,14 @@ walk:
return 1;
error:
+ errcode |= write_fault | user_fault;
+ if (fetch_fault && (mmu->nx ||
+ kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+ errcode |= PFERR_FETCH_MASK;
+
walker->fault.vector = PF_VECTOR;
walker->fault.error_code_valid = true;
- walker->fault.error_code = 0;
- if (present)
- walker->fault.error_code |= PFERR_PRESENT_MASK;
-
- walker->fault.error_code |= write_fault | user_fault;
-
- if (fetch_fault && mmu->nx)
- walker->fault.error_code |= PFERR_FETCH_MASK;
- if (rsvd_fault)
- walker->fault.error_code |= PFERR_RSVD_MASK;
-
+ walker->fault.error_code = errcode;
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
@@ -336,16 +337,11 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
pt_element_t gpte)
{
- u64 nonpresent = shadow_trap_nonpresent_pte;
-
if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
goto no_present;
- if (!is_present_gpte(gpte)) {
- if (!sp->unsync)
- nonpresent = shadow_notrap_nonpresent_pte;
+ if (!is_present_gpte(gpte))
goto no_present;
- }
if (!(gpte & PT_ACCESSED_MASK))
goto no_present;
@@ -353,7 +349,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
return false;
no_present:
- drop_spte(vcpu->kvm, spte, nonpresent);
+ drop_spte(vcpu->kvm, spte);
return true;
}
@@ -369,9 +365,9 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return;
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
- if (is_error_pfn(pfn)) {
+ if (mmu_invalid_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
return;
}
@@ -381,7 +377,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
* vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
*/
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
- is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
+ NULL, PT_PAGE_TABLE_LEVEL,
gpte_to_gfn(gpte), pfn, true, true);
}
@@ -432,12 +428,11 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
unsigned pte_access;
gfn_t gfn;
pfn_t pfn;
- bool dirty;
if (spte == sptep)
continue;
- if (*spte != shadow_trap_nonpresent_pte)
+ if (is_shadow_present_pte(*spte))
continue;
gpte = gptep[i];
@@ -445,18 +440,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
continue;
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
+ true);
gfn = gpte_to_gfn(gpte);
- dirty = is_dirty_gpte(gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
- (pte_access & ACC_WRITE_MASK) && dirty);
- if (is_error_pfn(pfn)) {
+ pte_access & ACC_WRITE_MASK);
+ if (mmu_invalid_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
break;
}
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
- dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
+ NULL, PT_PAGE_TABLE_LEVEL, gfn,
pfn, true, true);
}
}
@@ -467,12 +462,11 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int user_fault, int write_fault, int hlevel,
- int *ptwrite, pfn_t pfn, bool map_writable,
+ int *emulate, pfn_t pfn, bool map_writable,
bool prefault)
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *sp = NULL;
- bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
int top_level;
unsigned direct_access;
struct kvm_shadow_walk_iterator it;
@@ -480,9 +474,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (!is_present_gpte(gw->ptes[gw->level - 1]))
return NULL;
- direct_access = gw->pt_access & gw->pte_access;
- if (!dirty)
- direct_access &= ~ACC_WRITE_MASK;
+ direct_access = gw->pte_access;
top_level = vcpu->arch.mmu.root_level;
if (top_level == PT32E_ROOT_LEVEL)
@@ -540,8 +532,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(it.sptep, sp);
}
- mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
- user_fault, write_fault, dirty, ptwrite, it.level,
+ mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
+ user_fault, write_fault, emulate, it.level,
gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
@@ -575,7 +567,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int user_fault = error_code & PFERR_USER_MASK;
struct guest_walker walker;
u64 *sptep;
- int write_pt = 0;
+ int emulate = 0;
int r;
pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
@@ -585,6 +577,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return handle_mmio_page_fault(vcpu, addr, error_code,
+ mmu_is_nested(vcpu));
+
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -623,9 +619,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
&map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
+ walker.gfn, pfn, walker.pte_access, &r))
+ return r;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -636,19 +632,19 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
- level, &write_pt, pfn, map_writable, prefault);
+ level, &emulate, pfn, map_writable, prefault);
(void)sptep;
- pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
- sptep, *sptep, write_pt);
+ pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
+ sptep, *sptep, emulate);
- if (!write_pt)
+ if (!emulate)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
++vcpu->stat.pf_fixed;
trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);
- return write_pt;
+ return emulate;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
@@ -665,6 +661,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
u64 *sptep;
int need_flush = 0;
+ vcpu_clear_mmio_info(vcpu, gva);
+
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
@@ -688,11 +686,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
if (is_shadow_present_pte(*sptep)) {
if (is_large_pte(*sptep))
--vcpu->kvm->stat.lpages;
- drop_spte(vcpu->kvm, sptep,
- shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
need_flush = 1;
- } else
- __set_spte(sptep, shadow_trap_nonpresent_pte);
+ } else if (is_mmio_spte(*sptep))
+ mmu_spte_clear_no_track(sptep);
+
break;
}
@@ -752,36 +750,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
return gpa;
}
-static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp)
-{
- int i, j, offset, r;
- pt_element_t pt[256 / sizeof(pt_element_t)];
- gpa_t pte_gpa;
-
- if (sp->role.direct
- || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
- nonpaging_prefetch_page(vcpu, sp);
- return;
- }
-
- pte_gpa = gfn_to_gpa(sp->gfn);
- if (PTTYPE == 32) {
- offset = sp->role.quadrant << PT64_LEVEL_BITS;
- pte_gpa += offset * sizeof(pt_element_t);
- }
-
- for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
- r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
- pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
- for (j = 0; j < ARRAY_SIZE(pt); ++j)
- if (r || is_present_gpte(pt[j]))
- sp->spt[i+j] = shadow_trap_nonpresent_pte;
- else
- sp->spt[i+j] = shadow_notrap_nonpresent_pte;
- }
-}
-
/*
* Using the cached information from sp->gfns is safe because:
* - The spte has a reference to the struct page, so the pfn for a given gfn
@@ -817,7 +785,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gpa_t pte_gpa;
gfn_t gfn;
- if (!is_shadow_present_pte(sp->spt[i]))
+ if (!sp->spt[i])
continue;
pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
@@ -826,26 +794,30 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
sizeof(pt_element_t)))
return -EINVAL;
- gfn = gpte_to_gfn(gpte);
-
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
vcpu->kvm->tlbs_dirty++;
continue;
}
+ gfn = gpte_to_gfn(gpte);
+ pte_access = sp->role.access;
+ pte_access &= FNAME(gpte_access)(vcpu, gpte, true);
+
+ if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
+ continue;
+
if (gfn != sp->gfns[i]) {
- drop_spte(vcpu->kvm, &sp->spt[i],
- shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, &sp->spt[i]);
vcpu->kvm->tlbs_dirty++;
continue;
}
nr_present++;
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+
host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
- is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
+ PT_PAGE_TABLE_LEVEL, gfn,
spte_to_pfn(sp->spt[i]), true, false,
host_writable);
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 506e4fe23adc..475d1c948501 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1496,11 +1496,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
update_cr0_intercept(svm);
}
-static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
+ if (cr4 & X86_CR4_VMXE)
+ return 1;
+
if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
svm_flush_tlb(vcpu);
@@ -1510,6 +1513,7 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+ return 0;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index db932760ea82..3ff898c104f7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -675,12 +675,12 @@ TRACE_EVENT(kvm_emulate_insn,
),
TP_fast_assign(
- __entry->rip = vcpu->arch.emulate_ctxt.decode.fetch.start;
+ __entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
- __entry->len = vcpu->arch.emulate_ctxt.decode.eip
- - vcpu->arch.emulate_ctxt.decode.fetch.start;
+ __entry->len = vcpu->arch.emulate_ctxt._eip
+ - vcpu->arch.emulate_ctxt.fetch.start;
memcpy(__entry->insn,
- vcpu->arch.emulate_ctxt.decode.fetch.data,
+ vcpu->arch.emulate_ctxt.fetch.data,
15);
__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
__entry->failed = failed;
@@ -698,6 +698,29 @@ TRACE_EVENT(kvm_emulate_insn,
#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
+TRACE_EVENT(
+ vcpu_match_mmio,
+ TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
+ TP_ARGS(gva, gpa, write, gpa_match),
+
+ TP_STRUCT__entry(
+ __field(gva_t, gva)
+ __field(gpa_t, gpa)
+ __field(bool, write)
+ __field(bool, gpa_match)
+ ),
+
+ TP_fast_assign(
+ __entry->gva = gva;
+ __entry->gpa = gpa;
+ __entry->write = write;
+ __entry->gpa_match = gpa_match;
+ ),
+
+ TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
+ __entry->write ? "Write" : "Read",
+ __entry->gpa_match ? "GPA" : "GVA")
+);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d48ec60ea421..e65a158dee64 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -43,13 +43,12 @@
#include "trace.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
+#define __ex_clear(x, reg) \
+ ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static int __read_mostly bypass_guest_pf = 1;
-module_param(bypass_guest_pf, bool, S_IRUGO);
-
static int __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
@@ -72,6 +71,14 @@ module_param(vmm_exclusive, bool, S_IRUGO);
static int __read_mostly yield_on_hlt = 1;
module_param(yield_on_hlt, bool, S_IRUGO);
+/*
+ * If nested=1, nested virtualization is supported, i.e., a guest may use
+ * VMX and act as a hypervisor for its own guests. If nested=0, guests may
+ * not use VMX instructions.
+ */
+static int __read_mostly nested = 0;
+module_param(nested, bool, S_IRUGO);
+
#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
#define KVM_GUEST_CR0_MASK \
@@ -109,6 +116,7 @@ static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
#define NR_AUTOLOAD_MSRS 1
+#define VMCS02_POOL_SIZE 1
struct vmcs {
u32 revision_id;
@@ -116,17 +124,237 @@ struct vmcs {
char data[0];
};
+/*
+ * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
+ * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
+ * loaded on this CPU (so we can clear them if the CPU goes down).
+ */
+struct loaded_vmcs {
+ struct vmcs *vmcs;
+ int cpu;
+ int launched;
+ struct list_head loaded_vmcss_on_cpu_link;
+};
+
struct shared_msr_entry {
unsigned index;
u64 data;
u64 mask;
};
+/*
+ * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
+ * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
+ * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
+ * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
+ * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
+ * More than one of these structures may exist, if L1 runs multiple L2 guests.
+ * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
+ * underlying hardware which will be used to run L2.
+ * This structure is packed to ensure that its layout is identical across
+ * machines (necessary for live migration).
+ * If there are changes in this struct, VMCS12_REVISION must be changed.
+ */
+typedef u64 natural_width;
+struct __packed vmcs12 {
+ /* According to the Intel spec, a VMCS region must start with the
+ * following two fields. Then follow implementation-specific data.
+ */
+ u32 revision_id;
+ u32 abort;
+
+ u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
+ u32 padding[7]; /* room for future expansion */
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 apic_access_addr;
+ u64 ept_pointer;
+ u64 guest_physical_address;
+ u64 vmcs_link_pointer;
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+ u64 host_ia32_perf_global_ctrl;
+ u64 padding64[8]; /* room for future expansion */
+ /*
+ * To allow migration of L1 (complete with its L2 guests) between
+ * machines of different natural widths (32 or 64 bit), we cannot have
+ * unsigned long fields with no explicit size. We use u64 (aliased
+ * natural_width) instead. Luckily, x86 is little-endian.
+ */
+ natural_width cr0_guest_host_mask;
+ natural_width cr4_guest_host_mask;
+ natural_width cr0_read_shadow;
+ natural_width cr4_read_shadow;
+ natural_width cr3_target_value0;
+ natural_width cr3_target_value1;
+ natural_width cr3_target_value2;
+ natural_width cr3_target_value3;
+ natural_width exit_qualification;
+ natural_width guest_linear_address;
+ natural_width guest_cr0;
+ natural_width guest_cr3;
+ natural_width guest_cr4;
+ natural_width guest_es_base;
+ natural_width guest_cs_base;
+ natural_width guest_ss_base;
+ natural_width guest_ds_base;
+ natural_width guest_fs_base;
+ natural_width guest_gs_base;
+ natural_width guest_ldtr_base;
+ natural_width guest_tr_base;
+ natural_width guest_gdtr_base;
+ natural_width guest_idtr_base;
+ natural_width guest_dr7;
+ natural_width guest_rsp;
+ natural_width guest_rip;
+ natural_width guest_rflags;
+ natural_width guest_pending_dbg_exceptions;
+ natural_width guest_sysenter_esp;
+ natural_width guest_sysenter_eip;
+ natural_width host_cr0;
+ natural_width host_cr3;
+ natural_width host_cr4;
+ natural_width host_fs_base;
+ natural_width host_gs_base;
+ natural_width host_tr_base;
+ natural_width host_gdtr_base;
+ natural_width host_idtr_base;
+ natural_width host_ia32_sysenter_esp;
+ natural_width host_ia32_sysenter_eip;
+ natural_width host_rsp;
+ natural_width host_rip;
+ natural_width paddingl[8]; /* room for future expansion */
+ u32 pin_based_vm_exec_control;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+ u32 cr3_target_count;
+ u32 vm_exit_controls;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_controls;
+ u32 vm_entry_msr_load_count;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+ u32 secondary_vm_exec_control;
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+ u32 guest_interruptibility_info;
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+ u32 host_ia32_sysenter_cs;
+ u32 padding32[8]; /* room for future expansion */
+ u16 virtual_processor_id;
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+};
+
+/*
+ * VMCS12_REVISION is an arbitrary id that should be changed if the content or
+ * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
+ * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
+ */
+#define VMCS12_REVISION 0x11e57ed0
+
+/*
+ * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
+ * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used
+ * by the current implementation, 4K are reserved to avoid future
+ * complications.
+ */
+#define VMCS12_SIZE 0x1000
+
+/* Cache mapping recently used vmcs12s to the vmcs02 last used to run them */
+struct vmcs02_list {
+ struct list_head list;
+ gpa_t vmptr;
+ struct loaded_vmcs vmcs02;
+};
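+
+/*
+ * The pool entries above are keyed by vmptr, the guest-physical address
+ * of the vmcs12 a vmcs02 last ran, so switching back to a recently used
+ * vmcs12 can reuse its hardware VMCS instead of allocating a new one.
+ */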
+
+/*
+ * The nested_vmx structure is part of vcpu_vmx, and holds information we need
+ * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
+ */
+struct nested_vmx {
+ /* Has the level1 guest done vmxon? */
+ bool vmxon;
+
+ /* The guest-physical address of the current VMCS L1 keeps for L2 */
+ gpa_t current_vmptr;
+ /* The host-usable pointer to the above */
+ struct page *current_vmcs12_page;
+ struct vmcs12 *current_vmcs12;
+
+ /* vmcs02_list cache of VMCSs recently used to run L2 guests */
+ struct list_head vmcs02_pool;
+ int vmcs02_num;
+ u64 vmcs01_tsc_offset;
+ /* L2 must run next, and mustn't decide to exit to L1. */
+ bool nested_run_pending;
+ /*
+ * Guest pages referred to in vmcs02 with host-physical pointers, so
+ * we must keep them pinned while L2 runs.
+ */
+ struct page *apic_access_page;
+};
+
struct vcpu_vmx {
struct kvm_vcpu vcpu;
- struct list_head local_vcpus_link;
unsigned long host_rsp;
- int launched;
u8 fail;
u8 cpl;
bool nmi_known_unmasked;
@@ -140,7 +368,14 @@ struct vcpu_vmx {
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
- struct vmcs *vmcs;
+ /*
+ * loaded_vmcs points to the VMCS currently used in this vcpu. For a
+ * non-nested (L1) guest, it always points to vmcs01. For a nested
+ * guest (L2), it points to a different VMCS.
+ */
+ struct loaded_vmcs vmcs01;
+ struct loaded_vmcs *loaded_vmcs;
+ bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
unsigned nr;
struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
@@ -176,6 +411,9 @@ struct vcpu_vmx {
u32 exit_reason;
bool rdtscp_enabled;
+
+ /* Support for a guest hypervisor (nested VMX) */
+ struct nested_vmx nested;
};
enum segment_cache_field {
@@ -192,6 +430,174 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
+#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
+#define FIELD(number, name) [number] = VMCS12_OFFSET(name)
+#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
+ [number##_HIGH] = VMCS12_OFFSET(name)+4
+
+static unsigned short vmcs_field_to_offset_table[] = {
+ FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
+ FIELD(GUEST_ES_SELECTOR, guest_es_selector),
+ FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
+ FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
+ FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
+ FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
+ FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
+ FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
+ FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
+ FIELD(HOST_ES_SELECTOR, host_es_selector),
+ FIELD(HOST_CS_SELECTOR, host_cs_selector),
+ FIELD(HOST_SS_SELECTOR, host_ss_selector),
+ FIELD(HOST_DS_SELECTOR, host_ds_selector),
+ FIELD(HOST_FS_SELECTOR, host_fs_selector),
+ FIELD(HOST_GS_SELECTOR, host_gs_selector),
+ FIELD(HOST_TR_SELECTOR, host_tr_selector),
+ FIELD64(IO_BITMAP_A, io_bitmap_a),
+ FIELD64(IO_BITMAP_B, io_bitmap_b),
+ FIELD64(MSR_BITMAP, msr_bitmap),
+ FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
+ FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
+ FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
+ FIELD64(TSC_OFFSET, tsc_offset),
+ FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
+ FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
+ FIELD64(EPT_POINTER, ept_pointer),
+ FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
+ FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
+ FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
+ FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
+ FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
+ FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
+ FIELD64(GUEST_PDPTR0, guest_pdptr0),
+ FIELD64(GUEST_PDPTR1, guest_pdptr1),
+ FIELD64(GUEST_PDPTR2, guest_pdptr2),
+ FIELD64(GUEST_PDPTR3, guest_pdptr3),
+ FIELD64(HOST_IA32_PAT, host_ia32_pat),
+ FIELD64(HOST_IA32_EFER, host_ia32_efer),
+ FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
+ FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
+ FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
+ FIELD(EXCEPTION_BITMAP, exception_bitmap),
+ FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
+ FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
+ FIELD(CR3_TARGET_COUNT, cr3_target_count),
+ FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
+ FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
+ FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
+ FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
+ FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
+ FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
+ FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
+ FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
+ FIELD(TPR_THRESHOLD, tpr_threshold),
+ FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
+ FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
+ FIELD(VM_EXIT_REASON, vm_exit_reason),
+ FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
+ FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
+ FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
+ FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
+ FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
+ FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
+ FIELD(GUEST_ES_LIMIT, guest_es_limit),
+ FIELD(GUEST_CS_LIMIT, guest_cs_limit),
+ FIELD(GUEST_SS_LIMIT, guest_ss_limit),
+ FIELD(GUEST_DS_LIMIT, guest_ds_limit),
+ FIELD(GUEST_FS_LIMIT, guest_fs_limit),
+ FIELD(GUEST_GS_LIMIT, guest_gs_limit),
+ FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
+ FIELD(GUEST_TR_LIMIT, guest_tr_limit),
+ FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
+ FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
+ FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
+ FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
+ FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
+ FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
+ FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
+ FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
+ FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
+ FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
+ FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
+ FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
+ FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
+ FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
+ FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
+ FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
+ FIELD(CR0_READ_SHADOW, cr0_read_shadow),
+ FIELD(CR4_READ_SHADOW, cr4_read_shadow),
+ FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
+ FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
+ FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
+ FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
+ FIELD(EXIT_QUALIFICATION, exit_qualification),
+ FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
+ FIELD(GUEST_CR0, guest_cr0),
+ FIELD(GUEST_CR3, guest_cr3),
+ FIELD(GUEST_CR4, guest_cr4),
+ FIELD(GUEST_ES_BASE, guest_es_base),
+ FIELD(GUEST_CS_BASE, guest_cs_base),
+ FIELD(GUEST_SS_BASE, guest_ss_base),
+ FIELD(GUEST_DS_BASE, guest_ds_base),
+ FIELD(GUEST_FS_BASE, guest_fs_base),
+ FIELD(GUEST_GS_BASE, guest_gs_base),
+ FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
+ FIELD(GUEST_TR_BASE, guest_tr_base),
+ FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
+ FIELD(GUEST_IDTR_BASE, guest_idtr_base),
+ FIELD(GUEST_DR7, guest_dr7),
+ FIELD(GUEST_RSP, guest_rsp),
+ FIELD(GUEST_RIP, guest_rip),
+ FIELD(GUEST_RFLAGS, guest_rflags),
+ FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
+ FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
+ FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
+ FIELD(HOST_CR0, host_cr0),
+ FIELD(HOST_CR3, host_cr3),
+ FIELD(HOST_CR4, host_cr4),
+ FIELD(HOST_FS_BASE, host_fs_base),
+ FIELD(HOST_GS_BASE, host_gs_base),
+ FIELD(HOST_TR_BASE, host_tr_base),
+ FIELD(HOST_GDTR_BASE, host_gdtr_base),
+ FIELD(HOST_IDTR_BASE, host_idtr_base),
+ FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
+ FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
+ FIELD(HOST_RSP, host_rsp),
+ FIELD(HOST_RIP, host_rip),
+};
+static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
+
+static inline short vmcs_field_to_offset(unsigned long field)
+{
+ if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
+ return -1;
+ return vmcs_field_to_offset_table[field];
+}
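+
+/*
+ * For example, given the tables above:
+ *
+ *	vmcs_field_to_offset(GUEST_RIP)
+ *		== offsetof(struct vmcs12, guest_rip);
+ *	vmcs_field_to_offset(TSC_OFFSET_HIGH)
+ *		== offsetof(struct vmcs12, tsc_offset) + 4;
+ *
+ * The "+4" for the *_HIGH halves relies on x86 being little-endian, as
+ * noted in the struct vmcs12 comment; holes in the table read as 0 and
+ * are reported as -1.
+ */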
+
+static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.current_vmcs12;
+}
+
+static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
+{
+ struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
+ if (is_error_page(page)) {
+ kvm_release_page_clean(page);
+ return NULL;
+ }
+ return page;
+}
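+
+/*
+ * Note that gfn_to_page() never returns NULL: on failure it returns a
+ * distinguished error page, which nested_get_page() releases and
+ * converts to NULL for its callers.
+ */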
+
+static void nested_release_page(struct page *page)
+{
+ kvm_release_page_dirty(page);
+}
+
+static void nested_release_page_clean(struct page *page)
+{
+ kvm_release_page_clean(page);
+}
+
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
@@ -200,7 +606,11 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
-static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
+/*
+ * We maintain a per-CPU linked-list of VMCSs loaded on that CPU. This is needed
+ * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
+ */
+static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
static unsigned long *vmx_io_bitmap_a;
@@ -442,6 +852,35 @@ static inline bool report_flexpriority(void)
return flexpriority_enabled;
}
+static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
+{
+ return vmcs12->cpu_based_vm_exec_control & bit;
+}
+
+static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
+{
+ return (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
+ (vmcs12->secondary_vm_exec_control & bit);
+}
+
+static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
+ struct kvm_vcpu *vcpu)
+{
+ return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
+}
+
+static inline bool is_exception(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+ == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+}
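+
+/*
+ * In the VMX interruption-info format, bits 7:0 hold the vector, bits
+ * 10:8 the type and bit 31 the valid bit, so is_exception() accepts only
+ * valid events whose type is "hardware exception".
+ */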
+
+static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
+static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12,
+ u32 reason, unsigned long qualification);
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -501,6 +940,13 @@ static void vmcs_clear(struct vmcs *vmcs)
vmcs, phys_addr);
}
+static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
+{
+ vmcs_clear(loaded_vmcs->vmcs);
+ loaded_vmcs->cpu = -1;
+ loaded_vmcs->launched = 0;
+}
+
static void vmcs_load(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);
@@ -510,29 +956,28 @@ static void vmcs_load(struct vmcs *vmcs)
: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
: "cc", "memory");
if (error)
- printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+ printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
vmcs, phys_addr);
}
-static void __vcpu_clear(void *arg)
+static void __loaded_vmcs_clear(void *arg)
{
- struct vcpu_vmx *vmx = arg;
+ struct loaded_vmcs *loaded_vmcs = arg;
int cpu = raw_smp_processor_id();
- if (vmx->vcpu.cpu == cpu)
- vmcs_clear(vmx->vmcs);
- if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
+ if (loaded_vmcs->cpu != cpu)
+ return; /* vcpu migration can race with cpu offline */
+ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- list_del(&vmx->local_vcpus_link);
- vmx->vcpu.cpu = -1;
- vmx->launched = 0;
+ list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
+ loaded_vmcs_init(loaded_vmcs);
}
-static void vcpu_clear(struct vcpu_vmx *vmx)
+static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
- if (vmx->vcpu.cpu == -1)
- return;
- smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
+ if (loaded_vmcs->cpu != -1)
+ smp_call_function_single(
+ loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1);
}
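+
+/*
+ * VMCLEAR must be executed on the CPU where the VMCS was last loaded,
+ * since that CPU may hold cached VMCS state; hence loaded_vmcs_clear()
+ * uses smp_call_function_single() rather than clearing locally.
+ */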
static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
@@ -585,26 +1030,26 @@ static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
}
}
-static unsigned long vmcs_readl(unsigned long field)
+static __always_inline unsigned long vmcs_readl(unsigned long field)
{
- unsigned long value = 0;
+ unsigned long value;
- asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
- : "+a"(value) : "d"(field) : "cc");
+ asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
+ : "=a"(value) : "d"(field) : "cc");
return value;
}
-static u16 vmcs_read16(unsigned long field)
+static __always_inline u16 vmcs_read16(unsigned long field)
{
return vmcs_readl(field);
}
-static u32 vmcs_read32(unsigned long field)
+static __always_inline u32 vmcs_read32(unsigned long field)
{
return vmcs_readl(field);
}
-static u64 vmcs_read64(unsigned long field)
+static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
return vmcs_readl(field);
@@ -731,6 +1176,15 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
if (vcpu->fpu_active)
eb &= ~(1u << NM_VECTOR);
+
+ /* When we are running a nested L2 guest and L1 specified for it a
+ * certain exception bitmap, we must trap the same exceptions and pass
+ * them to L1. When running L2, we will only handle the exceptions
+ * specified above if L1 did not want them.
+ */
+ if (is_guest_mode(vcpu))
+ eb |= get_vmcs12(vcpu)->exception_bitmap;
+
vmcs_write32(EXCEPTION_BITMAP, eb);
}
@@ -971,22 +1425,22 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!vmm_exclusive)
kvm_cpu_vmxon(phys_addr);
- else if (vcpu->cpu != cpu)
- vcpu_clear(vmx);
+ else if (vmx->loaded_vmcs->cpu != cpu)
+ loaded_vmcs_clear(vmx->loaded_vmcs);
- if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
- per_cpu(current_vmcs, cpu) = vmx->vmcs;
- vmcs_load(vmx->vmcs);
+ if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
+ per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
+ vmcs_load(vmx->loaded_vmcs->vmcs);
}
- if (vcpu->cpu != cpu) {
+ if (vmx->loaded_vmcs->cpu != cpu) {
struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
unsigned long sysenter_esp;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
local_irq_disable();
- list_add(&vmx->local_vcpus_link,
- &per_cpu(vcpus_on_cpu, cpu));
+ list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
+ &per_cpu(loaded_vmcss_on_cpu, cpu));
local_irq_enable();
/*
@@ -998,6 +1452,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+ vmx->loaded_vmcs->cpu = cpu;
}
}
@@ -1005,7 +1460,8 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
__vmx_load_host_state(to_vmx(vcpu));
if (!vmm_exclusive) {
- __vcpu_clear(to_vmx(vcpu));
+ __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
+ vcpu->cpu = -1;
kvm_cpu_vmxoff();
}
}
@@ -1023,19 +1479,55 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_CR0, cr0);
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ if (is_guest_mode(vcpu))
+ vcpu->arch.cr0_guest_owned_bits &=
+ ~get_vmcs12(vcpu)->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
+/*
+ * Return the cr0 value that a nested guest would read. This is a combination
+ * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
+ * its hypervisor (cr0_read_shadow).
+ */
+static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
+{
+ return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
+ (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
+}
+static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
+{
+ return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
+ (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
+}
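+
+/*
+ * For example, if cr0_guest_host_mask == X86_CR0_TS, every bit L2 reads
+ * except TS comes from guest_cr0 (the cr0 the hardware actually uses),
+ * while TS comes from cr0_read_shadow, i.e. whatever L1 chose to show
+ * its guest:
+ *
+ *	read = (guest_cr0 & ~mask) | (cr0_read_shadow & mask);
+ */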
+
static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
+ /* Note that there is no vcpu->fpu_active = 0 here. The caller must
+ * clear fpu_active *before* calling this function.
+ */
vmx_decache_cr0_guest_bits(vcpu);
vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = 0;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
- vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
+ if (is_guest_mode(vcpu)) {
+ /*
+ * L1's specified read shadow might not contain the TS bit,
+ * so now that we turned on shadowing of this bit, we need to
+ * set this bit of the shadow. Like in nested_vmx_run we need
+ * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
+ * up-to-date here because we just decached cr0.TS (and we'll
+ * only update vmcs12->guest_cr0 on nested exit).
+ */
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
+ (vcpu->arch.cr0 & X86_CR0_TS);
+ vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+ } else
+ vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -1119,6 +1611,25 @@ static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}
+/*
+ * KVM wants to inject page-faults which it got to the guest. This function
+ * checks whether in a nested guest, we need to inject them to L1 or L2.
+ * This function assumes it is called with the exit reason in vmcs02 being
+ * a #PF exception (this is the only case in which KVM injects a #PF when L2
+ * is running).
+ */
+static int nested_pf_handled(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
+ if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
+ return 0;
+
+ nested_vmx_vmexit(vcpu);
+ return 1;
+}
+
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
@@ -1126,6 +1637,10 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
+ if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
+ nested_pf_handled(vcpu))
+ return;
+
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
@@ -1248,12 +1763,24 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
vmcs_write64(TSC_OFFSET, offset);
+ if (is_guest_mode(vcpu))
+ /*
+ * We're here if L1 chose not to trap the TSC MSR. Since
+ * prepare_vmcs12() does not copy tsc_offset, we need to also
+ * set the vmcs12 field here.
+ */
+ get_vmcs12(vcpu)->tsc_offset = offset -
+ to_vmx(vcpu)->nested.vmcs01_tsc_offset;
}
static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
u64 offset = vmcs_read64(TSC_OFFSET);
vmcs_write64(TSC_OFFSET, offset + adjustment);
+ if (is_guest_mode(vcpu)) {
+ /* Even when running L2, the adjustment needs to apply to L1 */
+ to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
+ }
}
static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
@@ -1261,6 +1788,236 @@ static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
return target_tsc - native_read_tsc();
}
+static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
+}
+
+/*
+ * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
+ * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
+ * all guests if the "nested" module option is off, and can also be disabled
+ * for a single guest by disabling its VMX cpuid bit.
+ */
+static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
+{
+ return nested && guest_cpuid_has_vmx(vcpu);
+}
+
+/*
+ * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
+ * returned for the various VMX controls MSRs when nested VMX is enabled.
+ * The same values should also be used to verify that vmcs12 control fields are
+ * valid during nested entry from L1 to L2.
+ * Each of these control msrs has a low and high 32-bit half: A low bit is on
+ * if the corresponding bit in the (32-bit) control field *must* be on, and a
+ * bit in the high half is on if the corresponding bit in the control field
+ * may be on. See also vmx_control_verify().
+ * TODO: allow these variables to be modified (downgraded) by module options
+ * or other means.
+ */
+static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
+static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
+static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
+static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
+static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
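+
+/*
+ * For example, for the pin-based controls set up below, low = 0x16 means
+ * bits 1, 2 and 4 must be 1 in any vmcs12, while high = 0x16 |
+ * PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS
+ * additionally marks the bits L1 may choose to set; a vmcs12 value is
+ * acceptable iff vmx_control_verify(value, low, high) holds.
+ */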
+static __init void nested_vmx_setup_ctls_msrs(void)
+{
+ /*
+ * Note that as a general rule, the high half of the MSRs (bits in
+ * the control fields which may be 1) should be initialized by the
+ * intersection of the underlying hardware's MSR (i.e., features which
+ * can be supported) and the list of features we want to expose -
+ * because they are known to be properly supported in our code.
+ * Also, usually, the low half of the MSRs (bits which must be 1) can
+ * be set to 0, meaning that L1 may turn off any of these bits. The
+ * reason is that if one of these bits is necessary, it will appear
+ * in vmcs01, and prepare_vmcs02, which bitwise-or's the control
+ * fields of vmcs01 and vmcs12, will turn these bits back on - and
+ * nested_vmx_exit_handled() will not pass related exits to L1.
+ * These rules have exceptions below.
+ */
+
+ /* pin-based controls */
+ /*
+ * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
+ * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
+ */
+ nested_vmx_pinbased_ctls_low = 0x16;
+ nested_vmx_pinbased_ctls_high = 0x16 |
+ PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+ PIN_BASED_VIRTUAL_NMIS;
+
+ /* exit controls */
+ nested_vmx_exit_ctls_low = 0;
+ /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
+#ifdef CONFIG_X86_64
+ nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE;
+#else
+ nested_vmx_exit_ctls_high = 0;
+#endif
+
+ /* entry controls */
+ rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
+ nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
+ nested_vmx_entry_ctls_low = 0;
+ nested_vmx_entry_ctls_high &=
+ VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE;
+
+ /* cpu-based controls */
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
+ nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
+ nested_vmx_procbased_ctls_low = 0;
+ nested_vmx_procbased_ctls_high &=
+ CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
+ CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING |
+#ifdef CONFIG_X86_64
+ CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
+#endif
+ CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
+ CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ /*
+ * We can allow some features even when not supported by the
+ * hardware. For example, L1 can specify an MSR bitmap - and we
+ * can use it to avoid exits to L1 - even when L0 runs L2
+ * without MSR bitmaps.
+ */
+ nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
+
+ /* secondary cpu-based controls */
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+ nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
+ nested_vmx_secondary_ctls_low = 0;
+ nested_vmx_secondary_ctls_high &=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+}
+
+static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
+{
+ /*
+ * Bits that are 0 in high must be 0 in control, and bits that are
+ * 1 in low must be 1 in control.
+ */
+ return ((control & high) | low) == control;
+}
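+
+/*
+ * ((control & high) | low) == control fails in exactly two cases: control
+ * has a bit that is 0 in high (the AND drops it), or control lacks a bit
+ * that is 1 in low (the OR adds it). E.g. with low = 0x16, high = 0xff,
+ * control values 0x16 and 0x96 pass while 0x106 (bit 8 not allowed) and
+ * 0x06 (must-be-1 bit 4 missing) fail.
+ */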
+
+static inline u64 vmx_control_msr(u32 low, u32 high)
+{
+ return low | ((u64)high << 32);
+}
+
+/*
+ * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
+ * also let it use VMX-specific MSRs.
+ * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
+ * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
+ * like all other MSRs).
+ */
+static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+{
+ if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
+ msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
+ /*
+ * According to the spec, processors which do not support VMX
+ * should throw a #GP(0) when VMX capability MSRs are read.
+ */
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
+
+ switch (msr_index) {
+ case MSR_IA32_FEATURE_CONTROL:
+ *pdata = 0;
+ break;
+ case MSR_IA32_VMX_BASIC:
+ /*
+ * This MSR reports some information about VMX support. We
+ * should return information about the VMX we emulate for the
+ * guest, and the VMCS structure we give it - not about the
+ * VMX support of the underlying hardware.
+ */
+ *pdata = VMCS12_REVISION |
+ ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
+ (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+ break;
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ case MSR_IA32_VMX_PINBASED_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
+ nested_vmx_pinbased_ctls_high);
+ break;
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
+ nested_vmx_procbased_ctls_high);
+ break;
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ case MSR_IA32_VMX_EXIT_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
+ nested_vmx_exit_ctls_high);
+ break;
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ case MSR_IA32_VMX_ENTRY_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
+ nested_vmx_entry_ctls_high);
+ break;
+ case MSR_IA32_VMX_MISC:
+ *pdata = 0;
+ break;
+ /*
+ * These MSRs specify bits which the guest must keep fixed (on or off)
+ * while L1 is in VMXON mode (in L1's root mode, or running an L2).
+ * We picked the standard core2 setting.
+ */
+#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
+#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
+ case MSR_IA32_VMX_CR0_FIXED0:
+ *pdata = VMXON_CR0_ALWAYSON;
+ break;
+ case MSR_IA32_VMX_CR0_FIXED1:
+ *pdata = -1ULL;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED0:
+ *pdata = VMXON_CR4_ALWAYSON;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED1:
+ *pdata = -1ULL;
+ break;
+ case MSR_IA32_VMX_VMCS_ENUM:
+ *pdata = 0x1f;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
+ nested_vmx_secondary_ctls_high);
+ break;
+ case MSR_IA32_VMX_EPT_VPID_CAP:
+ /* Currently, no nested EPT or nested VPID */
+ *pdata = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
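
/*
 * Editorial sketch, not part of this patch: how a guest hypervisor might
 * decode the composite MSR_IA32_VMX_BASIC value built above. Per the SDM,
 * bits 30:0 hold the VMCS revision id, bits 44:32 the VMCS region size and
 * bits 53:50 the memory type. The helper name is hypothetical.
 */
static void example_decode_vmx_basic(u64 basic)
{
	u32 revision = basic & 0x7fffffff;
	u32 size = (basic >> 32) & 0x1fff;
	u32 mem_type = (basic >> 50) & 0xf;

	/* For the value built above: VMCS12_REVISION, VMCS12_SIZE, WB (6). */
	pr_info("vmcs rev %u, size %u, memtype %u\n", revision, size, mem_type);
}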
+
+static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+{
+ if (!nested_vmx_allowed(vcpu))
+ return 0;
+
+ if (msr_index == MSR_IA32_FEATURE_CONTROL)
+ /* TODO: the right thing. */
+ return 1;
+ /*
+ * No need to treat VMX capability MSRs specially: If we don't handle
+ * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
+ */
+ return 0;
+}
+
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
@@ -1309,6 +2066,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
/* Otherwise falls through */
default:
vmx_load_host_state(to_vmx(vcpu));
+ if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
+ return 0;
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
vmx_load_host_state(to_vmx(vcpu));
@@ -1380,6 +2139,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
return 1;
/* Otherwise falls through */
default:
+ if (vmx_set_vmx_msr(vcpu, msr_index, data))
+ break;
msr = find_msr_entry(vmx, msr_index);
if (msr) {
vmx_load_host_state(vmx);
@@ -1469,7 +2230,7 @@ static int hardware_enable(void *garbage)
if (read_cr4() & X86_CR4_VMXE)
return -EBUSY;
- INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
+ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
test_bits = FEATURE_CONTROL_LOCKED;
@@ -1493,14 +2254,14 @@ static int hardware_enable(void *garbage)
return 0;
}
-static void vmclear_local_vcpus(void)
+static void vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
- struct vcpu_vmx *vmx, *n;
+ struct loaded_vmcs *v, *n;
- list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
- local_vcpus_link)
- __vcpu_clear(vmx);
+ list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
+ loaded_vmcss_on_cpu_link)
+ __loaded_vmcs_clear(v);
}
@@ -1515,7 +2276,7 @@ static void kvm_cpu_vmxoff(void)
static void hardware_disable(void *garbage)
{
if (vmm_exclusive) {
- vmclear_local_vcpus();
+ vmclear_local_loaded_vmcss();
kvm_cpu_vmxoff();
}
write_cr4(read_cr4() & ~X86_CR4_VMXE);
@@ -1696,6 +2457,18 @@ static void free_vmcs(struct vmcs *vmcs)
free_pages((unsigned long)vmcs, vmcs_config.order);
}
+/*
+ * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
+ */
+static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+{
+ if (!loaded_vmcs->vmcs)
+ return;
+ loaded_vmcs_clear(loaded_vmcs);
+ free_vmcs(loaded_vmcs->vmcs);
+ loaded_vmcs->vmcs = NULL;
+}
+
static void free_kvm_area(void)
{
int cpu;
@@ -1756,6 +2529,9 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_ple())
ple_gap = 0;
+ if (nested)
+ nested_vmx_setup_ctls_msrs();
+
return alloc_kvm_area();
}
@@ -2041,7 +2817,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
(unsigned long *)&vcpu->arch.regs_dirty);
}
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
@@ -2139,11 +2915,23 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vmcs_writel(GUEST_CR3, guest_cr3);
}
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+ if (cr4 & X86_CR4_VMXE) {
+ /*
+ * To use VMXON (and later other VMX instructions), a guest
+ * must first be able to turn on cr4.VMXE (see handle_vmon()).
+ * So basically the check on whether to allow nested VMX
+ * is here.
+ */
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ } else if (to_vmx(vcpu)->nested.vmxon)
+ return 1;
+
vcpu->arch.cr4 = cr4;
if (enable_ept) {
if (!is_paging(vcpu)) {
@@ -2156,6 +2944,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vmcs_writel(CR4_READ_SHADOW, cr4);
vmcs_writel(GUEST_CR4, hw_cr4);
+ return 0;
}
static void vmx_get_segment(struct kvm_vcpu *vcpu,
@@ -2721,18 +3510,110 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
}
/*
+ * Set up the vmcs's constant host-state fields, i.e., host-state fields that
+ * will not change in the lifetime of the guest.
+ * Note that host-state that does change is set elsewhere. E.g., host-state
+ * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
+ */
+static void vmx_set_constant_host_state(void)
+{
+ u32 low32, high32;
+ unsigned long tmpl;
+ struct desc_ptr dt;
+
+ vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
+ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
+
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
+
+ native_store_idt(&dt);
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
+ vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
+
+ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+ vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+ rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
+ vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
+
+ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
+ rdmsr(MSR_IA32_CR_PAT, low32, high32);
+ vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
+ }
+}
+
+static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
+{
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ if (enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+ if (is_guest_mode(&vmx->vcpu))
+ vmx->vcpu.arch.cr4_guest_owned_bits &=
+ ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
+ vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
+}
+
+static u32 vmx_exec_control(struct vcpu_vmx *vmx)
+{
+ u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
+ if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_STORE_EXITING |
+ CPU_BASED_CR8_LOAD_EXITING;
+#endif
+ }
+ if (!enable_ept)
+ exec_control |= CPU_BASED_CR3_STORE_EXITING |
+ CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_INVLPG_EXITING;
+ return exec_control;
+}
+
+static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
+{
+ u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
+ if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (vmx->vpid == 0)
+ exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
+ if (!enable_ept) {
+ exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ enable_unrestricted_guest = 0;
+ }
+ if (!enable_unrestricted_guest)
+ exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ if (!ple_gap)
+ exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+ return exec_control;
+}
+
+static void ept_set_mmio_spte_mask(void)
+{
+ /*
+ * EPT Misconfigurations can be generated if the value of bits 2:0
+ * of an EPT paging-structure entry is 110b (write/execute).
+ * Also, magic bits (0xffull << 49) is set to quickly identify mmio
+ * spte.
+ */
+ kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
+}
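
/*
 * Editorial sketch, not part of this patch: with the mask installed above,
 * an MMIO-marker spte can be recognized by testing those same bits. The
 * real check lives in the MMU code behind kvm_mmu_set_mmio_spte_mask();
 * the helper below is hypothetical.
 */
static inline bool example_is_mmio_spte(u64 spte)
{
	const u64 mask = (0xffull << 49) | 0x6ull;

	return (spte & mask) == mask;
}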
+
+/*
* Sets up the vmcs for emulated real mode.
*/
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
- u32 host_sysenter_cs, msr_low, msr_high;
- u32 junk;
- u64 host_pat;
+#ifdef CONFIG_X86_64
unsigned long a;
- struct desc_ptr dt;
+#endif
int i;
- unsigned long kvm_vmx_return;
- u32 exec_control;
/* I/O */
vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
@@ -2747,36 +3628,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
vmcs_config.pin_based_exec_ctrl);
- exec_control = vmcs_config.cpu_based_exec_ctrl;
- if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
- exec_control &= ~CPU_BASED_TPR_SHADOW;
-#ifdef CONFIG_X86_64
- exec_control |= CPU_BASED_CR8_STORE_EXITING |
- CPU_BASED_CR8_LOAD_EXITING;
-#endif
- }
- if (!enable_ept)
- exec_control |= CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_INVLPG_EXITING;
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
if (cpu_has_secondary_exec_ctrls()) {
- exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
- if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
- exec_control &=
- ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- if (vmx->vpid == 0)
- exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
- if (!enable_ept) {
- exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
- enable_unrestricted_guest = 0;
- }
- if (!enable_unrestricted_guest)
- exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
- if (!ple_gap)
- exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ vmx_secondary_exec_control(vmx));
}
if (ple_gap) {
@@ -2784,20 +3640,13 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(PLE_WINDOW, ple_window);
}
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
- vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
- vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
- vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
-
- vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
- vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
- vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmx_set_constant_host_state();
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
@@ -2808,32 +3657,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif
- vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
-
- native_store_idt(&dt);
- vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
-
- asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
- rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
- vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
- rdmsrl(MSR_IA32_SYSENTER_ESP, a);
- vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
- rdmsrl(MSR_IA32_SYSENTER_EIP, a);
- vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
-
- if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
- rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
- host_pat = msr_low | ((u64) msr_high << 32);
- vmcs_write64(HOST_IA32_PAT, host_pat);
- }
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+ u32 msr_low, msr_high;
+ u64 host_pat;
rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
host_pat = msr_low | ((u64) msr_high << 32);
/* Write the default value follow host pat */
@@ -2863,10 +3695,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
- vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
- if (enable_ept)
- vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
- vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
+ set_cr4_guest_host_mask(vmx);
kvm_write_tsc(&vmx->vcpu, 0);
@@ -2990,9 +3819,25 @@ out:
return ret;
}
+/*
+ * In nested virtualization, check if L1 asked to exit on external interrupts.
+ * For most existing hypervisors, this will always return true.
+ */
+static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
+{
+ return get_vmcs12(vcpu)->pin_based_vm_exec_control &
+ PIN_BASED_EXT_INTR_MASK;
+}
+
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ /* We can get here when nested_run_pending caused
+ * vmx_interrupt_allowed() to return false. In this case, do
+ * nothing - the interrupt will be injected later.
+ */
+ return;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -3049,6 +3894,9 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (is_guest_mode(vcpu))
+ return;
+
if (!cpu_has_virtual_nmis()) {
/*
* Tracking the NMI-blocked state in software is built upon
@@ -3115,6 +3963,17 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+ struct vmcs12 *vmcs12;
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return 0;
+ nested_vmx_vmexit(vcpu);
+ vmcs12 = get_vmcs12(vcpu);
+ vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
+ vmcs12->vm_exit_intr_info = 0;
+ /* fall through to normal code, but now in L1, not L2 */
+ }
+
return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
@@ -3356,6 +4215,58 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
+/* called to set cr0 as approriate for a mov-to-cr0 exit. */
+static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (to_vmx(vcpu)->nested.vmxon &&
+ ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+ return 1;
+
+ if (is_guest_mode(vcpu)) {
+ /*
+ * We get here when L2 changed cr0 in a way that did not change
+ * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
+ * but did change L0 shadowed bits. This can currently happen
+ * with the TS bit: L0 may want to leave TS on (for lazy fpu
+ * loading) while pretending to allow the guest to change it.
+ */
+ if (kvm_set_cr0(vcpu, (val & vcpu->arch.cr0_guest_owned_bits) |
+ (vcpu->arch.cr0 & ~vcpu->arch.cr0_guest_owned_bits)))
+ return 1;
+ vmcs_writel(CR0_READ_SHADOW, val);
+ return 0;
+ } else
+ return kvm_set_cr0(vcpu, val);
+}
+
+static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (is_guest_mode(vcpu)) {
+ if (kvm_set_cr4(vcpu, (val & vcpu->arch.cr4_guest_owned_bits) |
+ (vcpu->arch.cr4 & ~vcpu->arch.cr4_guest_owned_bits)))
+ return 1;
+ vmcs_writel(CR4_READ_SHADOW, val);
+ return 0;
+ } else
+ return kvm_set_cr4(vcpu, val);
+}
+
+/* called to set cr0 as approriate for clts instruction exit. */
+static void handle_clts(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu)) {
+ /*
+ * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
+ * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
+ * just pretend it's off (also in arch.cr0 for fpu_activate).
+ */
+ vmcs_writel(CR0_READ_SHADOW,
+ vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
+ vcpu->arch.cr0 &= ~X86_CR0_TS;
+ } else
+ vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+}
+
static int handle_cr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification, val;
@@ -3372,7 +4283,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
- err = kvm_set_cr0(vcpu, val);
+ err = handle_set_cr0(vcpu, val);
kvm_complete_insn_gp(vcpu, err);
return 1;
case 3:
@@ -3380,7 +4291,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
kvm_complete_insn_gp(vcpu, err);
return 1;
case 4:
- err = kvm_set_cr4(vcpu, val);
+ err = handle_set_cr4(vcpu, val);
kvm_complete_insn_gp(vcpu, err);
return 1;
case 8: {
@@ -3398,7 +4309,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
};
break;
case 2: /* clts */
- vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+ handle_clts(vcpu);
trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
skip_emulated_instruction(vcpu);
vmx_fpu_activate(vcpu);
@@ -3574,12 +4485,6 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
return 1;
}
-static int handle_vmx_insn(struct kvm_vcpu *vcpu)
-{
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
-}
-
static int handle_invd(struct kvm_vcpu *vcpu)
{
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
@@ -3777,11 +4682,19 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
u64 sptes[4];
- int nr_sptes, i;
+ int nr_sptes, i, ret;
gpa_t gpa;
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ ret = handle_mmio_page_fault_common(vcpu, gpa, true);
+ if (likely(ret == 1))
+ return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
+ EMULATE_DONE;
+ if (unlikely(!ret))
+ return 1;
+
+ /* It is the real ept misconfig */
printk(KERN_ERR "EPT: Misconfiguration.\n");
printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
@@ -3866,6 +4779,639 @@ static int handle_invalid_op(struct kvm_vcpu *vcpu)
}
/*
+ * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
+ * We could reuse a single VMCS for all the L2 guests, but we also want the
+ * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
+ * allows keeping them loaded on the processor, and in the future will allow
+ * optimizations where prepare_vmcs02 doesn't need to set all the fields on
+ * every entry if they never change.
+ * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
+ * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
+ *
+ * The following functions allocate and free a vmcs02 in this pool.
+ */
+
+/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
+static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
+{
+ struct vmcs02_list *item;
+ list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
+ if (item->vmptr == vmx->nested.current_vmptr) {
+ list_move(&item->list, &vmx->nested.vmcs02_pool);
+ return &item->vmcs02;
+ }
+
+ if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
+ /* Recycle the least recently used VMCS. */
+ item = list_entry(vmx->nested.vmcs02_pool.prev,
+ struct vmcs02_list, list);
+ item->vmptr = vmx->nested.current_vmptr;
+ list_move(&item->list, &vmx->nested.vmcs02_pool);
+ return &item->vmcs02;
+ }
+
+ /* Create a new VMCS */
+ item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+ if (!item)
+ return NULL;
+ item->vmcs02.vmcs = alloc_vmcs();
+ if (!item->vmcs02.vmcs) {
+ kfree(item);
+ return NULL;
+ }
+ loaded_vmcs_init(&item->vmcs02);
+ item->vmptr = vmx->nested.current_vmptr;
+ list_add(&(item->list), &(vmx->nested.vmcs02_pool));
+ vmx->nested.vmcs02_num++;
+ return &item->vmcs02;
+}
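
/*
 * Editorial note, not part of this patch: the pool above is kept in MRU
 * order - a lookup hit is moved to the list head, and once vmcs02_num
 * reaches the pool limit the list tail (the least recently used vmcs02)
 * is recycled for the new vmptr. With a pool size of 1 this degenerates
 * to a single vmcs02 reused for every vmcs12.
 */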
+
+/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
+static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
+{
+ struct vmcs02_list *item;
+ list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
+ if (item->vmptr == vmptr) {
+ free_loaded_vmcs(&item->vmcs02);
+ list_del(&item->list);
+ kfree(item);
+ vmx->nested.vmcs02_num--;
+ return;
+ }
+}
+
+/*
+ * Free all VMCSs saved for this vcpu, except the one pointed by
+ * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
+ * currently used, if running L2), and vmcs01 when running L2.
+ */
+static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
+{
+ struct vmcs02_list *item, *n;
+ list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
+ if (vmx->loaded_vmcs != &item->vmcs02)
+ free_loaded_vmcs(&item->vmcs02);
+ list_del(&item->list);
+ kfree(item);
+ }
+ vmx->nested.vmcs02_num = 0;
+
+ if (vmx->loaded_vmcs != &vmx->vmcs01)
+ free_loaded_vmcs(&vmx->vmcs01);
+}
+
+/*
+ * Emulate the VMXON instruction.
+ * Currently, we just remember that VMX is active, and do not save or even
+ * inspect the argument to VMXON (the so-called "VMXON pointer") because we
+ * do not currently need to store anything in that guest-allocated memory
+ * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
+ * argument is different from the VMXON pointer (which the spec says they
+ * should).
+ */
+static int handle_vmon(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment cs;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /* The Intel VMX Instruction Reference lists a bunch of bits that
+ * are prerequisite to running VMXON, most notably cr4.VMXE must be
+ * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
+ * Otherwise, we should fail with #UD. We test these now:
+ */
+ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
+ !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
+ (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+ if (is_long_mode(vcpu) && !cs.l) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (vmx_get_cpl(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
+ vmx->nested.vmcs02_num = 0;
+
+ vmx->nested.vmxon = true;
+
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/*
+ * Intel's VMX Instruction Reference specifies a common set of prerequisites
+ * for running VMX instructions (except VMXON, whose prerequisites are
+ * slightly different). It also specifies what exception to inject otherwise.
+ */
+static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment cs;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.vmxon) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
+ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+ if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
+ (is_long_mode(vcpu) && !cs.l)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
+ if (vmx_get_cpl(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Free whatever needs to be freed from vmx->nested when L1 goes down, or
+ * just stops using VMX.
+ */
+static void free_nested(struct vcpu_vmx *vmx)
+{
+ if (!vmx->nested.vmxon)
+ return;
+ vmx->nested.vmxon = false;
+ if (vmx->nested.current_vmptr != -1ull) {
+ kunmap(vmx->nested.current_vmcs12_page);
+ nested_release_page(vmx->nested.current_vmcs12_page);
+ vmx->nested.current_vmptr = -1ull;
+ vmx->nested.current_vmcs12 = NULL;
+ }
+ /* Unpin physical memory we referred to in current vmcs02 */
+ if (vmx->nested.apic_access_page) {
+ nested_release_page(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page = 0;
+ }
+
+ nested_free_all_saved_vmcss(vmx);
+}
+
+/* Emulate the VMXOFF instruction */
+static int handle_vmoff(struct kvm_vcpu *vcpu)
+{
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+ free_nested(to_vmx(vcpu));
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/*
+ * Decode the memory-address operand of a vmx instruction, as recorded on an
+ * exit caused by such an instruction (run by a guest hypervisor).
+ * On success, returns 0. When the operand is invalid, returns 1 and injects
+ * #UD or #GP.
+ */
+static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
+ unsigned long exit_qualification,
+ u32 vmx_instruction_info, gva_t *ret)
+{
+ /*
+ * According to Vol. 3B, "Information for VM Exits Due to Instruction
+ * Execution", on an exit, vmx_instruction_info holds most of the
+ * addressing components of the operand. Only the displacement part
+ * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
+ * For how an actual address is calculated from all these components,
+ * refer to Vol. 1, "Operand Addressing".
+ */
+ int scaling = vmx_instruction_info & 3;
+ int addr_size = (vmx_instruction_info >> 7) & 7;
+ bool is_reg = vmx_instruction_info & (1u << 10);
+ int seg_reg = (vmx_instruction_info >> 15) & 7;
+ int index_reg = (vmx_instruction_info >> 18) & 0xf;
+ bool index_is_valid = !(vmx_instruction_info & (1u << 22));
+ int base_reg = (vmx_instruction_info >> 23) & 0xf;
+ bool base_is_valid = !(vmx_instruction_info & (1u << 27));
+
+ if (is_reg) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ /* Addr = segment_base + offset */
+ /* offset = base + [index * scale] + displacement */
+ *ret = vmx_get_segment_base(vcpu, seg_reg);
+ if (base_is_valid)
+ *ret += kvm_register_read(vcpu, base_reg);
+ if (index_is_valid)
+ *ret += kvm_register_read(vcpu, index_reg)<<scaling;
+ *ret += exit_qualification; /* holds the displacement */
+
+ if (addr_size == 1) /* 32 bit */
+ *ret &= 0xffffffff;
+
+ /*
+ * TODO: throw #GP (and return 1) in various cases that the VM*
+ * instructions require it - e.g., offset beyond segment limit,
+ * unusable or unreadable/unwritable segment, non-canonical 64-bit
+ * address, and so on. Currently these are not checked.
+ */
+ return 0;
+}
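
/*
 * Editorial worked example for the decoding above, not part of this patch.
 * For a hypothetical vmx_instruction_info of 0x418100:
 *   scaling        = 0     (no index scaling)
 *   addr_size      = 2     (64-bit address size)
 *   is_reg         = false (memory operand)
 *   seg_reg        = 3     (DS)
 *   index_is_valid = false (bit 22 set)
 *   base_reg       = 0     (RAX), base_is_valid = true (bit 27 clear)
 * so the computed address is DS.base + RAX + exit_qualification (the
 * displacement); it would be masked to 32 bits only if addr_size were 1.
 */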
+
+/*
+ * The following 3 functions, nested_vmx_succeed()/failInvalid()/failValid(),
+ * set the success or error code of an emulated VMX instruction, as specified
+ * by Vol 2B, VMX Instruction Reference, "Conventions".
+ */
+static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
+}
+
+static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_CF);
+}
+
+static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
+ u32 vm_instruction_error)
+{
+ if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
+ /*
+ * failValid writes the error number to the current VMCS, which
+ * can't be done if there isn't a current VMCS.
+ */
+ nested_vmx_failInvalid(vcpu);
+ return;
+ }
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_ZF);
+ get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
+}
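
/*
 * Editorial sketch, not part of this patch: the three helpers above follow
 * the SDM's "Conventions" section, so the outcome of an emulated VMX
 * instruction can be classified from RFLAGS alone, just as a guest
 * hypervisor would do it. The helper name is hypothetical.
 */
static inline int example_vmx_result(unsigned long rflags)
{
	if (rflags & X86_EFLAGS_CF)
		return -1;	/* VMfailInvalid: no current VMCS */
	if (rflags & X86_EFLAGS_ZF)
		return -2;	/* VMfailValid: consult VM_INSTRUCTION_ERROR */
	return 0;		/* VMsucceed */
}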
+
+/* Emulate the VMCLEAR instruction */
+static int handle_vmclear(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gva_t gva;
+ gpa_t vmptr;
+ struct vmcs12 *vmcs12;
+ struct page *page;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+ sizeof(vmptr), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
+ nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ if (vmptr == vmx->nested.current_vmptr) {
+ kunmap(vmx->nested.current_vmcs12_page);
+ nested_release_page(vmx->nested.current_vmcs12_page);
+ vmx->nested.current_vmptr = -1ull;
+ vmx->nested.current_vmcs12 = NULL;
+ }
+
+ page = nested_get_page(vcpu, vmptr);
+ if (page == NULL) {
+ /*
+ * For accurate processor emulation, VMCLEAR beyond available
+ * physical memory should do nothing at all. However, it is
+ * possible that a nested vmx bug, not a guest hypervisor bug,
+ * resulted in this case, so let's shut down before doing any
+ * more damage:
+ */
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ return 1;
+ }
+ vmcs12 = kmap(page);
+ vmcs12->launch_state = 0;
+ kunmap(page);
+ nested_release_page(page);
+
+ nested_free_vmcs02(vmx, vmptr);
+
+ skip_emulated_instruction(vcpu);
+ nested_vmx_succeed(vcpu);
+ return 1;
+}
+
+static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
+
+/* Emulate the VMLAUNCH instruction */
+static int handle_vmlaunch(struct kvm_vcpu *vcpu)
+{
+ return nested_vmx_run(vcpu, true);
+}
+
+/* Emulate the VMRESUME instruction */
+static int handle_vmresume(struct kvm_vcpu *vcpu)
+{
+ return nested_vmx_run(vcpu, false);
+}
+
+enum vmcs_field_type {
+ VMCS_FIELD_TYPE_U16 = 0,
+ VMCS_FIELD_TYPE_U64 = 1,
+ VMCS_FIELD_TYPE_U32 = 2,
+ VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
+};
+
+static inline int vmcs_field_type(unsigned long field)
+{
+ if (0x1 & field) /* the *_HIGH fields are all 32 bit */
+ return VMCS_FIELD_TYPE_U32;
+ return (field >> 13) & 0x3;
+}
+
+static inline int vmcs_field_readonly(unsigned long field)
+{
+ return (((field >> 10) & 0x3) == 1);
+}
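
/*
 * Editorial worked example for the two decoders above, not part of this
 * patch. Per the SDM's field-encoding layout, bits 14:13 give the width
 * and bits 11:10 the access type: GUEST_RIP (0x681e) has
 * (0x681e >> 13) & 3 == 3, i.e. natural width, and VM_EXIT_REASON (0x4402)
 * has (0x4402 >> 10) & 3 == 1, i.e. read-only.
 */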
+
+/*
+ * Read a vmcs12 field. Since these can have varying lengths and we return
+ * one type, we chose the biggest type (u64) and zero-extend the return value
+ * to that size. Note that the caller, handle_vmread, might need to use only
+ * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
+ * 64-bit fields are to be returned).
+ */
+static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 *ret)
+{
+ short offset = vmcs_field_to_offset(field);
+ char *p;
+
+ if (offset < 0)
+ return 0;
+
+ p = ((char *)(get_vmcs12(vcpu))) + offset;
+
+ switch (vmcs_field_type(field)) {
+ case VMCS_FIELD_TYPE_NATURAL_WIDTH:
+ *ret = *((natural_width *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U16:
+ *ret = *((u16 *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U32:
+ *ret = *((u32 *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U64:
+ *ret = *((u64 *)p);
+ return 1;
+ default:
+ return 0; /* can never happen. */
+ }
+}
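
/*
 * Editorial sketch, not part of this patch: even a 16-bit field comes back
 * zero-extended to u64 from the accessor above, so callers can treat all
 * widths uniformly. The helper name is hypothetical.
 */
static bool example_read_es_selector(struct kvm_vcpu *vcpu, u16 *sel)
{
	u64 val;

	if (!vmcs12_read_any(vcpu, GUEST_ES_SELECTOR, &val))
		return false;
	*sel = val;	/* low 16 bits hold the field */
	return true;
}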
+
+/*
+ * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
+ * used before) all generate the same failure when it is missing.
+ */
+static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (vmx->nested.current_vmptr == -1ull) {
+ nested_vmx_failInvalid(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 0;
+ }
+ return 1;
+}
+
+static int handle_vmread(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ u64 field_value;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gva_t gva = 0;
+
+ if (!nested_vmx_check_permission(vcpu) ||
+ !nested_vmx_check_vmcs12(vcpu))
+ return 1;
+
+ /* Decode instruction info and find the field to read */
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ /* Read the field, zero-extended to a u64 field_value */
+ if (!vmcs12_read_any(vcpu, field, &field_value)) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ /*
+ * Now copy part of this value to register or memory, as requested.
+ * Note that the number of bits actually copied is 32 or 64 depending
+ * on the guest's mode (32 or 64 bit), not on the given field's length.
+ */
+ if (vmx_instruction_info & (1u << 10)) {
+ kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+ field_value);
+ } else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ /* _system ok, as nested_vmx_check_permission verified cpl=0 */
+ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+ &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+static int handle_vmwrite(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ gva_t gva;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ char *p;
+ short offset;
+ /* The value to write might be 32 or 64 bits, depending on L1's long
+ * mode, and eventually we need to write that into a field of several
+ * possible lengths. The code below first zero-extends the value to 64
+ * bits (field_value), and then copies only the appropriate number of
+ * bits into the vmcs12 field.
+ */
+ u64 field_value = 0;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu) ||
+ !nested_vmx_check_vmcs12(vcpu))
+ return 1;
+
+ if (vmx_instruction_info & (1u << 10))
+ field_value = kvm_register_read(vcpu,
+ (((vmx_instruction_info) >> 3) & 0xf));
+ else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
+ &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+ }
+
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ if (vmcs_field_readonly(field)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ offset = vmcs_field_to_offset(field);
+ if (offset < 0) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ p = ((char *) get_vmcs12(vcpu)) + offset;
+
+ switch (vmcs_field_type(field)) {
+ case VMCS_FIELD_TYPE_U16:
+ *(u16 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U32:
+ *(u32 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U64:
+ *(u64 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_NATURAL_WIDTH:
+ *(natural_width *)p = field_value;
+ break;
+ default:
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/* Emulate the VMPTRLD instruction */
+static int handle_vmptrld(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gva_t gva;
+ gpa_t vmptr;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+ sizeof(vmptr), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
+ nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ if (vmx->nested.current_vmptr != vmptr) {
+ struct vmcs12 *new_vmcs12;
+ struct page *page;
+ page = nested_get_page(vcpu, vmptr);
+ if (page == NULL) {
+ nested_vmx_failInvalid(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ new_vmcs12 = kmap(page);
+ if (new_vmcs12->revision_id != VMCS12_REVISION) {
+ kunmap(page);
+ nested_release_page_clean(page);
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ if (vmx->nested.current_vmptr != -1ull) {
+ kunmap(vmx->nested.current_vmcs12_page);
+ nested_release_page(vmx->nested.current_vmcs12_page);
+ }
+
+ vmx->nested.current_vmptr = vmptr;
+ vmx->nested.current_vmcs12 = new_vmcs12;
+ vmx->nested.current_vmcs12_page = page;
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/* Emulate the VMPTRST instruction */
+static int handle_vmptrst(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gva_t vmcs_gva;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &vmcs_gva))
+ return 1;
+ /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+ (void *)&to_vmx(vcpu)->nested.current_vmptr,
+ sizeof(u64), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
* to be done to userspace and return 0.
@@ -3886,15 +5432,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_VMCALL] = handle_vmcall,
- [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
- [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
- [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
- [EXIT_REASON_VMPTRST] = handle_vmx_insn,
- [EXIT_REASON_VMREAD] = handle_vmx_insn,
- [EXIT_REASON_VMRESUME] = handle_vmx_insn,
- [EXIT_REASON_VMWRITE] = handle_vmx_insn,
- [EXIT_REASON_VMOFF] = handle_vmx_insn,
- [EXIT_REASON_VMON] = handle_vmx_insn,
+ [EXIT_REASON_VMCLEAR] = handle_vmclear,
+ [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
+ [EXIT_REASON_VMPTRLD] = handle_vmptrld,
+ [EXIT_REASON_VMPTRST] = handle_vmptrst,
+ [EXIT_REASON_VMREAD] = handle_vmread,
+ [EXIT_REASON_VMRESUME] = handle_vmresume,
+ [EXIT_REASON_VMWRITE] = handle_vmwrite,
+ [EXIT_REASON_VMOFF] = handle_vmoff,
+ [EXIT_REASON_VMON] = handle_vmon,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
@@ -3911,6 +5457,229 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0. I.e., check whether L1 expressed
+ * disinterest in the current event (a read or write of a specific MSR) via
+ * its MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12, u32 exit_reason)
+{
+ u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+ gpa_t bitmap;
+
+ if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS))
+ return 1;
+
+ /*
+ * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
+ * for the four combinations of read/write and low/high MSR numbers.
+ * First we need to figure out which of the four to use:
+ */
+ bitmap = vmcs12->msr_bitmap;
+ if (exit_reason == EXIT_REASON_MSR_WRITE)
+ bitmap += 2048;
+ if (msr_index >= 0xc0000000) {
+ msr_index -= 0xc0000000;
+ bitmap += 1024;
+ }
+
+ /* Then read the msr_index'th bit from this bitmap: */
+ if (msr_index < 1024*8) {
+ unsigned char b;
+ kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1);
+ return 1 & (b >> (msr_index & 7));
+ } else
+ return 1; /* let L1 handle the wrong parameter */
+}
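
/*
 * Editorial worked example for the bitmap walk above, not part of this
 * patch: a write to MSR_EFER (0xc0000080) selects the write/high quarter
 * (bitmap + 2048 + 1024) with msr_index reduced to 0x80, so the decisive
 * bit is bit 0 of byte 3072 + 0x80/8 = 3088 of the vmcs12->msr_bitmap page.
 */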
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ int cr = exit_qualification & 15;
+ int reg = (exit_qualification >> 8) & 15;
+ unsigned long val = kvm_register_read(vcpu, reg);
+
+ switch ((exit_qualification >> 4) & 3) {
+ case 0: /* mov to cr */
+ switch (cr) {
+ case 0:
+ if (vmcs12->cr0_guest_host_mask &
+ (val ^ vmcs12->cr0_read_shadow))
+ return 1;
+ break;
+ case 3:
+ if ((vmcs12->cr3_target_count >= 1 &&
+ vmcs12->cr3_target_value0 == val) ||
+ (vmcs12->cr3_target_count >= 2 &&
+ vmcs12->cr3_target_value1 == val) ||
+ (vmcs12->cr3_target_count >= 3 &&
+ vmcs12->cr3_target_value2 == val) ||
+ (vmcs12->cr3_target_count >= 4 &&
+ vmcs12->cr3_target_value3 == val))
+ return 0;
+ if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
+ return 1;
+ break;
+ case 4:
+ if (vmcs12->cr4_guest_host_mask &
+ (vmcs12->cr4_read_shadow ^ val))
+ return 1;
+ break;
+ case 8:
+ if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
+ return 1;
+ break;
+ }
+ break;
+ case 2: /* clts */
+ if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
+ (vmcs12->cr0_read_shadow & X86_CR0_TS))
+ return 1;
+ break;
+ case 1: /* mov from cr */
+ switch (cr) {
+ case 3:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR3_STORE_EXITING)
+ return 1;
+ break;
+ case 8:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR8_STORE_EXITING)
+ return 1;
+ break;
+ }
+ break;
+ case 3: /* lmsw */
+ /*
+ * lmsw can change bits 1..3 of cr0, and can only set (not clear)
+ * bit 0 of cr0. Other attempted changes are ignored, with no exit.
+ */
+ if (vmcs12->cr0_guest_host_mask & 0xe &
+ (val ^ vmcs12->cr0_read_shadow))
+ return 1;
+ if ((vmcs12->cr0_guest_host_mask & 0x1) &&
+ !(vmcs12->cr0_read_shadow & 0x1) &&
+ (val & 0x1))
+ return 1;
+ break;
+ }
+ return 0;
+}
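
/*
 * Editorial worked example for the lmsw case above, not part of this
 * patch: lmsw loads only CR0[3:0]. If L1 shadows TS (bit 3 set in
 * cr0_guest_host_mask) with cr0_read_shadow.TS == 0, an L2 lmsw whose
 * source has bit 3 set differs from the shadow in a masked bit, so the
 * function returns 1 and the exit is forwarded to L1.
 */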
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * should handle it ourselves in L0 (and then continue L2). Only call this
+ * when in is_guest_mode (L2).
+ */
+static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+{
+ u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+ u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (vmx->nested.nested_run_pending)
+ return 0;
+
+ if (unlikely(vmx->fail)) {
+ printk(KERN_INFO "%s failed vm entry %x\n",
+ __func__, vmcs_read32(VM_INSTRUCTION_ERROR));
+ return 1;
+ }
+
+ switch (exit_reason) {
+ case EXIT_REASON_EXCEPTION_NMI:
+ if (!is_exception(intr_info))
+ return 0;
+ else if (is_page_fault(intr_info))
+ return enable_ept;
+ return vmcs12->exception_bitmap &
+ (1u << (intr_info & INTR_INFO_VECTOR_MASK));
+ case EXIT_REASON_EXTERNAL_INTERRUPT:
+ return 0;
+ case EXIT_REASON_TRIPLE_FAULT:
+ return 1;
+ case EXIT_REASON_PENDING_INTERRUPT:
+ case EXIT_REASON_NMI_WINDOW:
+ /*
+ * prepare_vmcs02() set the CPU_BASED_VIRTUAL_INTR_PENDING bit
+ * (aka Interrupt Window Exiting) only when L1 turned it on,
+ * so if we got a PENDING_INTERRUPT exit, this must be for L1.
+ * Same for NMI Window Exiting.
+ */
+ return 1;
+ case EXIT_REASON_TASK_SWITCH:
+ return 1;
+ case EXIT_REASON_CPUID:
+ return 1;
+ case EXIT_REASON_HLT:
+ return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
+ case EXIT_REASON_INVD:
+ return 1;
+ case EXIT_REASON_INVLPG:
+ return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
+ case EXIT_REASON_RDPMC:
+ return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
+ case EXIT_REASON_RDTSC:
+ return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
+ case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
+ case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
+ */
+ return 1;
+ case EXIT_REASON_CR_ACCESS:
+ return nested_vmx_exit_handled_cr(vcpu, vmcs12);
+ case EXIT_REASON_DR_ACCESS:
+ return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
+ case EXIT_REASON_IO_INSTRUCTION:
+ /* TODO: support IO bitmaps */
+ return 1;
+ case EXIT_REASON_MSR_READ:
+ case EXIT_REASON_MSR_WRITE:
+ return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
+ case EXIT_REASON_INVALID_STATE:
+ return 1;
+ case EXIT_REASON_MWAIT_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+ case EXIT_REASON_MONITOR_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
+ case EXIT_REASON_PAUSE_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING);
+ case EXIT_REASON_MCE_DURING_VMENTRY:
+ return 0;
+ case EXIT_REASON_TPR_BELOW_THRESHOLD:
+ return 1;
+ case EXIT_REASON_APIC_ACCESS:
+ return nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ case EXIT_REASON_EPT_VIOLATION:
+ case EXIT_REASON_EPT_MISCONFIG:
+ return 0;
+ case EXIT_REASON_WBINVD:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
+ case EXIT_REASON_XSETBV:
+ return 1;
+ default:
+ return 1;
+ }
+}
+
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
*info1 = vmcs_readl(EXIT_QUALIFICATION);
@@ -3933,6 +5702,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if (vmx->emulation_required && emulate_invalid_guest_state)
return handle_invalid_guest_state(vcpu);
+ /*
+ * The KVM_REQ_EVENT optimization bit is only on for one entry, and if
+ * we did not inject a still-pending event to L1 now because of
+ * nested_run_pending, we need to re-enable this bit.
+ */
+ if (vmx->nested.nested_run_pending)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
+ exit_reason == EXIT_REASON_VMRESUME))
+ vmx->nested.nested_run_pending = 1;
+ else
+ vmx->nested.nested_run_pending = 0;
+
+ if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
+ nested_vmx_vmexit(vcpu);
+ return 1;
+ }
+
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -3955,7 +5743,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
"(0x%x) and exit reason is 0x%x\n",
__func__, vectoring_info, exit_reason);
- if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
+ if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
+ !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
+ get_vmcs12(vcpu), vcpu)))) {
if (vmx_interrupt_allowed(vcpu)) {
vmx->soft_vnmi_blocked = 0;
} else if (vmx->vnmi_blocked_time > 1000000000LL &&
@@ -4118,6 +5908,8 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
+ if (is_guest_mode(&vmx->vcpu))
+ return;
__vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
VM_EXIT_INSTRUCTION_LEN,
IDT_VECTORING_ERROR_CODE);
@@ -4125,6 +5917,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
{
+ if (is_guest_mode(vcpu))
+ return;
__vmx_complete_interrupts(to_vmx(vcpu),
vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
VM_ENTRY_INSTRUCTION_LEN,
@@ -4145,6 +5939,21 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ if (vmcs12->idt_vectoring_info_field &
+ VECTORING_INFO_VALID_MASK) {
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ vmcs12->idt_vectoring_info_field);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs12->vm_exit_instruction_len);
+ if (vmcs12->idt_vectoring_info_field &
+ VECTORING_INFO_DELIVER_CODE_MASK)
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs12->idt_vectoring_error_code);
+ }
+ }
+
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
vmx->entry_time = ktime_get();
@@ -4167,6 +5976,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
+ vmx->__launched = vmx->loaded_vmcs->launched;
asm(
/* Store host registers */
"push %%"R"dx; push %%"R"bp;"
@@ -4237,7 +6047,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"pop %%"R"bp; pop %%"R"dx \n\t"
"setbe %c[fail](%0) \n\t"
: : "c"(vmx), "d"((unsigned long)HOST_RSP),
- [launched]"i"(offsetof(struct vcpu_vmx, launched)),
+ [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
[fail]"i"(offsetof(struct vcpu_vmx, fail)),
[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
@@ -4276,8 +6086,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+ if (is_guest_mode(vcpu)) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ vmcs12->idt_vectoring_info_field = vmx->idt_vectoring_info;
+ if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
+ vmcs12->idt_vectoring_error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ vmcs12->vm_exit_instruction_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ }
+ }
+
asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
- vmx->launched = 1;
+ vmx->loaded_vmcs->launched = 1;
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
@@ -4289,41 +6110,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#undef R
#undef Q
-static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (vmx->vmcs) {
- vcpu_clear(vmx);
- free_vmcs(vmx->vmcs);
- vmx->vmcs = NULL;
- }
-}
-
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx);
- vmx_free_vmcs(vcpu);
+ free_nested(vmx);
+ free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
-static inline void vmcs_init(struct vmcs *vmcs)
-{
- u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id()));
-
- if (!vmm_exclusive)
- kvm_cpu_vmxon(phys_addr);
-
- vmcs_clear(vmcs);
-
- if (!vmm_exclusive)
- kvm_cpu_vmxoff();
-}
-
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
@@ -4345,11 +6143,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto uninit_vcpu;
}
- vmx->vmcs = alloc_vmcs();
- if (!vmx->vmcs)
+ vmx->loaded_vmcs = &vmx->vmcs01;
+ vmx->loaded_vmcs->vmcs = alloc_vmcs();
+ if (!vmx->loaded_vmcs->vmcs)
goto free_msrs;
-
- vmcs_init(vmx->vmcs);
+ if (!vmm_exclusive)
+ kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
+ loaded_vmcs_init(vmx->loaded_vmcs);
+ if (!vmm_exclusive)
+ kvm_cpu_vmxoff();
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
@@ -4375,10 +6177,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
+ vmx->nested.current_vmptr = -1ull;
+ vmx->nested.current_vmcs12 = NULL;
+
return &vmx->vcpu;
free_vmcs:
- free_vmcs(vmx->vmcs);
+ free_vmcs(vmx->loaded_vmcs->vmcs);
free_msrs:
kfree(vmx->guest_msrs);
uninit_vcpu:
@@ -4512,6 +6317,650 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
+ if (func == 1 && nested)
+ entry->ecx |= bit(X86_FEATURE_VMX);
+}
+
+/*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
+ * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
+ * guest in a way that will both be appropriate to L1's requests, and our
+ * needs. In addition to modifying the active vmcs (which is vmcs02), this
+ * function also has additional necessary side-effects, like setting various
+ * vcpu->arch fields.
+ */
+static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exec_control;
+
+ vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
+ vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
+ vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
+ vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
+ vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
+ vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
+ vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
+ vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
+ vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
+ vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
+ vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
+ vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
+ vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
+ vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
+ vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
+ vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
+ vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
+ vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+ vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
+ vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
+ vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
+ vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
+ vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
+ vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
+ vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
+ vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
+ vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
+ vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
+ vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
+ vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
+ vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
+ vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ vmcs12->vm_entry_intr_info_field);
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs12->vm_entry_exception_error_code);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs12->vm_entry_instruction_len);
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
+ vmcs12->guest_interruptibility_info);
+ vmcs_write32(GUEST_ACTIVITY_STATE, vmcs12->guest_activity_state);
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
+ vmcs_writel(GUEST_DR7, vmcs12->guest_dr7);
+ vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags);
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vmcs12->guest_pending_dbg_exceptions);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
+
+ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
+ (vmcs_config.pin_based_exec_ctrl |
+ vmcs12->pin_based_vm_exec_control));
+
+ /*
+ * Whether page-faults are trapped is determined by a combination of
+ * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
+ * If enable_ept, L0 doesn't care about page faults and we should
+ * set all of these to L1's desires. However, if !enable_ept, L0 does
+ * care about (at least some) page faults, and because it is not easy
+ * (if at all possible?) to merge L0 and L1's desires, we simply ask
+ * to exit on each and every L2 page fault. This is done by setting
+ * MASK=MATCH=0 and (see below) EB.PF=1.
+ * Note that below we don't need special code to set EB.PF beyond the
+ * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
+ * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
+ * !enable_ept, EB.PF is 1, so the "or" will always be 1.
+ *
+ * A problem with this approach (when !enable_ept) is that L1 may be
+ * injected with more page faults than it asked for. This could have
+ * caused problems, but in practice existing hypervisors don't care.
+ * To fix this, we will need to emulate the PFEC checking (on the L1
+ * page tables), using walk_addr(), when injecting PFs to L1.
+ */
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
+ enable_ept ? vmcs12->page_fault_error_code_mask : 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
+ enable_ept ? vmcs12->page_fault_error_code_match : 0);
+
+ if (cpu_has_secondary_exec_ctrls()) {
+ u32 exec_control = vmx_secondary_exec_control(vmx);
+ if (!vmx->rdtscp_enabled)
+ exec_control &= ~SECONDARY_EXEC_RDTSCP;
+ /* Take the following fields only from vmcs12 */
+ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (nested_cpu_has(vmcs12,
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+ exec_control |= vmcs12->secondary_vm_exec_control;
+
+ if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
+ /*
+ * Translate L1 physical address to host physical
+ * address for vmcs02. Keep the page pinned, so this
+ * physical address remains valid. We keep a reference
+ * to it so we can release it later.
+ */
+ if (vmx->nested.apic_access_page) /* shouldn't happen */
+ nested_release_page(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page =
+ nested_get_page(vcpu, vmcs12->apic_access_addr);
+ /*
+ * If translation failed, no matter: This feature asks
+ * to exit when accessing the given address, and if it
+ * can never be accessed, this feature won't do
+ * anything anyway.
+ */
+ if (!vmx->nested.apic_access_page)
+ exec_control &=
+ ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ else
+ vmcs_write64(APIC_ACCESS_ADDR,
+ page_to_phys(vmx->nested.apic_access_page));
+ }
+
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ }
+
+
+ /*
+ * Set host-state according to L0's settings (vmcs12 is irrelevant here).
+ * Some constant fields are set here by vmx_set_constant_host_state().
+ * Other fields are different per CPU, and will be set later when
+ * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
+ */
+ vmx_set_constant_host_state();
+
+ /*
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
+ * entry, but only if the current (host) sp changed from the value
+ * we wrote last (vmx->host_rsp). This cache is no longer relevant
+ * if we switch vmcs, and rather than hold a separate cache per vmcs,
+ * here we just force the write to happen on entry.
+ */
+ vmx->host_rsp = 0;
+
+ exec_control = vmx_exec_control(vmx); /* L0's desires */
+ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+ exec_control |= vmcs12->cpu_based_vm_exec_control;
+ /*
+ * Merging of IO and MSR bitmaps not currently supported.
+ * Rather, exit every time.
+ */
+ exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
+ exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
+ exec_control |= CPU_BASED_UNCOND_IO_EXITING;
+
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+
+ /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
+ * bitwise-or of what L1 wants to trap for L2, and what we want to
+ * trap. Note that CR0.TS also needs updating - we do this later.
+ */
+ update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+
+ /* Note: IA32E_MODE and LOAD_IA32_EFER are modified by vmx_set_efer below */
+ vmcs_write32(VM_EXIT_CONTROLS,
+ vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl);
+ vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls |
+ (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
+
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
+ else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+
+
+ set_cr4_guest_host_mask(vmx);
+
+ vmcs_write64(TSC_OFFSET,
+ vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+
+ if (enable_vpid) {
+ /*
+ * Trivially support vpid by letting L2s share their parent
+ * L1's vpid. TODO: move to a more elaborate solution, giving
+ * each L2 its own vpid and exposing the vpid feature to L1.
+ */
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+ vmx_flush_tlb(vcpu);
+ }
+
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->guest_ia32_efer;
+ if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+ /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ /*
+ * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly with a modified
+ * TS bit (for lazy fpu) and the bits we consider mandatory set.
+ * The CR0_READ_SHADOW is what L2 should have expected to read given
+ * the specification by L1; it's not enough to take
+ * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
+ * have more bits set than L1 expected.
+ */
+ vmx_set_cr0(vcpu, vmcs12->guest_cr0);
+ vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+
+ vmx_set_cr4(vcpu, vmcs12->guest_cr4);
+ vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
+
+ /* shadow page tables on either EPT or shadow page tables */
+ kvm_set_cr3(vcpu, vmcs12->guest_cr3);
+ kvm_mmu_reset_context(vcpu);
+
+ kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+}
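
The merge policy above follows one pattern throughout prepare_vmcs02(): start from L0's own requirements, strip the features we refuse to delegate to L1, then OR in what L1 requested in vmcs12. A minimal standalone sketch of that pattern, using hypothetical stand-in bits rather than the real VMX control encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins, not the real VMX control encodings. */
    #define CTRL_MSR_BITMAPS  (1u << 0)  /* cf. CPU_BASED_USE_MSR_BITMAPS */
    #define CTRL_IO_BITMAPS   (1u << 1)  /* cf. CPU_BASED_USE_IO_BITMAPS */
    #define CTRL_UNCOND_IO    (1u << 2)  /* cf. CPU_BASED_UNCOND_IO_EXITING */

    static uint32_t merge_exec_control(uint32_t l0_want, uint32_t l1_want)
    {
            uint32_t ctrl = l0_want | l1_want;

            /* Bitmap merging is unsupported: force an exit on every access. */
            ctrl &= ~(CTRL_MSR_BITMAPS | CTRL_IO_BITMAPS);
            ctrl |= CTRL_UNCOND_IO;
            return ctrl;
    }

    int main(void)
    {
            printf("%#x\n", merge_exec_control(CTRL_IO_BITMAPS, CTRL_MSR_BITMAPS));
            return 0;
    }
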
+
+/*
+ * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
+ * for running an L2 nested guest.
+ */
+static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
+{
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int cpu;
+ struct loaded_vmcs *vmcs02;
+
+ if (!nested_vmx_check_permission(vcpu) ||
+ !nested_vmx_check_vmcs12(vcpu))
+ return 1;
+
+ skip_emulated_instruction(vcpu);
+ vmcs12 = get_vmcs12(vcpu);
+
+ /*
+ * The nested entry process starts with enforcing various prerequisites
+ * on vmcs12 as required by the Intel SDM, and acting appropriately when
+ * they fail: as the SDM explains, some conditions should cause the
+ * instruction to fail, while others will cause the instruction to seem
+ * to succeed, but return an EXIT_REASON_INVALID_STATE.
+ * To speed up the normal (success) code path, we should avoid checking
+ * for misconfigurations which will be caught anyway by the processor
+ * when using the merged vmcs02.
+ */
+ if (vmcs12->launch_state == launch) {
+ nested_vmx_failValid(vcpu,
+ launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
+ : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
+ return 1;
+ }
+
+ if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
+ !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
+ /*TODO: Also verify bits beyond physical address width are 0*/
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
+ !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
+ /*TODO: Also verify bits beyond physical address width are 0*/
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (vmcs12->vm_entry_msr_load_count > 0 ||
+ vmcs12->vm_exit_msr_load_count > 0 ||
+ vmcs12->vm_exit_msr_store_count > 0) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__);
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
+ nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
+ !vmx_control_verify(vmcs12->secondary_vm_exec_control,
+ nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
+ !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
+ nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
+ !vmx_control_verify(vmcs12->vm_exit_controls,
+ nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
+ !vmx_control_verify(vmcs12->vm_entry_controls,
+ nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
+ {
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
+ ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+ return 1;
+ }
+
+ if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
+ ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+ return 1;
+ }
+ if (vmcs12->vmcs_link_pointer != -1ull) {
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
+ return 1;
+ }
+
+ /*
+ * We're finally done with prerequisite checking, and can start with
+ * the nested entry.
+ */
+
+ vmcs02 = nested_get_current_vmcs02(vmx);
+ if (!vmcs02)
+ return -ENOMEM;
+
+ enter_guest_mode(vcpu);
+
+ vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
+
+ cpu = get_cpu();
+ vmx->loaded_vmcs = vmcs02;
+ vmx_vcpu_put(vcpu);
+ vmx_vcpu_load(vcpu, cpu);
+ vcpu->cpu = cpu;
+ put_cpu();
+
+ vmcs12->launch_state = 1;
+
+ prepare_vmcs02(vcpu, vmcs12);
+
+ /*
+ * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
+ * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
+ * returned as far as L1 is concerned. It will only return (and set
+ * the success flag) when L2 exits (see nested_vmx_vmexit()).
+ */
+ return 1;
+}
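
Each vmx_control_verify() call above checks a vmcs12 control word against a (low, high) pair derived from the VMX capability MSRs: every must-be-one bit has to be set, and no bit outside the may-be-one mask may be set. A sketch of that predicate (treat this standalone form as illustrative, not the kernel's exact helper):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * A control value is acceptable when all must-be-one bits (low) are
     * set and no bit outside the allowed-one mask (high) is set.
     */
    static bool control_verify(uint32_t control, uint32_t low, uint32_t high)
    {
            return ((control & low) == low) && ((control & high) == control);
    }
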
+
+/*
+ * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
+ * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
+ * This function returns the new value we should put in vmcs12.guest_cr0.
+ * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
+ * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
+ * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
+ * didn't trap the bit, because if L1 did, so would L0).
+ * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
+ * been modified by L2, and L1 knows it. So just leave the old value of
+ * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
+ * isn't relevant, because if L0 traps this bit it can set it to anything.
+ * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
+ * changed these bits, and therefore they need to be updated, but L0
+ * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
+ * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
+ */
+static inline unsigned long
+vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ return
+ /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
+ /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
+ /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
+ vcpu->arch.cr0_guest_owned_bits));
+}
+
+static inline unsigned long
+vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ return
+ /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
+ /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
+ /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
+ vcpu->arch.cr4_guest_owned_bits));
+}
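
The three cases in the comment above reduce to a single bit-select expression across two masks. A toy, runnable version with single-bit stand-ins (the concrete values below are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Guest-owned bits come from the hardware CR0, L1-trapped bits keep
     * vmcs12's old value, and bits trapped only by L0 are taken from the
     * read shadow.
     */
    static uint64_t merge_cr0(uint64_t hw_cr0, uint64_t read_shadow,
                              uint64_t vmcs12_cr0, uint64_t l1_mask,
                              uint64_t guest_owned)
    {
            return (hw_cr0 & guest_owned) |
                   (vmcs12_cr0 & l1_mask) |
                   (read_shadow & ~(l1_mask | guest_owned));
    }

    int main(void)
    {
            /* bit0 guest-owned, bit1 trapped by L1, bit2 trapped only by L0 */
            uint64_t cr0 = merge_cr0(0x1, 0x4, 0x2, 0x2, 0x1);

            printf("%#llx\n", (unsigned long long)cr0); /* prints 0x7 */
            return 0;
    }
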
+
+/*
+ * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
+ * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
+ * and this function updates it to reflect the changes to the guest state while
+ * L2 was running (and perhaps made some exits which were handled directly by L0
+ * without going back to L1), and to reflect the exit reason.
+ * Note that we do not have to copy here all VMCS fields, just those that
+ * could have changed by the L2 guest or the exit - i.e., the guest-state and
+ * exit-information fields only. Other fields are modified by L1 with VMWRITE,
+ * which already writes to vmcs12 directly.
+ */
+void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ /* update guest state fields: */
+ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
+ vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
+
+ kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
+ vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
+ vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
+
+ vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
+ vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
+ vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
+ vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
+ vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
+ vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
+ vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
+ vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
+ vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
+ vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
+ vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
+ vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
+ vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
+ vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
+ vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
+ vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
+ vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
+ vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
+ vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
+ vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
+ vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
+ vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
+ vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
+ vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
+ vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
+ vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
+ vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
+ vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
+ vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
+ vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
+ vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
+ vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
+ vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
+ vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
+ vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
+ vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
+
+ vmcs12->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE);
+ vmcs12->guest_interruptibility_info =
+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+ vmcs12->guest_pending_dbg_exceptions =
+ vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+
+ /* TODO: These cannot have changed unless we have MSR bitmaps and
+ * the relevant bit asks not to trap the change */
+ vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
+ vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
+ vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
+ vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
+ vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
+
+ /* update exit information fields: */
+
+ vmcs12->vm_exit_reason = vmcs_read32(VM_EXIT_REASON);
+ vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+ vmcs12->idt_vectoring_info_field =
+ vmcs_read32(IDT_VECTORING_INFO_FIELD);
+ vmcs12->idt_vectoring_error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+
+ /* clear vm-entry fields which are to be cleared on exit */
+ if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
+}
+
+/*
+ * Part of what we need to do when the nested L2 guest exits and we want
+ * to run its L1 parent is to reset L1's guest state to the host state
+ * specified in vmcs12.
+ * This function is to be called not only on normal nested exit, but also on
+ * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
+ * Failures During or After Loading Guest State").
+ * This function should be called when the active VMCS is L1's (vmcs01).
+ */
+void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->host_ia32_efer;
+ if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
+ /*
+ * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
+ * actually changed, because it depends on the current state of
+ * fpu_active (which may have changed).
+ * Note that vmx_set_cr0 refers to efer set above.
+ */
+ kvm_set_cr0(vcpu, vmcs12->host_cr0);
+ /*
+ * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
+ * to apply the same changes to L1's vmcs. We just set cr0 correctly,
+ * but we also need to update cr0_guest_host_mask and exception_bitmap.
+ */
+ update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+
+ /*
+ * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
+ * (KVM doesn't change it) - no reason to call set_cr4_guest_host_mask().
+ */
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ kvm_set_cr4(vcpu, vmcs12->host_cr4);
+
+ /* shadow page tables on either EPT or shadow page tables */
+ kvm_set_cr3(vcpu, vmcs12->host_cr3);
+ kvm_mmu_reset_context(vcpu);
+
+ if (enable_vpid) {
+ /*
+ * Trivially support vpid by letting L2s share their parent
+ * L1's vpid. TODO: move to a more elaborate solution, giving
+ * each L2 its own vpid and exposing the vpid feature to L1.
+ */
+ vmx_flush_tlb(vcpu);
+ }
+
+
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+ vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base);
+ vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base);
+ vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
+ vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector);
+ vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector);
+ vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector);
+ vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
+ vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
+
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+ vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
+ vmcs12->host_ia32_perf_global_ctrl);
+}
+
+/*
+ * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
+ * and modify vmcs12 to make it see what it would expect to see there if
+ * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
+ */
+static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int cpu;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ leave_guest_mode(vcpu);
+ prepare_vmcs12(vcpu, vmcs12);
+
+ cpu = get_cpu();
+ vmx->loaded_vmcs = &vmx->vmcs01;
+ vmx_vcpu_put(vcpu);
+ vmx_vcpu_load(vcpu, cpu);
+ vcpu->cpu = cpu;
+ put_cpu();
+
+ /* if no vmcs02 cache requested, remove the one we used */
+ if (VMCS02_POOL_SIZE == 0)
+ nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
+
+ load_vmcs12_host_state(vcpu, vmcs12);
+
+ /* Update TSC_OFFSET if vmx_adjust_tsc_offset() was used while L2 ran */
+ vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+
+ /* This is needed for same reason as it was needed in prepare_vmcs02 */
+ vmx->host_rsp = 0;
+
+ /* Unpin physical memory we referred to in vmcs02 */
+ if (vmx->nested.apic_access_page) {
+ nested_release_page(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page = NULL;
+ }
+
+ /*
+ * Exiting from L2 to L1, we're now back to L1 which thinks it just
+ * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
+ * success or failure flag accordingly.
+ */
+ if (unlikely(vmx->fail)) {
+ vmx->fail = 0;
+ nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
+ } else
+ nested_vmx_succeed(vcpu);
+}
+
+/*
+ * L1's failure to enter L2 is a subset of a normal exit, as explained in
+ * 23.7 "VM-entry failures during or after loading guest state" (this also
+ * lists the acceptable exit-reason and exit-qualification parameters).
+ * It should only be called before L2 has actually run successfully, and when
+ * vmcs01 is current (it doesn't leave_guest_mode() or switch VMCSs).
+ */
+static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12,
+ u32 reason, unsigned long qualification)
+{
+ load_vmcs12_host_state(vcpu, vmcs12);
+ vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+ vmcs12->exit_qualification = qualification;
+ nested_vmx_succeed(vcpu);
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
@@ -4670,16 +7119,13 @@ static int __init vmx_init(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
if (enable_ept) {
- bypass_guest_pf = 0;
kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
VMX_EPT_EXECUTABLE_MASK);
+ ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
kvm_disable_tdp();
- if (bypass_guest_pf)
- kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
-
return 0;
out3:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 77c9d8673dc4..84a28ea45fa4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -347,6 +347,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
vcpu->arch.cr2 = fault->address;
kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
+EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
@@ -579,6 +580,22 @@ static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
+static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_SMEP));
+}
+
+static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
+}
+
static void update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -598,14 +615,20 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
-
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
+ X86_CR4_PAE | X86_CR4_SMEP;
if (cr4 & CR4_RESERVED_BITS)
return 1;
if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
return 1;
+ if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
+ return 1;
+
+ if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
+ return 1;
+
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
@@ -615,11 +638,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
kvm_read_cr3(vcpu)))
return 1;
- if (cr4 & X86_CR4_VMXE)
+ if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
- kvm_x86_ops->set_cr4(vcpu, cr4);
-
if ((cr4 ^ old_cr4) & pdptr_bits)
kvm_mmu_reset_context(vcpu);
@@ -787,12 +808,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 8
+#define KVM_SAVE_MSRS_BEGIN 9
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
- HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
+ HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_STAR,
#ifdef CONFIG_X86_64
@@ -1388,7 +1409,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
- if (copy_to_user((void __user *)addr, instructions, 4))
+ if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
break;
@@ -1415,7 +1436,7 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
if (kvm_is_error_hva(addr))
return 1;
- if (clear_user((void __user *)addr, PAGE_SIZE))
+ if (__clear_user((void __user *)addr, PAGE_SIZE))
return 1;
vcpu->arch.hv_vapic = data;
break;
@@ -1467,6 +1488,35 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
}
}
+static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+{
+ u64 delta;
+
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+ delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ vcpu->arch.st.accum_steal = delta;
+}
+
+static void record_steal_time(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+ if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+ return;
+
+ vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
+ vcpu->arch.st.steal.version += 2;
+ vcpu->arch.st.accum_steal = 0;
+
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+}
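
record_steal_time() bumps steal.version by 2 around each publish, which gives the guest a seqlock-like protocol: an odd or changed version means the read raced with an update and must be retried. A hedged, kernel-context sketch of the guest-side reader (the loop shape is an assumption based on the version arithmetic above; barrier() is the usual compiler barrier):

    static u64 read_steal_time(struct kvm_steal_time *st)
    {
            u32 version;
            u64 steal;

            do {
                    version = st->version;
                    barrier();              /* read version before payload */
                    steal = st->steal;
                    barrier();              /* read payload before recheck */
            } while ((version & 1) || version != st->version);

            return steal;
    }
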
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
@@ -1549,6 +1599,33 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
+ case MSR_KVM_STEAL_TIME:
+
+ if (unlikely(!sched_info_on()))
+ return 1;
+
+ if (data & KVM_STEAL_RESERVED_MASK)
+ return 1;
+
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+ data & KVM_STEAL_VALID_BITS))
+ return 1;
+
+ vcpu->arch.st.msr_val = data;
+
+ if (!(data & KVM_MSR_ENABLED))
+ break;
+
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+ preempt_disable();
+ accumulate_steal_time(vcpu);
+ preempt_enable();
+
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+
+ break;
+
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1834,6 +1911,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_KVM_ASYNC_PF_EN:
data = vcpu->arch.apf.msr_val;
break;
+ case MSR_KVM_STEAL_TIME:
+ data = vcpu->arch.st.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
@@ -2145,6 +2225,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_migrate_timers(vcpu);
vcpu->cpu = cpu;
}
+
+ accumulate_steal_time(vcpu);
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -2283,6 +2366,13 @@ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->flags = 0;
}
+static bool supported_xcr0_bit(unsigned bit)
+{
+ u64 mask = ((u64)1 << bit);
+
+ return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+}
+
#define F(x) bit(X86_FEATURE_##x)
static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
@@ -2328,7 +2418,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
0 /* Reserved, DCA */ | F(XMM4_1) |
F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
- F(F16C);
+ F(F16C) | F(RDRAND);
/* cpuid 0x80000001.ecx */
const u32 kvm_supported_word6_x86_features =
F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
@@ -2342,6 +2432,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
F(PMM) | F(PMM_EN);
+ /* cpuid 7.0.ebx */
+ const u32 kvm_supported_word9_x86_features =
+ F(SMEP) | F(FSGSBASE) | F(ERMS);
+
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
do_cpuid_1_ent(entry, function, index);
@@ -2376,7 +2470,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
- /* function 4 and 0xb have additional index. */
+ /* function 4 has additional index. */
case 4: {
int i, cache_type;
@@ -2393,6 +2487,22 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
+ case 7: {
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ /* Mask ebx against host capability word 9 */
+ if (index == 0) {
+ entry->ebx &= kvm_supported_word9_x86_features;
+ cpuid_mask(&entry->ebx, 9);
+ } else
+ entry->ebx = 0;
+ entry->eax = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ break;
+ }
+ case 9:
+ break;
+ /* function 0xb has additional index. */
case 0xb: {
int i, level_type;
@@ -2410,16 +2520,17 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
break;
}
case 0xd: {
- int i;
+ int idx, i;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- for (i = 1; *nent < maxnent && i < 64; ++i) {
- if (entry[i].eax == 0)
+ for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
+ do_cpuid_1_ent(&entry[i], function, idx);
+ if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
continue;
- do_cpuid_1_ent(&entry[i], function, i);
entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
+ ++i;
}
break;
}
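
The reworked 0xd loop above separates the probed index (idx) from the output slot (i), so unsupported or empty xsave leaves no longer leave holes in the entry array. The idiom in isolation, with fill_entry() as a hypothetical stand-in for do_cpuid_1_ent():

    /* Probe indices 1..63 but keep only leaves the host can expose,
     * writing survivors contiguously into entry[]. */
    for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
            fill_entry(&entry[i], function, idx);   /* hypothetical helper */
            if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
                    continue;
            ++*nent;
            ++i;
    }
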
@@ -2438,6 +2549,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_CLOCKSOURCE2) |
(1 << KVM_FEATURE_ASYNC_PF) |
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+
+ if (sched_info_on())
+ entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+
entry->ebx = 0;
entry->ecx = 0;
entry->edx = 0;
@@ -2451,6 +2566,24 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_supported_word6_x86_features;
cpuid_mask(&entry->ecx, 6);
break;
+ case 0x80000008: {
+ unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+ unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+ unsigned phys_as = entry->eax & 0xff;
+
+ if (!g_phys_as)
+ g_phys_as = phys_as;
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->ebx = entry->edx = 0;
+ break;
+ }
+ case 0x80000019:
+ entry->ecx = entry->edx = 0;
+ break;
+ case 0x8000001a:
+ break;
+ case 0x8000001d:
+ break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
/*Just support up to 0xC0000004 now*/
@@ -2460,10 +2593,16 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->edx &= kvm_supported_word5_x86_features;
cpuid_mask(&entry->edx, 5);
break;
+ case 3: /* Processor serial number */
+ case 5: /* MONITOR/MWAIT */
+ case 6: /* Thermal management */
+ case 0xA: /* Architectural Performance Monitoring */
+ case 0x80000007: /* Advanced power management */
case 0xC0000002:
case 0xC0000003:
case 0xC0000004:
- /*Now nothing to do, reserved for the future*/
+ default:
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}
@@ -3817,7 +3956,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
exception);
}
-static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
@@ -3827,6 +3966,7 @@ static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
+EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
@@ -3836,7 +3976,7 @@ static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
}
-static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val,
unsigned int bytes,
struct x86_exception *exception)
@@ -3868,6 +4008,42 @@ static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
out:
return r;
}
+EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+
+static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+ gpa_t *gpa, struct x86_exception *exception,
+ bool write)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+
+ if (vcpu_match_mmio_gva(vcpu, gva) &&
+ check_write_user_access(vcpu, write, access,
+ vcpu->arch.access)) {
+ *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
+ (gva & (PAGE_SIZE - 1));
+ trace_vcpu_match_mmio(gva, *gpa, write, false);
+ return 1;
+ }
+
+ if (write)
+ access |= PFERR_WRITE_MASK;
+
+ *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+
+ if (*gpa == UNMAPPED_GVA)
+ return -1;
+
+ /* For APIC access vmexit */
+ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ return 1;
+
+ if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
+ trace_vcpu_match_mmio(gva, *gpa, write, true);
+ return 1;
+ }
+
+ return 0;
+}
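
vcpu_mmio_gva_to_gpa() deliberately returns a tri-state: negative for a translation fault, positive when the access must take the MMIO path (a cache hit or the APIC access page), and zero for ordinary guest RAM. The emulator callers below rely on exactly that shape; a compressed sketch of the consuming pattern:

    ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
    if (ret < 0)
            return X86EMUL_PROPAGATE_FAULT;   /* unmapped gva */
    if (ret)
            goto mmio;                        /* emulate as MMIO */
    /* ret == 0: plain memory, access it through the normal helpers */
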
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
@@ -3876,8 +4052,8 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- gpa_t gpa;
- int handled;
+ gpa_t gpa;
+ int handled, ret;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
@@ -3887,13 +4063,12 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
return X86EMUL_CONTINUE;
}
- gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
+ ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, false);
- if (gpa == UNMAPPED_GVA)
+ if (ret < 0)
return X86EMUL_PROPAGATE_FAULT;
- /* For APIC access vmexit */
- if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ if (ret)
goto mmio;
if (kvm_read_guest_virt(ctxt, addr, val, bytes, exception)
@@ -3944,16 +4119,16 @@ static int emulator_write_emulated_onepage(unsigned long addr,
struct x86_exception *exception,
struct kvm_vcpu *vcpu)
{
- gpa_t gpa;
- int handled;
+ gpa_t gpa;
+ int handled, ret;
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
+ ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, true);
- if (gpa == UNMAPPED_GVA)
+ if (ret < 0)
return X86EMUL_PROPAGATE_FAULT;
/* For APIC access vmexit */
- if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ if (ret)
goto mmio;
if (emulator_write_phys(vcpu, gpa, val, bytes))
@@ -4473,9 +4648,24 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
kvm_queue_exception(vcpu, ctxt->exception.vector);
}
+static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
+ const unsigned long *regs)
+{
+ memset(&ctxt->twobyte, 0,
+ (void *)&ctxt->regs - (void *)&ctxt->twobyte);
+ memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
+
+ ctxt->fetch.start = 0;
+ ctxt->fetch.end = 0;
+ ctxt->io_read.pos = 0;
+ ctxt->io_read.end = 0;
+ ctxt->mem_read.pos = 0;
+ ctxt->mem_read.end = 0;
+}
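
init_decode_cache() zeroes everything between ->twobyte and ->regs with a single memset, which quietly depends on member declaration order in struct x86_emulate_ctxt. A minimal illustration of the idiom with a made-up struct:

    #include <string.h>

    /*
     * One memset clears the run of members from 'first' up to (but not
     * including) 'keep'. Reordering the struct breaks this silently.
     */
    struct ctx {
            int keep_before;
            int first;      /* start of the cleared span */
            int scratch;
            int keep;       /* first preserved member */
    };

    static void clear_span(struct ctx *c)
    {
            memset(&c->first, 0, (char *)&c->keep - (char *)&c->first);
    }
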
+
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int cs_db, cs_l;
/*
@@ -4488,40 +4678,38 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
- vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
- vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
- vcpu->arch.emulate_ctxt.mode =
- (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
- (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
- ? X86EMUL_MODE_VM86 : cs_l
- ? X86EMUL_MODE_PROT64 : cs_db
- ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
- vcpu->arch.emulate_ctxt.guest_mode = is_guest_mode(vcpu);
- memset(c, 0, sizeof(struct decode_cache));
- memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+ ctxt->eflags = kvm_get_rflags(vcpu);
+ ctxt->eip = kvm_rip_read(vcpu);
+ ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+ (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
+ cs_l ? X86EMUL_MODE_PROT64 :
+ cs_db ? X86EMUL_MODE_PROT32 :
+ X86EMUL_MODE_PROT16;
+ ctxt->guest_mode = is_guest_mode(vcpu);
+
+ init_decode_cache(ctxt, vcpu->arch.regs);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
- vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
- vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
- vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip +
- inc_eip;
- ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
+ ctxt->op_bytes = 2;
+ ctxt->ad_bytes = 2;
+ ctxt->_eip = ctxt->eip + inc_eip;
+ ret = emulate_int_real(ctxt, irq);
if (ret != X86EMUL_CONTINUE)
return EMULATE_FAIL;
- vcpu->arch.emulate_ctxt.eip = c->eip;
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ ctxt->eip = ctxt->_eip;
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
+ kvm_rip_write(vcpu, ctxt->eip);
+ kvm_set_rflags(vcpu, ctxt->eflags);
if (irq == NMI_VECTOR)
vcpu->arch.nmi_pending = false;
@@ -4582,21 +4770,21 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
int insn_len)
{
int r;
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
- vcpu->arch.emulate_ctxt.interruptibility = 0;
- vcpu->arch.emulate_ctxt.have_exception = false;
- vcpu->arch.emulate_ctxt.perm_ok = false;
+ ctxt->interruptibility = 0;
+ ctxt->have_exception = false;
+ ctxt->perm_ok = false;
- vcpu->arch.emulate_ctxt.only_vendor_specific_insn
+ ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
- r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
+ r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
@@ -4612,7 +4800,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
}
if (emulation_type & EMULTYPE_SKIP) {
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
+ kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
@@ -4620,11 +4808,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
changes registers values during IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
- memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+ memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
}
restart:
- r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
+ r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
@@ -4636,7 +4824,7 @@ restart:
return handle_emulation_failure(vcpu);
}
- if (vcpu->arch.emulate_ctxt.have_exception) {
+ if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
@@ -4655,13 +4843,12 @@ restart:
r = EMULATE_DONE;
if (writeback) {
- toggle_interruptibility(vcpu,
- vcpu->arch.emulate_ctxt.interruptibility);
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ toggle_interruptibility(vcpu, ctxt->interruptibility);
+ kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
+ kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
@@ -4878,6 +5065,30 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+static void kvm_set_mmio_spte_mask(void)
+{
+ u64 mask;
+ int maxphyaddr = boot_cpu_data.x86_phys_bits;
+
+ /*
+ * Set the reserved bits and the present bit of a paging-structure
+ * entry to generate a page fault with PFEC.RSVD = 1.
+ */
+ mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
+ mask |= 1ull;
+
+#ifdef CONFIG_X86_64
+ /*
+ * If reserved bits are not supported, clear the present bit to disable
+ * the mmio page fault.
+ */
+ if (maxphyaddr == 52)
+ mask &= ~1ull;
+#endif
+
+ kvm_mmu_set_mmio_spte_mask(mask);
+}
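
The shift expression sets exactly the reserved physical-address bits [maxphyaddr, 62] and then adds the present bit. A checkable userspace rendering of the same arithmetic (for maxphyaddr = 36 it yields 0x7ffffff000000001):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mmio_spte_mask(int maxphyaddr)
    {
            /* reserved bits [maxphyaddr, 62] ... */
            uint64_t mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;

            mask |= 1ull;                   /* ... plus the present bit */
            if (maxphyaddr == 52)           /* no reserved bits to misuse */
                    mask &= ~1ull;
            return mask;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)mmio_spte_mask(36));
            return 0;
    }
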
+
int kvm_arch_init(void *opaque)
{
int r;
@@ -4904,10 +5115,10 @@ int kvm_arch_init(void *opaque)
if (r)
goto out;
+ kvm_set_mmio_spte_mask();
kvm_init_msr_list();
kvm_x86_ops = ops;
- kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0);
@@ -5082,8 +5293,7 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(&vcpu->arch.emulate_ctxt,
- rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
@@ -5384,6 +5594,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = 1;
goto out;
}
+ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ record_steal_time(vcpu);
+
}
r = kvm_mmu_reload(vcpu);
@@ -5671,8 +5884,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
* that usually, but some badly designed PV devices (vmware
* backdoor interface) need this to work
*/
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
}
regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
@@ -5801,21 +6014,20 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
bool has_error_code, u32 error_code)
{
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
- ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
- tss_selector, reason, has_error_code,
- error_code);
+ ret = emulator_task_switch(ctxt, tss_selector, reason,
+ has_error_code, error_code);
if (ret)
return EMULATE_FAIL;
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
+ kvm_rip_write(vcpu, ctxt->eip);
+ kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return EMULATE_DONE;
}
@@ -6093,12 +6305,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (r == 0)
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
- if (r < 0)
- goto free_vcpu;
- return 0;
-free_vcpu:
- kvm_x86_ops->vcpu_free(vcpu);
return r;
}
@@ -6126,6 +6333,7 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
+ vcpu->arch.st.msr_val = 0;
kvmclock_reset(vcpu);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e407ed3df817..d36fe237c665 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -75,10 +75,54 @@ static inline u32 bit(int bitno)
return 1 << (bitno & 31);
}
+static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ gva_t gva, gfn_t gfn, unsigned access)
+{
+ vcpu->arch.mmio_gva = gva & PAGE_MASK;
+ vcpu->arch.access = access;
+ vcpu->arch.mmio_gfn = gfn;
+}
+
+/*
+ * Clear the mmio cache info for the given gva;
+ * in particular, if gva is ~0ul, we clear all mmio cache info.
+ */
+static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+{
+ if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+ return;
+
+ vcpu->arch.mmio_gva = 0;
+}
+
+static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+{
+ if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+ return true;
+
+ return false;
+}
+
+static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+ return true;
+
+ return false;
+}
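
Taken together, these helpers implement a one-entry translation cache: vcpu_cache_mmio_info() fills it on an MMIO miss, the two match helpers give vcpu_mmio_gva_to_gpa() its fast path, and the ~0ul convention flushes it wholesale. A short usage sketch:

    vcpu_cache_mmio_info(vcpu, gva, gfn, access); /* remember an MMIO hit */
    vcpu_clear_mmio_info(vcpu, gva);   /* drop the cache only if it covers gva */
    vcpu_clear_mmio_info(vcpu, ~0ul);  /* drop whatever is cached */
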
+
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
+int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
#endif
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index db832fd65ecb..13ee258442ae 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -71,7 +71,8 @@
#include <asm/stackprotector.h>
#include <asm/reboot.h> /* for struct machine_ops */
-/*G:010 Welcome to the Guest!
+/*G:010
+ * Welcome to the Guest!
*
* The Guest in our tale is a simple creature: identical to the Host but
* behaving in simplified but equivalent ways. In particular, the Guest is the
@@ -190,15 +191,23 @@ static void lazy_hcall4(unsigned long call,
#endif
/*G:036
- * When lazy mode is turned off reset the per-cpu lazy mode variable and then
- * issue the do-nothing hypercall to flush any stored calls.
-:*/
+ * When lazy mode is turned off, we issue the do-nothing hypercall to
+ * flush any stored calls, and call the generic helper to reset the
+ * per-cpu lazy mode variable.
+ */
static void lguest_leave_lazy_mmu_mode(void)
{
hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
paravirt_leave_lazy_mmu();
}
+/*
+ * We also catch the end of context switch; we enter lazy mode for much of
+ * that too, so again we need to flush here.
+ *
+ * (Technically, this is lazy CPU mode, and normally we're in lazy MMU
+ * mode, but unlike Xen, lguest doesn't care about the difference).
+ */
static void lguest_end_context_switch(struct task_struct *next)
{
hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
@@ -391,7 +400,7 @@ static void lguest_load_tr_desc(void)
* giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
*
* This instruction even has its own Wikipedia entry. The Wikipedia entry
- * has been translated into 5 languages. I am not making this up!
+ * has been translated into 6 languages. I am not making this up!
*
* We could get funky here and identify ourselves as "GenuineLguest", but
* instead we just use the real "cpuid" instruction. Then I pretty much turned
@@ -458,7 +467,7 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
/*
* PAE systems can mark pages as non-executable. Linux calls this the
* NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
- * Virus Protection). We just switch turn if off here, since we don't
+ * Virus Protection). We just switch it off here, since we don't
* support it.
*/
case 0x80000001:
@@ -520,17 +529,16 @@ static unsigned long lguest_read_cr2(void)
/* See lguest_set_pte() below. */
static bool cr3_changed = false;
+static unsigned long current_cr3;
/*
* cr3 is the current toplevel pagetable page: the principle is the same as
- * cr0. Keep a local copy, and tell the Host when it changes. The only
- * difference is that our local copy is in lguest_data because the Host needs
- * to set it upon our initial hypercall.
+ * cr0. Keep a local copy, and tell the Host when it changes.
*/
static void lguest_write_cr3(unsigned long cr3)
{
- lguest_data.pgdir = cr3;
lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
+ current_cr3 = cr3;
/* These two page tables are simple, linear, and used during boot */
if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
@@ -539,7 +547,7 @@ static void lguest_write_cr3(unsigned long cr3)
static unsigned long lguest_read_cr3(void)
{
- return lguest_data.pgdir;
+ return current_cr3;
}
/* cr4 is used to enable and disable PGE, but we don't care. */
@@ -641,7 +649,7 @@ static void lguest_write_cr4(unsigned long val)
/*
* The Guest calls this after it has set a second-level entry (pte), ie. to map
- * a page into a process' address space. Wetell the Host the toplevel and
+ * a page into a process' address space. We tell the Host the toplevel and
* address this corresponds to. The Guest uses one pagetable per process, so
* we need to tell the Host which one we're changing (mm->pgd).
*/
@@ -758,7 +766,7 @@ static void lguest_pmd_clear(pmd_t *pmdp)
static void lguest_flush_tlb_single(unsigned long addr)
{
/* Simply set it to zero: if it was not, it will fault back in. */
- lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
+ lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0);
}
/*
@@ -1140,7 +1148,7 @@ static struct notifier_block paniced = {
static __init char *lguest_memory_setup(void)
{
/*
- *The Linux bootloader header contains an "e820" memory map: the
+ * The Linux bootloader header contains an "e820" memory map: the
* Launcher populated the first entry with our memory limit.
*/
e820_add_region(boot_params.e820_map[0].addr,
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 4f420c2f2d55..6ddfe4fc23c3 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -6,18 +6,22 @@
#include <asm/processor-flags.h>
/*G:020
- * Our story starts with the kernel booting into startup_32 in
- * arch/x86/kernel/head_32.S. It expects a boot header, which is created by
- * the bootloader (the Launcher in our case).
+ * Our story starts with the bzImage: booting starts at startup_32 in
+ * arch/x86/boot/compressed/head_32.S. This merely uncompresses the real
+ * kernel in place and then jumps into it: startup_32 in
+ * arch/x86/kernel/head_32.S. Both routines expect a boot header in the %esi
+ * register, which is created by the bootloader (the Launcher in our case).
*
* The startup_32 function does very little: it clears the uninitialized global
* C variables which we expect to be zero (ie. BSS) and then copies the boot
- * header and kernel command line somewhere safe. Finally it checks the
- * 'hardware_subarch' field. This was introduced in 2.6.24 for lguest and Xen:
- * if it's set to '1' (lguest's assigned number), then it calls us here.
+ * header and kernel command line somewhere safe, and populates some initial
+ * page tables. Finally it checks the 'hardware_subarch' field. This was
+ * introduced in 2.6.24 for lguest and Xen: if it's set to '1' (lguest's
+ * assigned number), then it calls us here.
*
* WARNING: be very careful here! We're running at addresses equal to physical
- * addesses (around 0), not above PAGE_OFFSET as most code expectes
+ * addresses (around 0), not above PAGE_OFFSET as most code expects
* (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any
* data without remembering to subtract __PAGE_OFFSET!
*
@@ -27,13 +31,18 @@
.section .init.text, "ax", @progbits
ENTRY(lguest_entry)
/*
- * We make the "initialization" hypercall now to tell the Host about
- * us, and also find out where it put our page tables.
+ * We make the "initialization" hypercall now to tell the Host where
+ * our lguest_data struct is.
*/
movl $LHCALL_LGUEST_INIT, %eax
movl $lguest_data - __PAGE_OFFSET, %ebx
int $LGUEST_TRAP_ENTRY
+ /* Now turn our pagetables on; set up by arch/x86/kernel/head_32.S. */
+ movl $LHCALL_NEW_PGTABLE, %eax
+ movl $(initial_page_table - __PAGE_OFFSET), %ebx
+ int $LGUEST_TRAP_ENTRY
+
/* Set up the initial stack so we can run C code. */
movl $(init_thread_union+THREAD_SIZE),%esp
@@ -96,12 +105,8 @@ send_interrupts:
*/
pushl %eax
movl $LHCALL_SEND_INTERRUPTS, %eax
- /*
- * This is a vmcall instruction (same thing that KVM uses). Older
- * assembler versions might not know the "vmcall" instruction, so we
- * create one manually here.
- */
- .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+ /* This is the actual hypercall trap. */
+ int $LGUEST_TRAP_ENTRY
/* Put eax back the way we found it. */
popl %eax
ret
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f2479f19ddde..b00f6785da74 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -18,8 +18,10 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o
lib-y += thunk_$(BITS).o
-lib-y += usercopy_$(BITS).o getuser.o putuser.o
+lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
+lib-$(CONFIG_SMP) += rwlock.o
+lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
obj-y += msr.o msr-reg.o msr-reg-export.o
@@ -29,7 +31,7 @@ ifeq ($(CONFIG_X86_32),y)
lib-y += atomic64_cx8_32.o
lib-y += checksum_32.o
lib-y += strstr_32.o
- lib-y += semaphore_32.o string_32.o
+ lib-y += string_32.o
lib-y += cmpxchg.o
ifneq ($(CONFIG_X86_CMPXCHG64),y)
lib-y += cmpxchg8b_emu.o atomic64_386_32.o
@@ -40,7 +42,6 @@ else
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
lib-y += thunk_64.o clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
- lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
- lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+ lib-y += copy_user_64.o copy_user_nocache_64.o
lib-y += cmpxchg16b_emu.o
endif
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 6fec2d1cebe1..01c805ba5359 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -2,6 +2,7 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
ALIGN
copy_page_c:
@@ -110,10 +111,6 @@ ENDPROC(copy_page)
2:
.previous
.section .altinstructions,"a"
- .align 8
- .quad copy_page
- .quad 1b
- .word X86_FEATURE_REP_GOOD
- .byte .Lcopy_page_end - copy_page
- .byte 2b - 1b
+ altinstruction_entry copy_page, 1b, X86_FEATURE_REP_GOOD, \
+ .Lcopy_page_end-copy_page, 2b-1b
.previous
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index d0ec9c2936d7..ee164610ec46 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -9,6 +9,7 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
#undef memmove
@@ -214,11 +215,9 @@ ENTRY(memmove)
.previous
.section .altinstructions,"a"
- .align 8
- .quad .Lmemmove_begin_forward
- .quad .Lmemmove_begin_forward_efs
- .word X86_FEATURE_ERMS
- .byte .Lmemmove_end_forward-.Lmemmove_begin_forward
- .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
+ altinstruction_entry .Lmemmove_begin_forward, \
+ .Lmemmove_begin_forward_efs, X86_FEATURE_ERMS, \
+ .Lmemmove_end_forward-.Lmemmove_begin_forward, \
+ .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
.previous
ENDPROC(memmove)
diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
new file mode 100644
index 000000000000..1cad22139c88
--- /dev/null
+++ b/arch/x86/lib/rwlock.S
@@ -0,0 +1,44 @@
+/* Slow paths of read/write spinlocks. */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/rwlock.h>
+
+#ifdef CONFIG_X86_32
+# define __lock_ptr eax
+#else
+# define __lock_ptr rdi
+#endif
+
+ENTRY(__write_lock_failed)
+ CFI_STARTPROC
+ FRAME
+0: LOCK_PREFIX
+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
+1: rep; nop
+ cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
+ jne 1b
+ LOCK_PREFIX
+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
+ jnz 0b
+ ENDFRAME
+ ret
+ CFI_ENDPROC
+END(__write_lock_failed)
+
+ENTRY(__read_lock_failed)
+ CFI_STARTPROC
+ FRAME
+0: LOCK_PREFIX
+ READ_LOCK_SIZE(inc) (%__lock_ptr)
+1: rep; nop
+ READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
+ js 1b
+ LOCK_PREFIX
+ READ_LOCK_SIZE(dec) (%__lock_ptr)
+ js 0b
+ ENDFRAME
+ ret
+ CFI_ENDPROC
+END(__read_lock_failed)
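
For readers who don't speak AT&T assembly: __read_lock_failed undoes the failed decrement, spins (rep; nop is the PAUSE hint) until the count looks positive again, then races for a reader slot. A C-shaped sketch of that control flow (the atomic_* names are the generic kernel primitives, used here only to mirror the lock inc/dec above):

    static void read_lock_slowpath(atomic_t *lock)
    {
            do {
                    atomic_inc(lock);               /* undo the failed grab */
                    while (atomic_read(lock) <= 0)  /* the rep; nop spin */
                            cpu_relax();
            } while (atomic_dec_return(lock) < 0);  /* try to take it again */
    }
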
diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
deleted file mode 100644
index 05ea55f71405..000000000000
--- a/arch/x86/lib/rwlock_64.S
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Slow paths of read/write spinlocks. */
-
-#include <linux/linkage.h>
-#include <asm/rwlock.h>
-#include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
-
-/* rdi: pointer to rwlock_t */
-ENTRY(__write_lock_failed)
- CFI_STARTPROC
- LOCK_PREFIX
- addl $RW_LOCK_BIAS,(%rdi)
-1: rep
- nop
- cmpl $RW_LOCK_BIAS,(%rdi)
- jne 1b
- LOCK_PREFIX
- subl $RW_LOCK_BIAS,(%rdi)
- jnz __write_lock_failed
- ret
- CFI_ENDPROC
-END(__write_lock_failed)
-
-/* rdi: pointer to rwlock_t */
-ENTRY(__read_lock_failed)
- CFI_STARTPROC
- LOCK_PREFIX
- incl (%rdi)
-1: rep
- nop
- cmpl $1,(%rdi)
- js 1b
- LOCK_PREFIX
- decl (%rdi)
- js __read_lock_failed
- ret
- CFI_ENDPROC
-END(__read_lock_failed)
diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem.S
index 67743977398b..5dff5f042468 100644
--- a/arch/x86/lib/rwsem_64.S
+++ b/arch/x86/lib/rwsem.S
@@ -1,4 +1,51 @@
/*
+ * x86 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Portions Copyright 1999 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/dwarf2.h>
+
+#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
+#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
+
+#ifdef CONFIG_X86_32
+
+/*
+ * The semaphore operations have a special calling sequence that
+ * allows us to do a simpler in-line version of them. These routines
+ * need to convert that sequence back into the C sequence when
+ * there is contention on the semaphore.
+ *
+ * %eax contains the semaphore pointer on entry. Save the C-clobbered
+ * registers (%eax, %edx and %ecx) except %eax which is either a return
+ * value or just clobbered.
+ */
+
+#define save_common_regs \
+ pushl_cfi %ecx; CFI_REL_OFFSET ecx, 0
+
+#define restore_common_regs \
+ popl_cfi %ecx; CFI_RESTORE ecx
+
+ /* Avoid uglifying the argument copying x86-64 needs to do. */
+ .macro movq src, dst
+ .endm
+
+#else
+
+/*
* x86-64 rwsem wrappers
*
* This interfaces the inline asm code to the slow-path
@@ -16,12 +63,6 @@
* but %rdi, %rsi, %rcx, %r8-r11 always need saving.
*/
-#include <linux/linkage.h>
-#include <asm/rwlock.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-#include <asm/dwarf2.h>
-
#define save_common_regs \
pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \
pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \
@@ -40,16 +81,18 @@
popq_cfi %rsi; CFI_RESTORE rsi; \
popq_cfi %rdi; CFI_RESTORE rdi
+#endif
+
/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
CFI_STARTPROC
save_common_regs
- pushq_cfi %rdx
- CFI_REL_OFFSET rdx, 0
+ __ASM_SIZE(push,_cfi) %__ASM_REG(dx)
+ CFI_REL_OFFSET __ASM_REG(dx), 0
movq %rax,%rdi
call rwsem_down_read_failed
- popq_cfi %rdx
- CFI_RESTORE rdx
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+ CFI_RESTORE __ASM_REG(dx)
restore_common_regs
ret
CFI_ENDPROC
@@ -67,7 +110,8 @@ ENDPROC(call_rwsem_down_write_failed)
ENTRY(call_rwsem_wake)
CFI_STARTPROC
- decl %edx /* do nothing if still outstanding active readers */
+ /* do nothing if still outstanding active readers */
+ __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
jnz 1f
save_common_regs
movq %rax,%rdi
@@ -77,16 +121,15 @@ ENTRY(call_rwsem_wake)
CFI_ENDPROC
ENDPROC(call_rwsem_wake)
-/* Fix up special calling conventions */
ENTRY(call_rwsem_downgrade_wake)
CFI_STARTPROC
save_common_regs
- pushq_cfi %rdx
- CFI_REL_OFFSET rdx, 0
+ __ASM_SIZE(push,_cfi) %__ASM_REG(dx)
+ CFI_REL_OFFSET __ASM_REG(dx), 0
movq %rax,%rdi
call rwsem_downgrade_wake
- popq_cfi %rdx
- CFI_RESTORE rdx
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+ CFI_RESTORE __ASM_REG(dx)
restore_common_regs
ret
CFI_ENDPROC
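These fixups exist because the rwsem fast paths are inline asm that pass the semaphore pointer in %eax/%rax rather than following the C ABI, so the slow-path C functions cannot be called directly. As a sketch of the era's arch/x86 rwsem header (close to, but not guaranteed verbatim), the uncontended path looks like:

    /* Sketch: the down_read() fast path that falls back to the thunk above. */
    static inline void __down_read(struct rw_semaphore *sem)
    {
            asm volatile(LOCK_PREFIX _ASM_INC "(%1)\n\t"  /* adds 0x00000001 */
                         "jns 1f\n\t"           /* still non-negative: got it */
                         "call call_rwsem_down_read_failed\n"
                         "1:"
                         : "+m" (sem->count)
                         : "a" (sem)            /* semaphore pointer in %(e|r)ax */
                         : "memory", "cc");
    }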
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
deleted file mode 100644
index 06691daa4108..000000000000
--- a/arch/x86/lib/semaphore_32.S
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-
-#include <linux/linkage.h>
-#include <asm/rwlock.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-#include <asm/dwarf2.h>
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax whish is either a return
- * value or just clobbered..
- */
- .section .sched.text, "ax"
-
-/*
- * rw spinlock fallbacks
- */
-#ifdef CONFIG_SMP
-ENTRY(__write_lock_failed)
- CFI_STARTPROC
- FRAME
-2: LOCK_PREFIX
- addl $ RW_LOCK_BIAS,(%eax)
-1: rep; nop
- cmpl $ RW_LOCK_BIAS,(%eax)
- jne 1b
- LOCK_PREFIX
- subl $ RW_LOCK_BIAS,(%eax)
- jnz 2b
- ENDFRAME
- ret
- CFI_ENDPROC
- ENDPROC(__write_lock_failed)
-
-ENTRY(__read_lock_failed)
- CFI_STARTPROC
- FRAME
-2: LOCK_PREFIX
- incl (%eax)
-1: rep; nop
- cmpl $1,(%eax)
- js 1b
- LOCK_PREFIX
- decl (%eax)
- js 2b
- ENDFRAME
- ret
- CFI_ENDPROC
- ENDPROC(__read_lock_failed)
-
-#endif
-
-#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_down_read_failed)
- CFI_STARTPROC
- pushl_cfi %ecx
- CFI_REL_OFFSET ecx,0
- pushl_cfi %edx
- CFI_REL_OFFSET edx,0
- call rwsem_down_read_failed
- popl_cfi %edx
- popl_cfi %ecx
- ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_down_read_failed)
-
-ENTRY(call_rwsem_down_write_failed)
- CFI_STARTPROC
- pushl_cfi %ecx
- CFI_REL_OFFSET ecx,0
- calll rwsem_down_write_failed
- popl_cfi %ecx
- ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_down_write_failed)
-
-ENTRY(call_rwsem_wake)
- CFI_STARTPROC
- decw %dx /* do nothing if still outstanding active readers */
- jnz 1f
- pushl_cfi %ecx
- CFI_REL_OFFSET ecx,0
- call rwsem_wake
- popl_cfi %ecx
-1: ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_wake)
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_downgrade_wake)
- CFI_STARTPROC
- pushl_cfi %ecx
- CFI_REL_OFFSET ecx,0
- pushl_cfi %edx
- CFI_REL_OFFSET edx,0
- call rwsem_downgrade_wake
- popl_cfi %edx
- popl_cfi %ecx
- ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_downgrade_wake)
-
-#endif
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index 782b082c9ff7..a63efd6bb6a5 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -5,50 +5,41 @@
* Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
* Subject to the GNU public license, v.2. No warranty of any kind.
*/
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/calling.h>
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
- #include <asm/calling.h>
- #include <asm/rwlock.h>
-
- /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
- .macro thunk name,func
- .globl \name
-\name:
- CFI_STARTPROC
- SAVE_ARGS
- call \func
- jmp restore
- CFI_ENDPROC
- .endm
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* put return address in rdi (arg1) */
- .macro thunk_ra name,func
+ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+ .macro THUNK name, func, put_ret_addr_in_rdi=0
.globl \name
\name:
CFI_STARTPROC
+
+ /* SAVE_ARGS below pushes 9 elements; the next slot up is the return %rip */
SAVE_ARGS
- /* SAVE_ARGS pushs 9 elements */
- /* the next element would be the rip */
- movq 9*8(%rsp), %rdi
+
+ .if \put_ret_addr_in_rdi
+ movq_cfi_restore 9*8, rdi
+ .endif
+
call \func
jmp restore
CFI_ENDPROC
.endm
- thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
- thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller
+#ifdef CONFIG_TRACE_IRQFLAGS
+ THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
+ THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- thunk lockdep_sys_exit_thunk,lockdep_sys_exit
+ THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
#endif
-
+
/* SAVE_ARGS below is used only for the .cfi directives it contains. */
CFI_STARTPROC
SAVE_ARGS
restore:
RESTORE_ARGS
- ret
+ ret
CFI_ENDPROC
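With put_ret_addr_in_rdi=1, the movq_cfi_restore 9*8, rdi step loads the stack slot just above the nine registers SAVE_ARGS pushed, i.e. the caller's return address, into %rdi. That is how trace_hardirqs_on_caller/trace_hardirqs_off_caller receive their call site as the first argument, while the lockdep_sys_exit thunk simply skips that step.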
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
new file mode 100644
index 000000000000..97be9cb54483
--- /dev/null
+++ b/arch/x86/lib/usercopy.c
@@ -0,0 +1,43 @@
+/*
+ * User address space access functions.
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * best effort, GUP based copy_from_user() that is NMI-safe
+ */
+unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long offset, addr = (unsigned long)from;
+ unsigned long size, len = 0;
+ struct page *page;
+ void *map;
+ int ret;
+
+ do {
+ ret = __get_user_pages_fast(addr, 1, 0, &page);
+ if (!ret)
+ break;
+
+ offset = addr & (PAGE_SIZE - 1);
+ size = min(PAGE_SIZE - offset, n - len);
+
+ map = kmap_atomic(page);
+ memcpy(to, map+offset, size);
+ kunmap_atomic(map);
+ put_page(page);
+
+ len += size;
+ to += size;
+ addr += size;
+
+ } while (len < n);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(copy_from_user_nmi);
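Note the return convention: the helper returns the number of bytes actually copied, so callers treat a short count as failure; the oprofile hunks below switch to exactly this pattern. A minimal hypothetical caller (the frame type and function name here are invented for illustration):

    #include <linux/errno.h>

    /* Prototype as introduced above. */
    extern unsigned long copy_from_user_nmi(void *to, const void __user *from,
                                            unsigned long n);

    struct uframe {
            void __user *next;
            unsigned long ret_addr;
    };

    /* Hypothetical sketch: fetch one user stack frame from NMI context. */
    static int read_frame_nmi(const void __user *fp, struct uframe *out)
    {
            if (copy_from_user_nmi(out, fp, sizeof(*out)) != sizeof(*out))
                    return -EFAULT;   /* short copy: page unmapped or faulted */
            return 0;
    }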
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2dbf6bf4c7e5..4d09df054e39 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt, have no user context or are running
@@ -1161,11 +1161,11 @@ good_area:
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
index 704a37cedddb..dab41876cdd5 100644
--- a/arch/x86/mm/kmemcheck/error.c
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
e->trace.entries = e->trace_entries;
e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
e->trace.skip = 0;
- save_stack_trace_regs(&e->trace, regs);
+ save_stack_trace_regs(regs, &e->trace);
/* Round address down to nearest 16 bytes */
shadow_copy = kmemcheck_shadow_lookup(address
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index f5510d889a22..fbeaaf416610 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -496,6 +496,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
+ unsigned long uninitialized_var(pfn_align);
int i, nid;
/* Account for nodes with cpus and no memory */
@@ -511,6 +512,20 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
/* for out of order entries */
sort_node_map();
+
+ /*
+ * If the sections array is going to be used for pfn -> nid mapping, check
+ * whether its granularity is fine enough.
+ */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+ pfn_align = node_map_pfn_alignment();
+ if (pfn_align && pfn_align < PAGES_PER_SECTION) {
+ printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
+ PFN_PHYS(pfn_align) >> 20,
+ PFN_PHYS(PAGES_PER_SECTION) >> 20);
+ return -EINVAL;
+ }
+#endif
if (!numa_meminfo_cover_memory(mi))
return -EINVAL;
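To make the rejection concrete: with 4 KiB pages and the usual x86_64 SECTION_SIZE_BITS of 27, PAGES_PER_SECTION is 32768, so a section spans 128 MiB. A NUMA layout whose nodes interleave every 64 MiB would then log "Node alignment 64MB < min 128MB, rejecting NUMA config", because a single section would have to map to two different nodes.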
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 849a975d3fa0..3adebe7e536a 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -41,7 +41,7 @@
* physnode_map[16-31] = 1;
* physnode_map[32- ] = -1;
*/
-s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
void memory_present(int nid, unsigned long start, unsigned long end)
@@ -52,8 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
nid, start, end);
printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
printk(KERN_DEBUG " ");
- for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
- physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
+ for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
+ physnode_map[pfn / PAGES_PER_SECTION] = nid;
printk(KERN_CONT "%lx ", pfn);
}
printk(KERN_CONT "\n");
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index e1d106909218..b0086567271c 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -123,12 +123,11 @@ static int pageattr_test(void)
if (print)
printk(KERN_INFO "CPA self-test:\n");
- bm = vmalloc((max_pfn_mapped + 7) / 8);
+ bm = vzalloc((max_pfn_mapped + 7) / 8);
if (!bm) {
printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
return -ENOMEM;
}
- memset(bm, 0, (max_pfn_mapped + 7) / 8);
failed += print_split(&sa);
srandom32(100);
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index a5b64ab4cd6e..bff89dfe3619 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -11,10 +11,11 @@
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/compat.h>
+#include <linux/uaccess.h>
+
#include <asm/ptrace.h>
-#include <asm/uaccess.h>
#include <asm/stacktrace.h>
-#include <linux/compat.h>
static int backtrace_stack(void *data, char *name)
{
@@ -40,13 +41,13 @@ static struct stacktrace_ops backtrace_ops = {
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
+ /* Also check accessibility of one struct stack_frame_ia32 beyond: */
struct stack_frame_ia32 bufhead[2];
struct stack_frame_ia32 *fp;
+ unsigned long bytes;
- /* Also check accessibility of one struct frame_head beyond */
- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
- return NULL;
- if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+ bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+ if (bytes != sizeof(bufhead))
return NULL;
fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
@@ -87,12 +88,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
+ /* Also check accessibility of one struct stack_frame beyond: */
struct stack_frame bufhead[2];
+ unsigned long bytes;
- /* Also check accessibility of one struct stack_frame beyond */
- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
- return NULL;
- if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+ bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+ if (bytes != sizeof(bufhead))
return NULL;
oprofile_add_trace(bufhead[0].return_address);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 750c346ef50a..301e325992f6 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -519,7 +519,8 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
if (cfg->address < 0xFFFFFFFF)
return 0;
- if (!strcmp(mcfg->header.oem_id, "SGI"))
+ if (!strcmp(mcfg->header.oem_id, "SGI") ||
+ !strcmp(mcfg->header.oem_id, "SGI2"))
return 0;
if (mcfg->header.revision >= 1) {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index f567965c0620..1017c7bee388 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -1,8 +1,13 @@
/*
- * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux
- * x86 PCI core to support the Xen PCI Frontend
+ * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
+ * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
+ * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
+ * concept of GSIs). Under PV we hook into the pcibios API for IRQs and
+ * 0xcf8 PCI configuration read/write.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -19,22 +24,53 @@
#include <xen/events.h>
#include <asm/xen/pci.h>
+static int xen_pcifront_enable_irq(struct pci_dev *dev)
+{
+ int rc;
+ int share = 1;
+ int pirq;
+ u8 gsi;
+
+ rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+ rc);
+ return rc;
+ }
+ /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line. */
+ pirq = gsi;
+
+ if (gsi < NR_IRQS_LEGACY)
+ share = 0;
+
+ rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
+ gsi, pirq, rc);
+ return rc;
+ }
+
+ dev->irq = rc;
+ dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
+ return 0;
+}
+
#ifdef CONFIG_ACPI
-static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
- int trigger, int polarity)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
+ bool set_pirq)
{
- int rc, irq;
+ int rc, pirq = -1, irq = -1;
struct physdev_map_pirq map_irq;
int shareable = 0;
char *name;
- if (!xen_hvm_domain())
- return -1;
+ if (set_pirq)
+ pirq = gsi;
map_irq.domid = DOMID_SELF;
map_irq.type = MAP_PIRQ_TYPE_GSI;
map_irq.index = gsi;
- map_irq.pirq = -1;
+ map_irq.pirq = pirq;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
if (rc) {
@@ -42,7 +78,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
return -1;
}
- if (trigger == ACPI_EDGE_SENSITIVE) {
+ if (triggering == ACPI_EDGE_SENSITIVE) {
shareable = 0;
name = "ioapic-edge";
} else {
@@ -50,12 +86,63 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
name = "ioapic-level";
}
+ if (gsi_override >= 0)
+ gsi = gsi_override;
+
irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
+ if (irq < 0)
+ goto out;
+
+ printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
+out:
+ return irq;
+}
+
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ if (!xen_hvm_domain())
+ return -1;
- printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
+ return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
+ false /* no mapping of GSI to PIRQ */);
+}
+
+#ifdef CONFIG_XEN_DOM0
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
+{
+ int rc, irq;
+ struct physdev_setup_gsi setup_gsi;
+
+ if (!xen_pv_domain())
+ return -1;
+
+ printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
+ gsi, triggering, polarity);
+
+ irq = xen_register_pirq(gsi, gsi_override, triggering, true);
+
+ setup_gsi.gsi = gsi;
+ setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ if (rc == -EEXIST)
+ printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
+ else if (rc) {
+ printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
+ gsi, rc);
+ }
return irq;
}
+
+static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
+}
+#endif
#endif
#if defined(CONFIG_PCI_MSI)
@@ -65,6 +152,43 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
+static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, ret, i;
+ struct msi_desc *msidesc;
+ int *v;
+
+ v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ if (type == PCI_CAP_ID_MSIX)
+ ret = xen_pci_frontend_enable_msix(dev, v, nvec);
+ else
+ ret = xen_pci_frontend_enable_msi(dev, v);
+ if (ret)
+ goto error;
+ i = 0;
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+ (type == PCI_CAP_ID_MSIX) ?
+ "pcifront-msi-x" :
+ "pcifront-msi",
+ DOMID_SELF);
+ if (irq < 0)
+ goto free;
+ i++;
+ }
+ kfree(v);
+ return 0;
+
+error:
+ dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+free:
+ kfree(v);
+ return ret;
+}
+
#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
@@ -123,67 +247,6 @@ error:
return -ENODEV;
}
-/*
- * For MSI interrupts we have to use drivers/xen/event.s functions to
- * allocate an irq_desc and setup the right */
-
-
-static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- int irq, ret, i;
- struct msi_desc *msidesc;
- int *v;
-
- v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
- if (!v)
- return -ENOMEM;
-
- if (type == PCI_CAP_ID_MSIX)
- ret = xen_pci_frontend_enable_msix(dev, v, nvec);
- else
- ret = xen_pci_frontend_enable_msi(dev, v);
- if (ret)
- goto error;
- i = 0;
- list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
- (type == PCI_CAP_ID_MSIX) ?
- "pcifront-msi-x" :
- "pcifront-msi",
- DOMID_SELF);
- if (irq < 0)
- goto free;
- i++;
- }
- kfree(v);
- return 0;
-
-error:
- dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
-free:
- kfree(v);
- return ret;
-}
-
-static void xen_teardown_msi_irqs(struct pci_dev *dev)
-{
- struct msi_desc *msidesc;
-
- msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
- if (msidesc->msi_attrib.is_msix)
- xen_pci_frontend_disable_msix(dev);
- else
- xen_pci_frontend_disable_msi(dev);
-
- /* Free the IRQ's and the msidesc using the generic code. */
- default_teardown_msi_irqs(dev);
-}
-
-static void xen_teardown_msi_irq(unsigned int irq)
-{
- xen_destroy_irq(irq);
-}
-
#ifdef CONFIG_XEN_DOM0
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
@@ -242,45 +305,28 @@ out:
return ret;
}
#endif
-#endif
-static int xen_pcifront_enable_irq(struct pci_dev *dev)
+static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
- int rc;
- int share = 1;
- int pirq;
- u8 gsi;
-
- rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
- if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
- rc);
- return rc;
- }
-
- rc = xen_allocate_pirq_gsi(gsi);
- if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
- gsi, rc);
- return rc;
- }
- pirq = rc;
+ struct msi_desc *msidesc;
- if (gsi < NR_IRQS_LEGACY)
- share = 0;
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ if (msidesc->msi_attrib.is_msix)
+ xen_pci_frontend_disable_msix(dev);
+ else
+ xen_pci_frontend_disable_msi(dev);
- rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
- if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
- gsi, pirq, rc);
- return rc;
- }
+ /* Free the IRQ's and the msidesc using the generic code. */
+ default_teardown_msi_irqs(dev);
+}
- dev->irq = rc;
- dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
- return 0;
+static void xen_teardown_msi_irq(unsigned int irq)
+{
+ xen_destroy_irq(irq);
}
+#endif
+
int __init pci_xen_init(void)
{
if (!xen_pv_domain() || xen_initial_domain())
@@ -327,79 +373,6 @@ int __init pci_xen_hvm_init(void)
}
#ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
-{
- int rc, pirq, irq = -1;
- struct physdev_map_pirq map_irq;
- int shareable = 0;
- char *name;
-
- if (!xen_pv_domain())
- return -1;
-
- if (triggering == ACPI_EDGE_SENSITIVE) {
- shareable = 0;
- name = "ioapic-edge";
- } else {
- shareable = 1;
- name = "ioapic-level";
- }
- pirq = xen_allocate_pirq_gsi(gsi);
- if (pirq < 0)
- goto out;
-
- if (gsi_override >= 0)
- irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
- else
- irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
- if (irq < 0)
- goto out;
-
- printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
-
- map_irq.domid = DOMID_SELF;
- map_irq.type = MAP_PIRQ_TYPE_GSI;
- map_irq.index = gsi;
- map_irq.pirq = pirq;
-
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
- if (rc) {
- printk(KERN_WARNING "xen map irq failed %d\n", rc);
- return -1;
- }
-
-out:
- return irq;
-}
-
-static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
-{
- int rc, irq;
- struct physdev_setup_gsi setup_gsi;
-
- if (!xen_pv_domain())
- return -1;
-
- printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
- gsi, triggering, polarity);
-
- irq = xen_register_pirq(gsi, gsi_override, triggering);
-
- setup_gsi.gsi = gsi;
- setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
- setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
- if (rc == -EEXIST)
- printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
- else if (rc) {
- printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
- gsi, rc);
- }
-
- return irq;
-}
-
static __init void xen_setup_acpi_sci(void)
{
int rc;
@@ -419,7 +392,7 @@ static __init void xen_setup_acpi_sci(void)
}
trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
+
printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
"polarity=%d\n", gsi, trigger, polarity);
@@ -434,10 +407,9 @@ static __init void xen_setup_acpi_sci(void)
* the ACPI interpreter and keels over since IRQ 9 has not been
* setup as we had setup IRQ 20 for it).
*/
- /* Check whether the GSI != IRQ */
if (acpi_gsi_to_irq(gsi, &irq) == 0) {
- if (irq >= 0 && irq != gsi)
- /* Bugger, we MUST have that IRQ. */
+ /* Use the provided value if it's valid. */
+ if (irq >= 0)
gsi_override = irq;
}
@@ -447,41 +419,16 @@ static __init void xen_setup_acpi_sci(void)
return;
}
-static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
- int trigger, int polarity)
+int __init pci_xen_initial_domain(void)
{
- return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
-}
+ int irq;
-static int __init pci_xen_initial_domain(void)
-{
#ifdef CONFIG_PCI_MSI
x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
xen_setup_acpi_sci();
__acpi_register_gsi = acpi_register_gsi_xen;
-
- return 0;
-}
-
-void __init xen_setup_pirqs(void)
-{
- int pirq, irq;
-
- pci_xen_initial_domain();
-
- if (0 == nr_ioapics) {
- for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
- pirq = xen_allocate_pirq_gsi(irq);
- if (WARN(pirq < 0,
- "Could not allocate PIRQ for legacy interrupt\n"))
- break;
- irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
- }
- return;
- }
-
/* Pre-allocate legacy irqs */
for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
int trigger, polarity;
@@ -490,12 +437,16 @@ void __init xen_setup_pirqs(void)
continue;
xen_register_pirq(irq, -1 /* no GSI override */,
- trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
+ trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
+ true /* Map GSI to PIRQ */);
}
+ if (0 == nr_ioapics) {
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+ xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
+ }
+ return 0;
}
-#endif
-#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
domid_t domain;
struct pci_dev *dev;
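One point worth spelling out from the hunk above: a PV domU does not parse ACPI, so xen_pcifront_enable_irq() takes the PIRQ straight from the device's PCI_INTERRUPT_LINE register, e.g. a device whose interrupt line reads 24 is bound with gsi = pirq = 24, and only the legacy range below NR_IRQS_LEGACY is registered as non-shareable.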
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 899e393d8e73..3ae4128013e6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -51,7 +51,17 @@
int efi_enabled;
EXPORT_SYMBOL(efi_enabled);
-struct efi efi;
+struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+ .acpi20 = EFI_INVALID_TABLE_ADDR,
+ .smbios = EFI_INVALID_TABLE_ADDR,
+ .sal_systab = EFI_INVALID_TABLE_ADDR,
+ .boot_info = EFI_INVALID_TABLE_ADDR,
+ .hcdp = EFI_INVALID_TABLE_ADDR,
+ .uga = EFI_INVALID_TABLE_ADDR,
+ .uv_systab = EFI_INVALID_TABLE_ADDR,
+};
EXPORT_SYMBOL(efi);
struct efi_memory_map memmap;
@@ -79,26 +89,50 @@ early_param("add_efi_memmap", setup_add_efi_memmap);
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
- return efi_call_virt2(get_time, tm, tc);
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ status = efi_call_virt2(get_time, tm, tc);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
}
static efi_status_t virt_efi_set_time(efi_time_t *tm)
{
- return efi_call_virt1(set_time, tm);
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ status = efi_call_virt1(set_time, tm);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
}
static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
efi_bool_t *pending,
efi_time_t *tm)
{
- return efi_call_virt3(get_wakeup_time,
- enabled, pending, tm);
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ status = efi_call_virt3(get_wakeup_time,
+ enabled, pending, tm);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
}
static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
- return efi_call_virt2(set_wakeup_time,
- enabled, tm);
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ status = efi_call_virt2(set_wakeup_time,
+ enabled, tm);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
}
static efi_status_t virt_efi_get_variable(efi_char16_t *name,
@@ -122,7 +156,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
static efi_status_t virt_efi_set_variable(efi_char16_t *name,
efi_guid_t *vendor,
- unsigned long attr,
+ u32 attr,
unsigned long data_size,
void *data)
{
@@ -131,6 +165,18 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
data_size, data);
}
+static efi_status_t virt_efi_query_variable_info(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ return efi_call_virt4(query_variable_info, attr, storage_space,
+ remaining_space, max_variable_size);
+}
+
static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
{
return efi_call_virt1(get_next_high_mono_count, count);
@@ -145,6 +191,28 @@ static void virt_efi_reset_system(int reset_type,
data_size, data);
}
+static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
+ unsigned long count,
+ unsigned long sg_list)
+{
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ return efi_call_virt3(update_capsule, capsules, count, sg_list);
+}
+
+static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+ unsigned long count,
+ u64 *max_size,
+ int *reset_type)
+{
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ return efi_call_virt4(query_capsule_caps, capsules, count, max_size,
+ reset_type);
+}
+
static efi_status_t __init phys_efi_set_virtual_address_map(
unsigned long memory_map_size,
unsigned long descriptor_size,
@@ -164,11 +232,14 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
efi_time_cap_t *tc)
{
+ unsigned long flags;
efi_status_t status;
+ spin_lock_irqsave(&rtc_lock, flags);
efi_call_phys_prelog();
status = efi_call_phys2(efi_phys.get_time, tm, tc);
efi_call_phys_epilog();
+ spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
@@ -669,6 +740,9 @@ void __init efi_enter_virtual_mode(void)
efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
efi.reset_system = virt_efi_reset_system;
efi.set_virtual_address_map = NULL;
+ efi.query_variable_info = virt_efi_query_variable_info;
+ efi.update_capsule = virt_efi_update_capsule;
+ efi.query_capsule_caps = virt_efi_query_capsule_caps;
if (__supported_pte_mask & _PAGE_NX)
runtime_code_page_mkexec();
early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
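The three new runtime wrappers gate UEFI 2.0 services on efi.runtime_version, so callers see a clean EFI_UNSUPPORTED on older firmware instead of a jump through a missing table entry. A hypothetical caller (the helper name is invented; the EFI_VARIABLE_* attribute flags are the standard ones from linux/efi.h):

    #include <linux/efi.h>

    /* Hypothetical sketch: variable-store headroom via the new hook. */
    static u64 efi_var_space_remaining(void)
    {
            u64 storage, remaining, max_var;
            efi_status_t status;

            status = efi.query_variable_info(EFI_VARIABLE_NON_VOLATILE |
                                             EFI_VARIABLE_BOOTSERVICE_ACCESS |
                                             EFI_VARIABLE_RUNTIME_ACCESS,
                                             &storage, &remaining, &max_var);
            if (status != EFI_SUCCESS)    /* EFI_UNSUPPORTED before rev 2.00 */
                    return 0;
            return remaining;
    }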
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 68e467f69fec..db8b915f54bc 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -296,14 +296,18 @@ static void bau_process_message(struct msg_desc *mdp,
}
/*
- * Determine the first cpu on a uvhub.
+ * Determine the first cpu on a pnode.
*/
-static int uvhub_to_first_cpu(int uvhub)
+static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
int cpu;
- for_each_present_cpu(cpu)
- if (uvhub == uv_cpu_to_blade_id(cpu))
+ struct hub_and_pnode *hpp;
+
+ for_each_present_cpu(cpu) {
+ hpp = &smaster->thp[cpu];
+ if (pnode == hpp->pnode)
return cpu;
+ }
return -1;
}
@@ -366,28 +370,32 @@ static void do_reset(void *ptr)
* Use IPI to get all target uvhubs to release resources held by
* a given sending cpu number.
*/
-static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender)
+static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
- int uvhub;
+ int pnode;
+ int apnode;
int maskbits;
- cpumask_t mask;
+ int sender = bcp->cpu;
+ cpumask_t *mask = bcp->uvhub_master->cpumask;
+ struct bau_control *smaster = bcp->socket_master;
struct reset_args reset_args;
reset_args.sender = sender;
- cpus_clear(mask);
+ cpus_clear(*mask);
/* find a single cpu for each uvhub in this distribution mask */
- maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
- for (uvhub = 0; uvhub < maskbits; uvhub++) {
+ maskbits = sizeof(struct pnmask) * BITSPERBYTE;
+ /* each bit is a pnode relative to the partition base pnode */
+ for (pnode = 0; pnode < maskbits; pnode++) {
int cpu;
- if (!bau_uvhub_isset(uvhub, distribution))
+ if (!bau_uvhub_isset(pnode, distribution))
continue;
- /* find a cpu for this uvhub */
- cpu = uvhub_to_first_cpu(uvhub);
- cpu_set(cpu, mask);
+ apnode = pnode + bcp->partition_base_pnode;
+ cpu = pnode_to_first_cpu(apnode, smaster);
+ cpu_set(cpu, *mask);
}
/* IPI all cpus; preemption is already disabled */
- smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
+ smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
return;
}
@@ -604,7 +612,7 @@ static void destination_plugged(struct bau_desc *bau_desc,
quiesce_local_uvhub(hmaster);
spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ reset_with_ipi(&bau_desc->distribution, bcp);
spin_unlock(&hmaster->queue_lock);
end_uvhub_quiesce(hmaster);
@@ -626,7 +634,7 @@ static void destination_timeout(struct bau_desc *bau_desc,
quiesce_local_uvhub(hmaster);
spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ reset_with_ipi(&bau_desc->distribution, bcp);
spin_unlock(&hmaster->queue_lock);
end_uvhub_quiesce(hmaster);
@@ -1334,9 +1342,10 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
instr[count] = '\0';
- bcp = &per_cpu(bau_control, smp_processor_id());
-
+ cpu = get_cpu();
+ bcp = &per_cpu(bau_control, cpu);
ret = parse_tunables_write(bcp, instr, count);
+ put_cpu();
if (ret)
return ret;
@@ -1687,6 +1696,16 @@ static void make_per_cpu_thp(struct bau_control *smaster)
}
/*
+ * Each uvhub is to get a local cpumask.
+ */
+static void make_per_hub_cpumask(struct bau_control *hmaster)
+{
+ int sz = sizeof(cpumask_t);
+
+ hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
+}
+
+/*
* Initialize all the per_cpu information for the cpu's on a given socket,
* given what has been gathered into the socket_desc struct.
* And reports the chosen hub and socket masters back to the caller.
@@ -1751,11 +1770,12 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
sdp = &bdp->socket[socket];
if (scan_sock(sdp, bdp, &smaster, &hmaster))
return 1;
+ make_per_cpu_thp(smaster);
}
socket++;
socket_mask = (socket_mask >> 1);
- make_per_cpu_thp(smaster);
}
+ make_per_hub_cpumask(hmaster);
}
return 0;
}
@@ -1777,15 +1797,20 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
- return 1;
+ goto fail;
if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
- return 1;
+ goto fail;
kfree(uvhub_descs);
kfree(uvhub_mask);
init_per_cpu_tunables();
return 0;
+
+fail:
+ kfree(uvhub_descs);
+ kfree(uvhub_mask);
+ return 1;
}
/*
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index bef0bc962400..5d179502a52c 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -26,6 +26,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
export CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+ -Wl,--no-undefined \
-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index a724905fdae7..6bc0e723b6e8 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -6,7 +6,6 @@
*
* The code should have no internal unresolved relocations.
* Check with readelf after changing.
- * Also alternative() doesn't work.
*/
/* Disable profiling for userspace code: */
@@ -17,6 +16,7 @@
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
+#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
@@ -25,6 +25,43 @@
#define gtod (&VVAR(vsyscall_gtod_data))
+notrace static cycle_t vread_tsc(void)
+{
+ cycle_t ret;
+ u64 last;
+
+ /*
+ * Empirically, a fence (of a type that depends on the CPU)
+ * before rdtsc is enough to ensure that rdtsc is ordered
+ * with respect to loads. The various CPU manuals are unclear
+ * as to whether rdtsc can be reordered with later loads,
+ * but no one has ever seen it happen.
+ */
+ rdtsc_barrier();
+ ret = (cycle_t)vget_cycles();
+
+ last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+
+ if (likely(ret >= last))
+ return ret;
+
+ /*
+ * GCC likes to generate cmov here, but this branch is extremely
+ * predictable (it's just a function of time and the likely is
+ * very likely) and there's a data dependence, so force GCC
+ * to generate a branch instead. I don't barrier() because
+ * we don't actually need a barrier, and if this function
+ * ever gets inlined it will generate worse code.
+ */
+ asm volatile ("");
+ return last;
+}
+
+static notrace cycle_t vread_hpet(void)
+{
+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
@@ -36,9 +73,12 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
notrace static inline long vgetns(void)
{
long v;
- cycles_t (*vread)(void);
- vread = gtod->clock.vread;
- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
+ cycles_t cycles;
+ if (gtod->clock.vclock_mode == VCLOCK_TSC)
+ cycles = vread_tsc();
+ else
+ cycles = vread_hpet();
+ v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
return (v * gtod->clock.mult) >> gtod->clock.shift;
}
@@ -116,21 +156,21 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
- if (likely(gtod->sysctl_enabled))
- switch (clock) {
- case CLOCK_REALTIME:
- if (likely(gtod->clock.vread))
- return do_realtime(ts);
- break;
- case CLOCK_MONOTONIC:
- if (likely(gtod->clock.vread))
- return do_monotonic(ts);
- break;
- case CLOCK_REALTIME_COARSE:
- return do_realtime_coarse(ts);
- case CLOCK_MONOTONIC_COARSE:
- return do_monotonic_coarse(ts);
- }
+ switch (clock) {
+ case CLOCK_REALTIME:
+ if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
+ return do_realtime(ts);
+ break;
+ case CLOCK_MONOTONIC:
+ if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
+ return do_monotonic(ts);
+ break;
+ case CLOCK_REALTIME_COARSE:
+ return do_realtime_coarse(ts);
+ case CLOCK_MONOTONIC_COARSE:
+ return do_monotonic_coarse(ts);
+ }
+
return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
@@ -139,7 +179,7 @@ int clock_gettime(clockid_t, struct timespec *)
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
long ret;
- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
+ if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
if (likely(tv != NULL)) {
BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
offsetof(struct timespec, tv_nsec) ||
@@ -161,27 +201,14 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
int gettimeofday(struct timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-
-static __always_inline long time_syscall(long *t)
-{
- long secs;
- asm volatile("syscall"
- : "=a" (secs)
- : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
- return secs;
-}
-
+/*
+ * This will break when the xtime seconds get inaccurate, but that is
+ * unlikely
+ */
notrace time_t __vdso_time(time_t *t)
{
- time_t result;
-
- if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
- return time_syscall(t);
-
/* This is atomic on x86_64 so we don't need any locks. */
- result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+ time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
if (t)
*t = result;
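With the sysctl_enabled escape hatch gone, the fast path runs whenever a vclock mode is set. Userspace needs no change; the usual libc wrappers typically resolve to these __vdso_* entry points, as in this minimal sketch:

    /* Minimal userspace sketch: this lands in __vdso_clock_gettime without
     * entering the kernel whenever the TSC or HPET vclock is usable. */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                    printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }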
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 1d3aa6b87181..1b979c12ba85 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,10 +1,21 @@
+#include <asm/page_types.h>
+#include <linux/linkage.h>
#include <linux/init.h>
-__INITDATA
+__PAGE_ALIGNED_DATA
.globl vdso_start, vdso_end
+ .align PAGE_SIZE
vdso_start:
.incbin "arch/x86/vdso/vdso.so"
vdso_end:
-__FINIT
+.previous
+
+ .globl vdso_pages
+ .bss
+ .align 8
+ .type vdso_pages, @object
+vdso_pages:
+ .zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
+ .size vdso_pages, .-vdso_pages
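The .zero directive reserves one 8-byte slot per page of vdso.so, e.g. 16 bytes for a two-page image; init_vdso() in the vma.c hunk below fills those slots with struct page pointers via virt_to_page(), replacing the old kmalloc'd array.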
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 7abd2be0f9b9..316fbca3490e 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -14,41 +14,61 @@
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
+#include <asm/page.h>
unsigned int __read_mostly vdso_enabled = 1;
extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;
-static struct page **vdso_pages;
+extern struct page *vdso_pages[];
static unsigned vdso_size;
-static int __init init_vdso_vars(void)
+static void __init patch_vdso(void *vdso, size_t len)
+{
+ Elf64_Ehdr *hdr = vdso;
+ Elf64_Shdr *sechdrs, *alt_sec = 0;
+ char *secstrings;
+ void *alt_data;
+ int i;
+
+ BUG_ON(len < sizeof(Elf64_Ehdr));
+ BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
+
+ sechdrs = (void *)hdr + hdr->e_shoff;
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ Elf64_Shdr *shdr = &sechdrs[i];
+ if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
+ alt_sec = shdr;
+ goto found;
+ }
+ }
+
+ /* If we get here, it's probably a bug. */
+ pr_warning("patch_vdso: .altinstructions not found\n");
+ return; /* nothing to patch */
+
+found:
+ alt_data = (void *)hdr + alt_sec->sh_offset;
+ apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
+}
+
+static int __init init_vdso(void)
{
int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
int i;
+ patch_vdso(vdso_start, vdso_end - vdso_start);
+
vdso_size = npages << PAGE_SHIFT;
- vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
- if (!vdso_pages)
- goto oom;
- for (i = 0; i < npages; i++) {
- struct page *p;
- p = alloc_page(GFP_KERNEL);
- if (!p)
- goto oom;
- vdso_pages[i] = p;
- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
- }
+ for (i = 0; i < npages; i++)
+ vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
return 0;
-
- oom:
- printk("Cannot allocate vdso\n");
- vdso_enabled = 0;
- return -ENOMEM;
}
-subsys_initcall(init_vdso_vars);
+subsys_initcall(init_vdso);
struct linux_binprm;
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 17c565de3d64..ccf73b2f3e69 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,10 +13,10 @@ CFLAGS_mmu.o := $(nostackp)
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o
+ p2m.o trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 5525163a0398..974a528458a0 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -341,6 +341,8 @@ static void xen_set_ldt(const void *addr, unsigned entries)
struct mmuext_op *op;
struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+ trace_xen_cpu_set_ldt(addr, entries);
+
op = mcs.args;
op->cmd = MMUEXT_SET_LDT;
op->arg1.linear_addr = (unsigned long)addr;
@@ -496,6 +498,8 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
u64 entry = *(u64 *)ptr;
+ trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);
+
preempt_disable();
xen_mc_flush();
@@ -565,6 +569,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
unsigned long p = (unsigned long)&dt[entrynum];
unsigned long start, end;
+ trace_xen_cpu_write_idt_entry(dt, entrynum, g);
+
preempt_disable();
start = __this_cpu_read(idt_desc.address);
@@ -619,6 +625,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
+ trace_xen_cpu_load_idt(desc);
+
spin_lock(&lock);
__get_cpu_var(idt_desc) = *desc;
@@ -637,6 +645,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
const void *desc, int type)
{
+ trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
+
preempt_disable();
switch (type) {
@@ -665,6 +675,8 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
const void *desc, int type)
{
+ trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
+
switch (type) {
case DESC_LDT:
case DESC_TSS:
@@ -684,7 +696,9 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
static void xen_load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
{
- struct multicall_space mcs = xen_mc_entry(0);
+ struct multicall_space mcs;
+
+ mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
xen_mc_issue(PARAVIRT_LAZY_CPU);
}
@@ -1248,6 +1262,14 @@ asmlinkage void __init xen_start_kernel(void)
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
} else {
+ const struct dom0_vga_console_info *info =
+ (void *)((char *)xen_start_info +
+ xen_start_info->console.dom0.info_off);
+
+ xen_init_vga(info, xen_start_info->console.dom0.info_size);
+ xen_start_info->console.domU.mfn = 0;
+ xen_start_info->console.domU.evtchn = 0;
+
/* Make sure ACS will be enabled */
pci_request_acs();
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0ccccb67a993..f987bde77c49 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -48,6 +48,8 @@
#include <linux/memblock.h>
#include <linux/seq_file.h>
+#include <trace/events/xen.h>
+
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
@@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
struct multicall_space mcs;
struct mmu_update *u;
+ trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
+
mcs = xen_mc_entry(sizeof(*u));
u = mcs.args;
@@ -225,6 +229,24 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
*u = *update;
}
+static void xen_extend_mmuext_op(const struct mmuext_op *op)
+{
+ struct multicall_space mcs;
+ struct mmuext_op *u;
+
+ mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
+
+ if (mcs.mc != NULL) {
+ mcs.mc->args[1]++;
+ } else {
+ mcs = __xen_mc_entry(sizeof(*u));
+ MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+ }
+
+ u = mcs.args;
+ *u = *op;
+}
+
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
struct mmu_update u;
@@ -245,6 +267,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
+ trace_xen_mmu_set_pmd(ptr, val);
+
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
@@ -282,22 +306,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
return true;
}
-static void xen_set_pte(pte_t *ptep, pte_t pteval)
+static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
if (!xen_batched_set_pte(ptep, pteval))
native_set_pte(ptep, pteval);
}
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+ trace_xen_mmu_set_pte(ptep, pteval);
+ __xen_set_pte(ptep, pteval);
+}
+
static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- xen_set_pte(ptep, pteval);
+ trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
+ __xen_set_pte(ptep, pteval);
}
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
+ trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
return *ptep;
}
@@ -306,6 +338,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
{
struct mmu_update u;
+ trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -530,6 +563,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
static void xen_set_pud(pud_t *ptr, pud_t val)
{
+ trace_xen_mmu_set_pud(ptr, val);
+
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
@@ -543,17 +578,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
+ trace_xen_mmu_set_pte_atomic(ptep, pte);
set_64bit((u64 *)ptep, native_pte_val(pte));
}
static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ trace_xen_mmu_pte_clear(mm, addr, ptep);
if (!xen_batched_set_pte(ptep, native_make_pte(0)))
native_pte_clear(mm, addr, ptep);
}
static void xen_pmd_clear(pmd_t *pmdp)
{
+ trace_xen_mmu_pmd_clear(pmdp);
set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */
@@ -629,6 +667,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
pgd_t *user_ptr = xen_get_user_pgd(ptr);
+ trace_xen_mmu_set_pgd(ptr, user_ptr, val);
+
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
@@ -788,14 +828,12 @@ static void xen_pte_unlock(void *v)
static void xen_do_pin(unsigned level, unsigned long pfn)
{
- struct mmuext_op *op;
- struct multicall_space mcs;
+ struct mmuext_op op;
- mcs = __xen_mc_entry(sizeof(*op));
- op = mcs.args;
- op->cmd = level;
- op->arg1.mfn = pfn_to_mfn(pfn);
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+ op.cmd = level;
+ op.arg1.mfn = pfn_to_mfn(pfn);
+
+ xen_extend_mmuext_op(&op);
}
static int xen_pin_page(struct mm_struct *mm, struct page *page,
@@ -863,6 +901,8 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
+ trace_xen_mmu_pgd_pin(mm, pgd);
+
xen_mc_batch();
if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -988,6 +1028,8 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
/* Release a pagetables pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
+ trace_xen_mmu_pgd_unpin(mm, pgd);
+
xen_mc_batch();
xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1196,6 +1238,8 @@ static void xen_flush_tlb(void)
struct mmuext_op *op;
struct multicall_space mcs;
+ trace_xen_mmu_flush_tlb(0);
+
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
@@ -1214,6 +1258,8 @@ static void xen_flush_tlb_single(unsigned long addr)
struct mmuext_op *op;
struct multicall_space mcs;
+ trace_xen_mmu_flush_tlb_single(addr);
+
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
@@ -1240,6 +1286,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
} *args;
struct multicall_space mcs;
+ trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+
if (cpumask_empty(cpus))
return; /* nothing to do */
@@ -1275,10 +1323,11 @@ static void set_current_cr3(void *v)
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
- struct mmuext_op *op;
- struct multicall_space mcs;
+ struct mmuext_op op;
unsigned long mfn;
+ trace_xen_mmu_write_cr3(kernel, cr3);
+
if (cr3)
mfn = pfn_to_mfn(PFN_DOWN(cr3));
else
@@ -1286,13 +1335,10 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
WARN_ON(mfn == 0 && kernel);
- mcs = __xen_mc_entry(sizeof(*op));
-
- op = mcs.args;
- op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
- op->arg1.mfn = mfn;
+ op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+ op.arg1.mfn = mfn;
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+ xen_extend_mmuext_op(&op);
if (kernel) {
percpu_write(xen_cr3, cr3);
@@ -1451,19 +1497,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+ struct multicall_space mcs;
+ struct mmuext_op *op;
+
+ mcs = __xen_mc_entry(sizeof(*op));
+ op = mcs.args;
+ op->cmd = cmd;
+ op->arg1.mfn = pfn_to_mfn(pfn);
+
+ MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+ struct multicall_space mcs;
+ unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+ mcs = __xen_mc_entry(0);
+ MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+ pfn_pte(pfn, prot), 0);
+}
+
/* This needs to make sure the new pte page is pinned iff its being
attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+ unsigned level)
{
- struct page *page = pfn_to_page(pfn);
+ bool pinned = PagePinned(virt_to_page(mm->pgd));
+
+ trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
+
+ if (pinned) {
+ struct page *page = pfn_to_page(pfn);
- if (PagePinned(virt_to_page(mm->pgd))) {
SetPagePinned(page);
if (!PageHighMem(page)) {
- make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+ xen_mc_batch();
+
+ __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
if (level == PT_PTE && USE_SPLIT_PTLOCKS)
- pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+ __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
} else {
/* make sure there are no stray mappings of
this page */
@@ -1483,15 +1562,23 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
}
/* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
struct page *page = pfn_to_page(pfn);
+ bool pinned = PagePinned(page);
- if (PagePinned(page)) {
+ trace_xen_mmu_release_ptpage(pfn, level, pinned);
+
+ if (pinned) {
if (!PageHighMem(page)) {
+ xen_mc_batch();
+
if (level == PT_PTE && USE_SPLIT_PTLOCKS)
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
- make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+ __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+ __set_pfn_prot(pfn, PAGE_KERNEL);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
ClearPagePinned(page);
}
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 1b2b73ff0a6e..0d82003e76ad 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -30,12 +30,13 @@
#define MC_BATCH 32
-#define MC_DEBUG 1
+#define MC_DEBUG 0
#define MC_ARGS (MC_BATCH * 16)
struct mc_buffer {
+ unsigned mcidx, argidx, cbidx;
struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
struct multicall_entry debug[MC_BATCH];
@@ -46,85 +47,15 @@ struct mc_buffer {
void (*fn)(void *);
void *data;
} callbacks[MC_BATCH];
- unsigned mcidx, argidx, cbidx;
};
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
-/* flush reasons 0- slots, 1- args, 2- callbacks */
-enum flush_reasons
-{
- FL_SLOTS,
- FL_ARGS,
- FL_CALLBACKS,
-
- FL_N_REASONS
-};
-
-#ifdef CONFIG_XEN_DEBUG_FS
-#define NHYPERCALLS 40 /* not really */
-
-static struct {
- unsigned histo[MC_BATCH+1];
-
- unsigned issued;
- unsigned arg_total;
- unsigned hypercalls;
- unsigned histo_hypercalls[NHYPERCALLS];
-
- unsigned flush[FL_N_REASONS];
-} mc_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
- if (unlikely(zero_stats)) {
- memset(&mc_stats, 0, sizeof(mc_stats));
- zero_stats = 0;
- }
-}
-
-static void mc_add_stats(const struct mc_buffer *mc)
-{
- int i;
-
- check_zero();
-
- mc_stats.issued++;
- mc_stats.hypercalls += mc->mcidx;
- mc_stats.arg_total += mc->argidx;
-
- mc_stats.histo[mc->mcidx]++;
- for(i = 0; i < mc->mcidx; i++) {
- unsigned op = mc->entries[i].op;
- if (op < NHYPERCALLS)
- mc_stats.histo_hypercalls[op]++;
- }
-}
-
-static void mc_stats_flush(enum flush_reasons idx)
-{
- check_zero();
-
- mc_stats.flush[idx]++;
-}
-
-#else /* !CONFIG_XEN_DEBUG_FS */
-
-static inline void mc_add_stats(const struct mc_buffer *mc)
-{
-}
-
-static inline void mc_stats_flush(enum flush_reasons idx)
-{
-}
-#endif /* CONFIG_XEN_DEBUG_FS */
-
void xen_mc_flush(void)
{
struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct multicall_entry *mc;
int ret = 0;
unsigned long flags;
int i;
@@ -135,9 +66,26 @@ void xen_mc_flush(void)
something in the middle */
local_irq_save(flags);
- mc_add_stats(b);
+ trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
+
+ switch (b->mcidx) {
+ case 0:
+ /* no-op */
+ BUG_ON(b->argidx != 0);
+ break;
+
+ case 1:
+ /* Singleton multicall - bypass multicall machinery
+ and just do the call directly. */
+ mc = &b->entries[0];
+
+ mc->result = privcmd_call(mc->op,
+ mc->args[0], mc->args[1], mc->args[2],
+ mc->args[3], mc->args[4]);
+ ret = mc->result < 0;
+ break;

- if (b->mcidx) {
+ default:
#if MC_DEBUG
memcpy(b->debug, b->entries,
b->mcidx * sizeof(struct multicall_entry));
@@ -164,11 +112,10 @@ void xen_mc_flush(void)
}
}
#endif
+ }

- b->mcidx = 0;
- b->argidx = 0;
- } else
- BUG_ON(b->argidx != 0);
+ b->mcidx = 0;
+ b->argidx = 0;
for (i = 0; i < b->cbidx; i++) {
struct callback *cb = &b->callbacks[i];
@@ -188,18 +135,21 @@ struct multicall_space __xen_mc_entry(size_t args)
struct multicall_space ret;
unsigned argidx = roundup(b->argidx, sizeof(u64));
+ trace_xen_mc_entry_alloc(args);
+
BUG_ON(preemptible());
BUG_ON(b->argidx >= MC_ARGS);
- if (b->mcidx == MC_BATCH ||
- (argidx + args) >= MC_ARGS) {
- mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
+ if (unlikely(b->mcidx == MC_BATCH ||
+ (argidx + args) >= MC_ARGS)) {
+ trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
+ XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
xen_mc_flush();
argidx = roundup(b->argidx, sizeof(u64));
}
ret.mc = &b->entries[b->mcidx];
-#ifdef MC_DEBUG
+#if MC_DEBUG
b->caller[b->mcidx] = __builtin_return_address(0);
#endif
b->mcidx++;
@@ -218,20 +168,25 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
BUG_ON(preemptible());
BUG_ON(b->argidx >= MC_ARGS);
- if (b->mcidx == 0)
- return ret;
-
- if (b->entries[b->mcidx - 1].op != op)
- return ret;
+ if (unlikely(b->mcidx == 0 ||
+ b->entries[b->mcidx - 1].op != op)) {
+ trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
+ goto out;
+ }

- if ((b->argidx + size) >= MC_ARGS)
- return ret;
+ if (unlikely((b->argidx + size) >= MC_ARGS)) {
+ trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
+ goto out;
+ }
ret.mc = &b->entries[b->mcidx - 1];
ret.args = &b->args[b->argidx];
b->argidx += size;
BUG_ON(b->argidx >= MC_ARGS);
+
+ trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
+out:
return ret;
}
@@ -241,43 +196,13 @@ void xen_mc_callback(void (*fn)(void *), void *data)
struct callback *cb;
if (b->cbidx == MC_BATCH) {
- mc_stats_flush(FL_CALLBACKS);
+ trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
xen_mc_flush();
}
+ trace_xen_mc_callback(fn, data);
+
cb = &b->callbacks[b->cbidx++];
cb->fn = fn;
cb->data = data;
}
-
-#ifdef CONFIG_XEN_DEBUG_FS
-
-static struct dentry *d_mc_debug;
-
-static int __init xen_mc_debugfs(void)
-{
- struct dentry *d_xen = xen_init_debugfs();
-
- if (d_xen == NULL)
- return -ENOMEM;
-
- d_mc_debug = debugfs_create_dir("multicalls", d_xen);
-
- debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);
-
- debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
- debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
- debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);
-
- xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
- mc_stats.histo, MC_BATCH);
- xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
- mc_stats.histo_hypercalls, NHYPERCALLS);
- xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
- mc_stats.flush, FL_N_REASONS);
-
- return 0;
-}
-fs_initcall(xen_mc_debugfs);
-
-#endif /* CONFIG_XEN_DEBUG_FS */
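
The rewritten xen_mc_flush() above drops the debugfs counters in favour of
tracepoints and special-cases batch sizes 0 and 1: an empty batch is a no-op,
and a singleton bypasses the multicall machinery, since wrapping a single
call in HYPERVISOR_multicall() would spend a hypercall to save a hypercall.
A sketch of the whole dispatch, with the default branch reconstructed from
the surrounding context lines (MC_DEBUG bookkeeping omitted):

	/* sketch of the patched flush dispatch; debug paths trimmed */
	switch (b->mcidx) {
	case 0:
		/* nothing queued: argument space must be empty too */
		BUG_ON(b->argidx != 0);
		break;

	case 1:
		/* singleton: issue the hypercall directly rather than
		   paying for a multicall holding exactly one entry */
		mc = &b->entries[0];
		mc->result = privcmd_call(mc->op,
					  mc->args[0], mc->args[1], mc->args[2],
					  mc->args[3], mc->args[4]);
		ret = mc->result < 0;
		break;

	default:
		/* real batch: one HYPERVISOR_multicall for all entries */
		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;
	}
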
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 4ec8035e3216..dee79b78a90f 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -1,6 +1,8 @@
#ifndef _XEN_MULTICALLS_H
#define _XEN_MULTICALLS_H
+#include <trace/events/xen.h>
+
#include "xen-ops.h"
/* Multicalls */
@@ -20,8 +22,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
static inline void xen_mc_batch(void)
{
unsigned long flags;
+
/* need to disable interrupts until this entry is complete */
local_irq_save(flags);
+ trace_xen_mc_batch(paravirt_get_lazy_mode());
__this_cpu_write(xen_mc_irq_flags, flags);
}
@@ -37,6 +41,8 @@ void xen_mc_flush(void);
/* Issue a multicall if we're not in a lazy mode */
static inline void xen_mc_issue(unsigned mode)
{
+ trace_xen_mc_issue(mode);
+
if ((paravirt_get_lazy_mode() & mode) == 0)
xen_mc_flush();
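
xen_mc_batch() and xen_mc_issue() now trace every batch open and issue
decision. The issue side flushes only when the current paravirt lazy mode
does not cover the caller's mode, which is what keeps the batched mmu.c
paths above cheap inside lazy-MMU sections. Expanded as a sketch (the
local_irq_restore() tail is not visible in the hunk and is assumed from the
rest of the file):

	/* sketch of the issue-side decision */
	static inline void sketch_mc_issue(unsigned mode)
	{
		trace_xen_mc_issue(mode);

		/* inside a matching lazy section (e.g. PARAVIRT_LAZY_MMU)
		   keep batching; the flush runs when the section ends */
		if ((paravirt_get_lazy_mode() & mode) == 0)
			xen_mc_flush();

		/* assumed tail: restore IRQ state saved by xen_mc_batch() */
		local_irq_restore(__this_cpu_read(xen_mc_irq_flags));
	}
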
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 25c52f94a27c..ffcf2615640b 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
#ifdef CONFIG_XEN_PVHVM
static int xen_emul_unplug;
-static int __init check_platform_magic(void)
+static int check_platform_magic(void)
{
short magic;
char protocol;
diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c
new file mode 100644
index 000000000000..734beba2a08c
--- /dev/null
+++ b/arch/x86/xen/trace.c
@@ -0,0 +1,61 @@
+#include <linux/ftrace.h>
+
+#define N(x) [__HYPERVISOR_##x] = "("#x")"
+static const char *xen_hypercall_names[] = {
+ N(set_trap_table),
+ N(mmu_update),
+ N(set_gdt),
+ N(stack_switch),
+ N(set_callbacks),
+ N(fpu_taskswitch),
+ N(sched_op_compat),
+ N(dom0_op),
+ N(set_debugreg),
+ N(get_debugreg),
+ N(update_descriptor),
+ N(memory_op),
+ N(multicall),
+ N(update_va_mapping),
+ N(set_timer_op),
+ N(event_channel_op_compat),
+ N(xen_version),
+ N(console_io),
+ N(physdev_op_compat),
+ N(grant_table_op),
+ N(vm_assist),
+ N(update_va_mapping_otherdomain),
+ N(iret),
+ N(vcpu_op),
+ N(set_segment_base),
+ N(mmuext_op),
+ N(acm_op),
+ N(nmi_op),
+ N(sched_op),
+ N(callback_op),
+ N(xenoprof_op),
+ N(event_channel_op),
+ N(physdev_op),
+ N(hvm_op),
+
+/* Architecture-specific hypercall definitions. */
+ N(arch_0),
+ N(arch_1),
+ N(arch_2),
+ N(arch_3),
+ N(arch_4),
+ N(arch_5),
+ N(arch_6),
+ N(arch_7),
+};
+#undef N
+
+static const char *xen_hypercall_name(unsigned op)
+{
+ if (op < ARRAY_SIZE(xen_hypercall_names) && xen_hypercall_names[op] != NULL)
+ return xen_hypercall_names[op];
+
+ return "";
+}
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/xen.h>
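
The N() macro in trace.c builds a sparse string table with designated array
initializers keyed by the __HYPERVISOR_* opcode values; unnamed slots stay
NULL and xen_hypercall_name() degrades to an empty string for unknown ops.
The same technique as a tiny standalone program (the opcodes here are made
up, not the Xen ones):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* hypothetical opcode numbers standing in for __HYPERVISOR_* */
	enum { OP_READ = 0, OP_WRITE = 1, OP_SYNC = 7 };

	#define N(x) [OP_##x] = "(" #x ")"
	static const char *op_names[] = {
		N(READ),
		N(WRITE),
		N(SYNC),	/* slots 2..6 stay NULL automatically */
	};
	#undef N

	static const char *op_name(unsigned op)
	{
		if (op < ARRAY_SIZE(op_names) && op_names[op] != NULL)
			return op_names[op];
		return "";	/* gap or out of range: same fallback */
	}

	int main(void)
	{
		printf("1 -> %s\n", op_name(1));	/* (WRITE) */
		printf("3 -> %s\n", op_name(3));	/* empty: gap */
		printf("99 -> %s\n", op_name(99));	/* empty: range */
		return 0;
	}
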
diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
new file mode 100644
index 000000000000..1cd7f4d11e29
--- /dev/null
+++ b/arch/x86/xen/vga.c
@@ -0,0 +1,67 @@
+#include <linux/screen_info.h>
+#include <linux/init.h>
+
+#include <asm/bootparam.h>
+#include <asm/setup.h>
+
+#include <xen/interface/xen.h>
+
+#include "xen-ops.h"
+
+void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
+{
+ struct screen_info *screen_info = &boot_params.screen_info;
+
+ /* This is drawn from a dump from vgacon:startup in
+ * standard Linux. */
+ screen_info->orig_video_mode = 3;
+ screen_info->orig_video_isVGA = 1;
+ screen_info->orig_video_lines = 25;
+ screen_info->orig_video_cols = 80;
+ screen_info->orig_video_ega_bx = 3;
+ screen_info->orig_video_points = 16;
+ screen_info->orig_y = screen_info->orig_video_lines - 1;
+
+ switch (info->video_type) {
+ case XEN_VGATYPE_TEXT_MODE_3:
+ if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
+ + sizeof(info->u.text_mode_3))
+ break;
+ screen_info->orig_video_lines = info->u.text_mode_3.rows;
+ screen_info->orig_video_cols = info->u.text_mode_3.columns;
+ screen_info->orig_x = info->u.text_mode_3.cursor_x;
+ screen_info->orig_y = info->u.text_mode_3.cursor_y;
+ screen_info->orig_video_points =
+ info->u.text_mode_3.font_height;
+ break;
+
+ case XEN_VGATYPE_VESA_LFB:
+ if (size < offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.gbl_caps))
+ break;
+ screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB;
+ screen_info->lfb_width = info->u.vesa_lfb.width;
+ screen_info->lfb_height = info->u.vesa_lfb.height;
+ screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel;
+ screen_info->lfb_base = info->u.vesa_lfb.lfb_base;
+ screen_info->lfb_size = info->u.vesa_lfb.lfb_size;
+ screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line;
+ screen_info->red_size = info->u.vesa_lfb.red_size;
+ screen_info->red_pos = info->u.vesa_lfb.red_pos;
+ screen_info->green_size = info->u.vesa_lfb.green_size;
+ screen_info->green_pos = info->u.vesa_lfb.green_pos;
+ screen_info->blue_size = info->u.vesa_lfb.blue_size;
+ screen_info->blue_pos = info->u.vesa_lfb.blue_pos;
+ screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
+ screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+ if (size >= offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.gbl_caps)
+ + sizeof(info->u.vesa_lfb.gbl_caps))
+ screen_info->capabilities = info->u.vesa_lfb.gbl_caps;
+ if (size >= offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.mode_attrs)
+ + sizeof(info->u.vesa_lfb.mode_attrs))
+ screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
+ break;
+ }
+}
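
xen_init_vga() treats dom0_vga_console_info as a versioned ABI: any field
newer than the oldest layout is read only if the hypervisor-reported size
proves it was actually sent, via size >= offsetof(...) + sizeof(field). The
guard isolated into a standalone example (fb_info is a hypothetical struct,
not the Xen ABI):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* hypothetical versioned ABI: v1 ends after `height`,
	   v2 appended `caps` without bumping a version field */
	struct fb_info {
		uint32_t width;
		uint32_t height;
		uint32_t caps;	/* only present if the sender is v2 */
	};

	static void parse_fb_info(const struct fb_info *info, size_t size)
	{
		printf("mode %ux%u\n", info->width, info->height);

		/* optional field: touch it only if `size` covers it */
		if (size >= offsetof(struct fb_info, caps) + sizeof(info->caps))
			printf("caps 0x%x\n", (unsigned)info->caps);
	}

	int main(void)
	{
		struct fb_info v2 = { 800, 600, 0x3 };

		parse_fb_info(&v2, sizeof(v2));	/* full struct: caps read */
		parse_fb_info(&v2, offsetof(struct fb_info, caps));
						/* v1-sized: caps skipped */
		return 0;
	}
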
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 97dfdc8757b3..b095739ccd4c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -88,6 +88,17 @@ static inline void xen_uninit_lock_cpu(int cpu)
}
#endif
+struct dom0_vga_console_info;
+
+#ifdef CONFIG_XEN_DOM0
+void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+#else
+static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+ size_t size)
+{
+}
+#endif
+
/* Declare an asm function, along with symbols needed to make it
inlineable */
#define DECL_ASM(ret, name, ...) \
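
The xen-ops.h hunk is the usual config-stub idiom: a forward declaration
plus a real prototype under CONFIG_XEN_DOM0, and an empty static inline
otherwise, so callers of xen_init_vga() need no #ifdef of their own and the
call compiles away on non-dom0 builds. The same shape in miniature
(CONFIG_FOO and foo_init() are made-up names):

	#include <stddef.h>	/* size_t; a kernel header would use <linux/types.h> */

	struct foo_info;	/* forward declaration keeps the header light */

	#ifdef CONFIG_FOO
	void foo_init(const struct foo_info *info, size_t size);
	#else
	static inline void foo_init(const struct foo_info *info, size_t size)
	{
		/* compiled out: callers build unconditionally */
	}
	#endif
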
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 5d43c1f8ada8..c346ccdce0df 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -80,18 +80,7 @@ config XTENSA_UNALIGNED_USER
Say Y here to enable unaligned memory access in user space.
-config PREEMPT
- bool "Preemptible Kernel"
- help
- This option reduces the latency of the kernel when reacting to
- real-time or interactive events by allowing a low priority process to
- be preempted even if it is in kernel mode executing a system call.
- Unfortunately the kernel code has some race conditions if both
- CONFIG_SMP and CONFIG_PREEMPT are enabled, so this option is
- currently disabled if you are building an SMP kernel.
-
- Say Y here if you are building a kernel for a desktop, embedded
- or real-time system. Say N if you are unsure.
+source "kernel/Kconfig.preempt"
config MATH_EMULATION
bool "Math emulation"
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index c1accea8cb56..451dda928c93 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -24,26 +24,6 @@
#undef DEBUG_RELOCATE
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc_exec(size);
-}
-
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-int module_frob_arch_sections(Elf32_Ehdr *hdr,
- Elf32_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
static int
decode_calln_opcode (unsigned char *location)
{
@@ -66,18 +46,6 @@ decode_l32r_opcode (unsigned char *location)
#endif
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *mod)
-{
- printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
- mod->name);
- return -ENOEXEC;
-
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -222,14 +190,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *mod)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
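
The xtensa module.c deletions are possible because the generic module loader
now supplies weak default implementations of module_alloc(), module_free(),
module_frob_arch_sections(), apply_relocate(), module_finalize() and
module_arch_cleanup(); an architecture keeps only the hooks it genuinely
overrides, which for xtensa is apply_relocate_add() alone. The weak-symbol
mechanism behind that, reduced to a sketch (arch_hook is a made-up name):

	/* generic.c - the loader's fallback, a weak definition
	   (the kernel spells the attribute __weak) */
	void __attribute__((weak)) arch_hook(void)
	{
		/* sane default for every architecture that stays silent */
	}

	/* arch.c - compiled only where the default is wrong; a strong
	   definition here replaces the weak one at link time */
	void arch_hook(void)
	{
		/* architecture-specific behaviour */
	}
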